lionagi 0.0.315__py3-none-any.whl → 0.1.0__py3-none-any.whl

Files changed (103)
  1. lionagi/core/__init__.py +19 -8
  2. lionagi/core/agent/__init__.py +0 -3
  3. lionagi/core/agent/base_agent.py +26 -30
  4. lionagi/core/branch/__init__.py +0 -4
  5. lionagi/core/branch/{base_branch.py → base.py} +13 -14
  6. lionagi/core/branch/branch.py +22 -20
  7. lionagi/core/branch/executable_branch.py +0 -347
  8. lionagi/core/branch/{branch_flow_mixin.py → flow_mixin.py} +6 -6
  9. lionagi/core/branch/util.py +1 -1
  10. lionagi/core/direct/__init__.py +13 -1
  11. lionagi/core/direct/cot.py +123 -1
  12. lionagi/core/direct/plan.py +164 -0
  13. lionagi/core/direct/predict.py +13 -9
  14. lionagi/core/direct/react.py +12 -8
  15. lionagi/core/direct/score.py +4 -4
  16. lionagi/core/direct/select.py +4 -4
  17. lionagi/core/direct/utils.py +23 -0
  18. lionagi/core/direct/vote.py +2 -2
  19. lionagi/core/execute/base_executor.py +50 -0
  20. lionagi/core/execute/branch_executor.py +233 -0
  21. lionagi/core/execute/instruction_map_executor.py +131 -0
  22. lionagi/core/execute/structure_executor.py +218 -0
  23. lionagi/core/flow/monoflow/ReAct.py +4 -4
  24. lionagi/core/flow/monoflow/chat.py +6 -6
  25. lionagi/core/flow/monoflow/chat_mixin.py +24 -34
  26. lionagi/core/flow/monoflow/followup.py +4 -4
  27. lionagi/core/flow/polyflow/__init__.py +1 -1
  28. lionagi/core/flow/polyflow/chat.py +15 -12
  29. lionagi/core/{prompt/action_template.py → form/action_form.py} +2 -2
  30. lionagi/core/{prompt → form}/field_validator.py +40 -31
  31. lionagi/core/form/form.py +302 -0
  32. lionagi/core/form/mixin.py +214 -0
  33. lionagi/core/{prompt/scored_template.py → form/scored_form.py} +2 -2
  34. lionagi/core/generic/__init__.py +37 -0
  35. lionagi/core/generic/action.py +26 -0
  36. lionagi/core/generic/component.py +457 -0
  37. lionagi/core/generic/condition.py +44 -0
  38. lionagi/core/generic/data_logger.py +305 -0
  39. lionagi/core/generic/edge.py +110 -0
  40. lionagi/core/generic/mail.py +90 -0
  41. lionagi/core/generic/mailbox.py +36 -0
  42. lionagi/core/generic/node.py +285 -0
  43. lionagi/core/generic/relation.py +70 -0
  44. lionagi/core/generic/signal.py +22 -0
  45. lionagi/core/generic/structure.py +362 -0
  46. lionagi/core/generic/transfer.py +20 -0
  47. lionagi/core/generic/work.py +40 -0
  48. lionagi/core/graph/graph.py +126 -0
  49. lionagi/core/graph/tree.py +190 -0
  50. lionagi/core/mail/__init__.py +0 -8
  51. lionagi/core/mail/mail_manager.py +12 -10
  52. lionagi/core/mail/schema.py +9 -2
  53. lionagi/core/messages/__init__.py +0 -3
  54. lionagi/core/messages/schema.py +17 -225
  55. lionagi/core/session/__init__.py +0 -3
  56. lionagi/core/session/session.py +25 -23
  57. lionagi/core/tool/__init__.py +3 -1
  58. lionagi/core/tool/tool.py +28 -0
  59. lionagi/core/tool/tool_manager.py +75 -75
  60. lionagi/integrations/chunker/chunk.py +7 -7
  61. lionagi/integrations/config/oai_configs.py +4 -4
  62. lionagi/integrations/loader/load.py +6 -6
  63. lionagi/integrations/loader/load_util.py +8 -8
  64. lionagi/libs/ln_api.py +3 -3
  65. lionagi/libs/ln_parse.py +43 -6
  66. lionagi/libs/ln_validate.py +288 -0
  67. lionagi/libs/sys_util.py +28 -6
  68. lionagi/tests/libs/test_async.py +0 -0
  69. lionagi/tests/libs/test_field_validators.py +353 -0
  70. lionagi/tests/test_core/test_base_branch.py +0 -1
  71. lionagi/tests/test_core/test_branch.py +3 -0
  72. lionagi/tests/test_core/test_session_base_util.py +1 -0
  73. lionagi/version.py +1 -1
  74. {lionagi-0.0.315.dist-info → lionagi-0.1.0.dist-info}/METADATA +1 -1
  75. lionagi-0.1.0.dist-info/RECORD +136 -0
  76. lionagi/core/prompt/prompt_template.py +0 -312
  77. lionagi/core/schema/__init__.py +0 -22
  78. lionagi/core/schema/action_node.py +0 -29
  79. lionagi/core/schema/base_mixin.py +0 -296
  80. lionagi/core/schema/base_node.py +0 -199
  81. lionagi/core/schema/condition.py +0 -24
  82. lionagi/core/schema/data_logger.py +0 -354
  83. lionagi/core/schema/data_node.py +0 -93
  84. lionagi/core/schema/prompt_template.py +0 -67
  85. lionagi/core/schema/structure.py +0 -912
  86. lionagi/core/tool/manual.py +0 -1
  87. lionagi-0.0.315.dist-info/RECORD +0 -121
  88. /lionagi/core/{branch/base → execute}/__init__.py +0 -0
  89. /lionagi/core/flow/{base/baseflow.py → baseflow.py} +0 -0
  90. /lionagi/core/flow/{base/__init__.py → mono_chat_mixin.py} +0 -0
  91. /lionagi/core/{prompt → form}/__init__.py +0 -0
  92. /lionagi/{tests/test_integrations → core/graph}/__init__.py +0 -0
  93. /lionagi/tests/{test_libs → integrations}/__init__.py +0 -0
  94. /lionagi/tests/{test_libs/test_async.py → libs/__init__.py} +0 -0
  95. /lionagi/tests/{test_libs → libs}/test_api.py +0 -0
  96. /lionagi/tests/{test_libs → libs}/test_convert.py +0 -0
  97. /lionagi/tests/{test_libs → libs}/test_func_call.py +0 -0
  98. /lionagi/tests/{test_libs → libs}/test_nested.py +0 -0
  99. /lionagi/tests/{test_libs → libs}/test_parse.py +0 -0
  100. /lionagi/tests/{test_libs → libs}/test_sys_util.py +0 -0
  101. {lionagi-0.0.315.dist-info → lionagi-0.1.0.dist-info}/LICENSE +0 -0
  102. {lionagi-0.0.315.dist-info → lionagi-0.1.0.dist-info}/WHEEL +0 -0
  103. {lionagi-0.0.315.dist-info → lionagi-0.1.0.dist-info}/top_level.txt +0 -0

lionagi/core/branch/{branch_flow_mixin.py → flow_mixin.py}
@@ -1,9 +1,9 @@
 from abc import ABC
 from typing import Any, Optional, Union, TypeVar
 
-from ..schema import TOOL_TYPE, Tool
-from ..messages import Instruction, System
-from ..flow.monoflow import MonoChat, MonoFollowup, MonoReAct
+from lionagi.core.tool import Tool, TOOL_TYPE
+from lionagi.core.messages.schema import Instruction, System
+from lionagi.core.flow.monoflow import MonoChat, MonoFollowup, MonoReAct
 
 T = TypeVar("T", bound=Tool)
 
@@ -20,7 +20,7 @@ class BranchFlowMixin(ABC):
         out: bool = True,
         invoke: bool = True,
         output_fields=None,
-        prompt_template=None,
+        form=None,
         **kwargs,
     ) -> Any:
         flow = MonoChat(self)
@@ -33,7 +33,7 @@ class BranchFlowMixin(ABC):
             out=out,
             invoke=invoke,
             output_fields=output_fields,
-            prompt_template=prompt_template,
+            form=form,
             **kwargs,
         )
 
@@ -93,4 +93,4 @@ class BranchFlowMixin(ABC):
             output_prompt=output_prompt,
             out=out,
             **kwargs,
-        )
+        )
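
Across these hunks the prompt_template= keyword becomes form=; the same rename appears below in the predict, react, score, and select call sites. A minimal caller migration sketch, assuming an existing branch object and a form/template object named my_form (both names are illustrative):

async def run_chat(branch, instruction, my_form):
    # lionagi 0.0.315 style, no longer accepted:
    #     await branch.chat(instruction, prompt_template=my_form)
    # lionagi 0.1.0 style; only the keyword name changes:
    return await branch.chat(instruction, form=my_form)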

lionagi/core/branch/util.py
@@ -320,4 +320,4 @@ class MessageUtil:
         else:
             with contextlib.suppress(Exception):
                 answers.append(nested.nget(content, ["system_info"]))
-        return "\n".join(answers)
+        return "\n".join(answers)

lionagi/core/direct/__init__.py
@@ -3,5 +3,17 @@ from .select import select
 from .score import score
 from .react import react
 from .vote import vote
+from .plan import plan
+from .cot import chain_of_thoughts, chain_of_react
 
-__all__ = ["predict", "select", "score", "vote", "react"]
+
+__all__ = [
+    "predict",
+    "select",
+    "score",
+    "vote",
+    "react",
+    "plan",
+    "chain_of_thoughts",
+    "chain_of_react",
+]
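
For reference, the public import surface of lionagi.core.direct after this change (names taken directly from the new __all__):

from lionagi.core.direct import (
    predict,
    select,
    score,
    vote,
    react,
    plan,
    chain_of_thoughts,
    chain_of_react,
)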

lionagi/core/direct/cot.py
@@ -1 +1,123 @@
-# TODO: chain of thoughts
+from lionagi.libs import convert
+
+from lionagi.core.direct.predict import predict
+from lionagi.core.direct.plan import plan
+from lionagi.core.direct.react import react
+
+from .utils import _process_tools
+
+
+async def chain_of_thoughts(
+    sentence=None,
+    branch=None,
+    instruction=None,
+    reason=False,
+    confidence_score=False,
+    num_steps=3,
+    directive_kwargs={},
+    return_branch=False,
+    **kwargs,
+):
+
+    out_, outs, answer, reasons, confidence_score = "", [], "", [], 0
+    if branch is not None:
+        out_ = await plan(
+            sentence,
+            branch=branch,
+            instruction=instruction,
+            num_steps=num_steps,
+            **kwargs,
+        )
+    else:
+        out_, branch = await plan(
+            sentence,
+            instruction=instruction,
+            branch=branch,
+            num_steps=num_steps,
+            return_branch=True,
+            **kwargs,
+        )
+
+    for i in range(len(out_.plan)):
+        _out = await predict(
+            branch=branch,
+            instruction=out_.plan[f"step_{i+1}"],
+            reason=reason,
+            confidence_score=confidence_score,
+            **directive_kwargs,
+        )
+        answer += _out.answer
+        if reason:
+            reasons.append(_out.reason)
+        if confidence_score:
+            confidence_score += _out.confidence_score
+        outs.append(_out)
+
+    setattr(out_, "chain_output", outs)
+    setattr(out_, "chain_answer", answer)
+
+    if reason:
+        setattr(out_, "chain_reasons", reasons)
+    if confidence_score:
+        setattr(out_, "chain_confidence_score", confidence_score / len(outs))
+
+    if return_branch:
+        return out_, branch
+
+    return out_
+
+
+async def chain_of_react(
+    sentence=None,
+    branch=None,
+    instruction=None,
+    num_steps=3,
+    tools=None,
+    directive_system=None,
+    directive_kwargs={},
+    return_branch=False,
+    **kwargs,
+):
+    out_, outs, reasons, actions, action_responses = "", [], [], [], []
+    if branch is not None:
+        out_ = await plan(
+            sentence,
+            branch=branch,
+            instruction=instruction,
+            num_steps=num_steps,
+            **kwargs,
+        )
+    else:
+        out_, branch = await plan(
+            sentence,
+            instruction=instruction,
+            branch=branch,
+            num_steps=num_steps,
+            return_branch=True,
+            **kwargs,
+        )
+
+    _process_tools(tools, branch)
+
+    for i in range(len(out_.plan)):
+        _out = await react(
+            branch=branch,
+            system=directive_system,
+            instruction=out_.plan[f"step_{i+1}"],
+            **directive_kwargs,
+        )
+        outs.append(_out)
+        reasons.append(_out.reason)
+        actions.append(_out.actions)
+        if _out.action_needed:
+            action_responses.append(_out.action_response)
+
+    setattr(out_, "chain_output", convert.to_list(outs))
+    setattr(out_, "chain_reason", convert.to_list(reasons))
+    setattr(out_, "chain_actions", convert.to_list(actions))
+    setattr(out_, "chain_action_response", convert.to_list(action_responses))
+
+    if return_branch:
+        return out_, branch
+
+    return out_
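
A minimal usage sketch for the new chain_of_thoughts directive, based on the signature above: it calls plan() to draft num_steps steps, runs predict() on each step, and attaches chain_output and chain_answer (plus chain_reasons when reason=True) to the returned form. The prompt text is illustrative, and a default-constructed Branch with a configured LLM service (e.g. an OpenAI key in the environment) is assumed:

import asyncio

from lionagi.core.direct import chain_of_thoughts


async def main():
    result = await chain_of_thoughts(
        "How should a user migrate from lionagi 0.0.315 to 0.1.0?",
        instruction="think it through step by step",
        num_steps=3,
        reason=True,
    )
    print(result.chain_answer)   # concatenated step answers
    print(result.chain_reasons)  # one reason per step


asyncio.run(main())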

lionagi/core/direct/plan.py
@@ -0,0 +1,164 @@
+# plan.py
+
+from lionagi.libs import func_call, ParseUtil
+from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
+from lionagi.core.form.scored_form import ScoredForm
+from lionagi.core.branch.branch import Branch
+
+
+class PlanTemplate(ScoredForm):
+    template_name: str = "default_plan"
+    sentence: str | list | dict = Field(
+        default_factory=str,
+        description="the given sentence(s) or context to generate a plan for",
+    )
+    plan: dict | str = Field(
+        default_factory=dict,
+        description="the generated step by step plan, return as a dictionary following {step_n: {plan: ..., reason: ...}} format",
+    )
+    signature: str = "sentence -> plan"
+
+    def __init__(
+        self,
+        sentence=None,
+        instruction=None,
+        confidence_score=False,
+        reason=False,
+        num_step=3,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.sentence = sentence
+        self.task = f"Generate a {num_step}_step plan based on the given context. Instruction: {instruction}."
+
+        if reason:
+            self.output_fields.append("reason")
+
+        if confidence_score:
+            self.output_fields.append("confidence_score")
+
+
+async def _plan(
+    sentence,
+    *,
+    instruction=None,
+    branch=None,
+    confidence_score=False,
+    reason=False,
+    retries=2,
+    delay=0.5,
+    backoff_factor=2,
+    default_value=None,
+    timeout=None,
+    branch_name=None,
+    system=None,
+    messages=None,
+    service=None,
+    sender=None,
+    llmconfig=None,
+    tools=None,
+    datalogger=None,
+    persist_path=None,
+    tool_manager=None,
+    return_branch=False,
+    **kwargs,
+):
+    if "temperature" not in kwargs:
+        kwargs["temperature"] = 0.1
+
+    instruction = instruction or ""
+
+    branch = branch or Branch(
+        name=branch_name,
+        system=system,
+        messages=messages,
+        service=service,
+        sender=sender,
+        llmconfig=llmconfig,
+        tools=tools,
+        datalogger=datalogger,
+        persist_path=persist_path,
+        tool_manager=tool_manager,
+    )
+
+    _template = PlanTemplate(
+        sentence=sentence,
+        instruction=instruction,
+        confidence_score=confidence_score,
+        reason=reason,
+    )
+
+    await func_call.rcall(
+        branch.chat,
+        form=_template,
+        retries=retries,
+        delay=delay,
+        backoff_factor=backoff_factor,
+        default=default_value,
+        timeout=timeout,
+        **kwargs,
+    )
+
+    _template.plan = ParseUtil.fuzzy_parse_json(_template.plan)
+
+    return (_template, branch) if return_branch else _template
+
+
+async def plan(
+    sentence,
+    *,
+    instruction=None,
+    num_instances=1,
+    branch=None,
+    confidence_score=False,
+    reason=False,
+    retries=2,
+    delay=0.5,
+    backoff_factor=2,
+    default_value=None,
+    timeout=None,
+    branch_name=None,
+    system=None,
+    messages=None,
+    service=None,
+    sender=None,
+    llmconfig=None,
+    tools=None,
+    datalogger=None,
+    persist_path=None,
+    tool_manager=None,
+    return_branch=False,
+    **kwargs,
+):
+    async def _inner(i=0):
+        return await _plan(
+            sentence=sentence,
+            instruction=instruction,
+            branch=branch,
+            confidence_score=confidence_score,
+            reason=reason,
+            retries=retries,
+            delay=delay,
+            backoff_factor=backoff_factor,
+            default_value=default_value,
+            timeout=timeout,
+            branch_name=branch_name,
+            system=system,
+            messages=messages,
+            service=service,
+            sender=sender,
+            llmconfig=llmconfig,
+            tools=tools,
+            datalogger=datalogger,
+            persist_path=persist_path,
+            tool_manager=tool_manager,
+            return_branch=return_branch,
+            **kwargs,
+        )
+
+    if num_instances == 1:
+        return await _inner()
+
+    elif num_instances > 1:
+        return await func_call.alcall(range(num_instances), _inner)
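
A usage sketch for the new plan directive, again assuming a default Branch with a configured service. The returned PlanTemplate carries the fuzzy-parsed plan dict; return_branch=True additionally returns the Branch that ran the chat:

import asyncio

from lionagi.core.direct import plan


async def main():
    template, branch = await plan(
        "ship the 0.1.0 release of a Python package",
        instruction="break the work into concrete, ordered steps",
        reason=True,
        return_branch=True,
    )
    # e.g. {"step_1": {"plan": "...", "reason": "..."}, "step_2": {...}, ...}
    print(template.plan)


asyncio.run(main())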

lionagi/core/direct/predict.py
@@ -9,11 +9,11 @@ confidence score, and reason for the prediction.
 from lionagi.libs import func_call
 from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
 
-from ..prompt.scored_template import ScoredTemplate
-from ..branch import Branch
+from lionagi.core.form.scored_form import ScoredForm
+from lionagi.core.branch.branch import Branch
 
 
-class PredictTemplate(ScoredTemplate):
+class PredictTemplate(ScoredForm):
     """
     A class for predicting the next sentence(s) based on a given sentence.
 
@@ -43,14 +43,15 @@ class PredictTemplate(ScoredTemplate):
         default_factory=int, description="the number of sentences to predict"
     )
     answer: str | list = Field(
-        default_factory=str, description="the predicted sentence(s)"
+        default_factory=str, description="the predicted sentence(s) or desired output"
    )
     signature: str = "sentence -> answer"
 
     def __init__(
         self,
         sentence=None,
-        num_sentences=None,
+        instruction=None,
+        num_sentences=1,
         confidence_score=False,
         reason=False,
         **kwargs,
@@ -67,9 +68,9 @@ class PredictTemplate(ScoredTemplate):
         """
         super().__init__(**kwargs)
 
-        self.sentence = sentence
+        self.sentence = sentence or ""
         self.num_sentences = num_sentences
-        self.task = f"predict the next {self.num_sentences} sentence(s)"
+        self.task = f"follow instruction to predict the next {self.num_sentences} sentence(s). Instruction: {instruction}."
 
         if reason:
             self.output_fields.append("reason")
@@ -82,6 +83,8 @@ async def predict(
     sentence=None,
     num_sentences=1,
     confidence_score=False,
+    instruction=None,
+    branch=None,
    reason=False,
    retries=2,
    delay=0.5,
@@ -128,7 +131,7 @@ async def predict(
     Returns:
         PredictTemplate: The predict template with the predicted sentence(s).
     """
-    branch = Branch(
+    branch = branch or Branch(
         name=branch_name,
         system=system,
         messages=messages,
@@ -142,6 +145,7 @@
     )
 
     predict_template = PredictTemplate(
+        instruction=instruction,
         sentence=sentence,
         num_sentences=num_sentences,
         confidence_score=confidence_score,
@@ -150,7 +154,7 @@
 
     await func_call.rcall(
         branch.chat,
-        prompt_template=predict_template,
+        form=predict_template,
         retries=retries,
         delay=delay,
         backoff_factor=backoff_factor,
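
The notable behavior change here: predict() now accepts an existing branch instead of always constructing its own, takes an explicit instruction, and forwards the template as form=. A sketch of the new call shape, assuming a pre-configured default Branch:

import asyncio

from lionagi.core.branch.branch import Branch
from lionagi.core.direct import predict


async def main():
    branch = Branch()  # assumes a default LLM service/key is configured
    out = await predict(
        "lionagi 0.1.0 reorganizes core into form, generic, and execute packages.",
        instruction="predict the next sentence of this changelog",
        num_sentences=1,
        branch=branch,
    )
    print(out.answer)


asyncio.run(main())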

lionagi/core/direct/react.py
@@ -1,13 +1,14 @@
-from lionagi.libs import func_call, convert, AsyncUtil
+from lionagi.libs import func_call, AsyncUtil
 
 from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
-from ..prompt.action_template import ActionedTemplate
-from ..branch import Branch
+from lionagi.core.form.action_form import ActionForm
+from lionagi.core.branch.branch import Branch
+from lionagi.core.direct.utils import _process_tools
 
 
-class ReactTemplate(ActionedTemplate):
+class ReactTemplate(ActionForm):
     template_name: str = "default_react"
-    sentence: str | list | dict = Field(
+    sentence: str | list | dict | None = Field(
         default_factory=str,
         description="the given sentence(s) to reason and take actions on",
     )
@@ -29,7 +30,7 @@ class ReactTemplate(ActionedTemplate):
 
 
 async def _react(
-    sentence,
+    sentence=None,
     *,
     instruction=None,
     branch=None,
@@ -58,6 +59,9 @@ async def _react(
 
     instruction = instruction or ""
 
+    if branch and tools:
+        _process_tools(tools, branch)
+
     branch = branch or Branch(
         name=branch_name,
         system=system,
@@ -79,7 +83,7 @@ async def _react(
 
     await func_call.rcall(
         branch.chat,
-        prompt_template=_template,
+        form=_template,
         retries=retries,
         delay=delay,
         backoff_factor=backoff_factor,
@@ -109,7 +113,7 @@ async def _react(
 
 
 async def react(
-    sentence,
+    sentence=None,
    *,
    instruction=None,
    num_instances=1,
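
With sentence now optional and tools auto-registered on an existing branch via _process_tools, a ReAct run can be driven from an instruction alone. A sketch, assuming the public react() forwards branch and tools to _react as shown above and that a default LLM service is configured; the tool and prompt are illustrative:

import asyncio

from lionagi.core.branch.branch import Branch
from lionagi.core.direct import react


def multiply(x: float, y: float) -> float:
    """Multiply two numbers."""
    return x * y


async def main():
    branch = Branch()
    out = await react(
        instruction="What is 17 multiplied by 23? Use the multiply tool.",
        branch=branch,
        tools=[multiply],  # registered on the branch before the run
    )
    print(out.actions)  # actions recorded on the returned ActionForm


asyncio.run(main())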

lionagi/core/direct/score.py
@@ -12,11 +12,11 @@ ScoreTemplate class and a language model.
 from pydantic import Field
 import numpy as np
 from lionagi.libs import func_call, convert
-from ..prompt.scored_template import ScoredTemplate
-from ..branch import Branch
+from lionagi.core.form.scored_form import ScoredForm
+from lionagi.core.branch.branch import Branch
 
 
-class ScoreTemplate(ScoredTemplate):
+class ScoreTemplate(ScoredForm):
     """
     A class for scoring a given context using a language model.
 
@@ -169,7 +169,7 @@ async def _score(
 
     await func_call.rcall(
         branch.chat,
-        prompt_template=_template,
+        form=_template,
         retries=retries,
         delay=delay,
         backoff_factor=backoff_factor,

lionagi/core/direct/select.py
@@ -13,11 +13,11 @@ from enum import Enum
 from pydantic import Field
 
 from lionagi.libs import func_call, StringMatch
-from ..prompt.scored_template import ScoredTemplate
-from ..branch import Branch
+from lionagi.core.form.scored_form import ScoredForm
+from lionagi.core.branch.branch import Branch
 
 
-class SelectTemplate(ScoredTemplate):
+class SelectTemplate(ScoredForm):
     """
     A class for selecting an item from given choices based on a given context.
 
@@ -153,7 +153,7 @@ async def select(
 
     await func_call.rcall(
         branch.chat,
-        prompt_template=_template,
+        form=_template,
         retries=retries,
         delay=delay,
         backoff_factor=backoff_factor,

lionagi/core/direct/utils.py
@@ -1,3 +1,6 @@
+from typing import Callable
+from lionagi.core.tool import func_to_tool, Tool
+
 # import contextlib
 # from lionagi.libs import ParseUtil, StringMatch, convert, func_call
 
@@ -85,3 +88,23 @@
 # return _out
 
 # return out_ if len(out_) > 1 else out_[0]
+
+
+def _process_tools(tool_obj, branch):
+    if isinstance(tool_obj, Callable):
+        _process_tool(tool_obj, branch)
+    else:
+        for i in tool_obj:
+            _process_tool(i, branch)
+
+
+def _process_tool(tool_obj, branch):
+    if (
+        isinstance(tool_obj, Tool)
+        and tool_obj.schema_["function"]["name"] not in branch.tool_manager.registry
+    ):
+        branch.register_tools(tool_obj)
+    if isinstance(tool_obj, Callable):
+        tool = func_to_tool(tool_obj)[0]
+        if tool.schema_["function"]["name"] not in branch.tool_manager.registry:
+            branch.register_tools(tool)
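
A small illustration of the new helper (an internal, underscore-prefixed API): a plain callable is wrapped with func_to_tool and registered on the branch, a Tool instance is registered directly, and anything whose schema name is already in branch.tool_manager.registry is skipped. A default-constructed Branch and the lookup function are illustrative:

from lionagi.core.branch.branch import Branch
from lionagi.core.direct.utils import _process_tools


def lookup(term: str) -> str:
    """Return a canned definition for a term."""
    return f"definition of {term}"


branch = Branch()
_process_tools([lookup], branch)  # wraps the function via func_to_tool and registers it
_process_tools(lookup, branch)    # same schema name already registered, so skipped
print(list(branch.tool_manager.registry))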

lionagi/core/direct/vote.py
@@ -8,8 +8,8 @@ number of generations, number of outputs to return, number of scorers, score ran
 
 from lionagi.libs import func_call
 import numpy as np
-from .predict import predict
-from .score import score
+from lionagi.core.direct.predict import predict
+from lionagi.core.direct.score import score
 
 
 async def vote(

lionagi/core/execute/base_executor.py
@@ -0,0 +1,50 @@
+from collections import deque
+from abc import ABC, abstractmethod
+from typing import Any
+
+from pydantic import Field
+
+from lionagi.core.generic import BaseComponent
+from lionagi.core.mail.schema import BaseMail
+
+
+class BaseExecutor(BaseComponent, ABC):
+    pending_ins: dict = Field(
+        default_factory=dict, description="The pending incoming mails."
+    )
+    pending_outs: deque = Field(
+        default_factory=deque, description="The pending outgoing mails."
+    )
+    execute_stop: bool = Field(
+        False, description="A flag indicating whether to stop execution."
+    )
+    context: dict | str | None = Field(
+        None, description="The context buffer for the next instruction."
+    )
+    execution_responses: list = Field(
+        default_factory=list, description="The list of responses."
+    )
+    context_log: list = Field(default_factory=list, description="The context log.")
+    verbose: bool = Field(
+        True, description="A flag indicating whether to provide verbose output."
+    )
+    execute_stop: bool = Field(
+        False, description="A flag indicating whether to stop execution."
+    )
+
+    def send(self, recipient_id: str, category: str, package: Any) -> None:
+        """
+        Sends a mail to a recipient.
+
+        Args:
+            recipient_id (str): The ID of the recipient.
+            category (str): The category of the mail.
+            package (Any): The package to send in the mail.
+        """
+        mail = BaseMail(
+            sender_id=self.id_,
+            recipient_id=recipient_id,
+            category=category,
+            package=package,
+        )
+        self.pending_outs.append(mail)
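
BaseExecutor gives the new execute package a common mail-passing base: pending_ins and pending_outs buffers plus a send() helper that wraps a payload in BaseMail. A toy subclass sketch; the subclass name, recipient id, and category value are illustrative, and BaseComponent is assumed to assign id_ automatically:

from lionagi.core.execute.base_executor import BaseExecutor


class NoopExecutor(BaseExecutor):
    """Minimal concrete executor used only to exercise the base fields."""


executor = NoopExecutor()
executor.send(
    recipient_id="some-branch-id",  # illustrative id
    category="messages",            # illustrative category value
    package={"context": "hello"},
)
print(len(executor.pending_outs))  # 1 queued BaseMail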