lionagi 0.0.314__tar.gz → 0.0.316__tar.gz

Files changed (130)
  1. {lionagi-0.0.314/lionagi.egg-info → lionagi-0.0.316}/PKG-INFO +1 -1
  2. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/__init__.py +3 -0
  3. lionagi-0.0.316/lionagi/core/direct/__init__.py +10 -0
  4. lionagi-0.0.316/lionagi/core/direct/cot.py +88 -0
  5. lionagi-0.0.316/lionagi/core/direct/plan.py +162 -0
  6. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/predict.py +9 -5
  7. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/react.py +8 -4
  8. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/select.py +1 -3
  9. lionagi-0.0.316/lionagi/core/direct/sentiment.py +1 -0
  10. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/utils.py +20 -0
  11. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/llamaindex_/node_parser.py +6 -9
  12. lionagi-0.0.316/lionagi/integrations/chunker/chunk.py +175 -0
  13. lionagi-0.0.316/lionagi/integrations/loader/load.py +152 -0
  14. lionagi-0.0.316/lionagi/integrations/loader/load_util.py +266 -0
  15. lionagi-0.0.316/lionagi/tests/test_libs/__init__.py +0 -0
  16. lionagi-0.0.316/lionagi/tests/test_libs/test_async.py +0 -0
  17. lionagi-0.0.316/lionagi/version.py +1 -0
  18. {lionagi-0.0.314 → lionagi-0.0.316/lionagi.egg-info}/PKG-INFO +1 -1
  19. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi.egg-info/SOURCES.txt +8 -0
  20. lionagi-0.0.314/lionagi/core/direct/__init__.py +0 -7
  21. lionagi-0.0.314/lionagi/version.py +0 -1
  22. {lionagi-0.0.314 → lionagi-0.0.316}/LICENSE +0 -0
  23. {lionagi-0.0.314 → lionagi-0.0.316}/README.md +0 -0
  24. {lionagi-0.0.314 → lionagi-0.0.316}/README.rst +0 -0
  25. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/__init__.py +0 -0
  26. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/agent/__init__.py +0 -0
  27. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/agent/base_agent.py +0 -0
  28. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/branch/__init__.py +0 -0
  29. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/branch/base/__init__.py +0 -0
  30. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/branch/base_branch.py +0 -0
  31. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/branch/branch.py +0 -0
  32. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/branch/branch_flow_mixin.py +0 -0
  33. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/branch/executable_branch.py +0 -0
  34. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/branch/util.py +0 -0
  35. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/score.py +0 -0
  36. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/vote.py +0 -0
  37. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/__init__.py +0 -0
  38. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/base/__init__.py +0 -0
  39. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/base/baseflow.py +0 -0
  40. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/monoflow/ReAct.py +0 -0
  41. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/monoflow/__init__.py +0 -0
  42. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/monoflow/chat.py +0 -0
  43. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/monoflow/chat_mixin.py +0 -0
  44. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/monoflow/followup.py +0 -0
  45. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/polyflow/__init__.py +0 -0
  46. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/flow/polyflow/chat.py +0 -0
  47. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/mail/__init__.py +0 -0
  48. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/mail/mail_manager.py +0 -0
  49. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/mail/schema.py +0 -0
  50. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/messages/__init__.py +0 -0
  51. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/messages/schema.py +0 -0
  52. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/prompt/__init__.py +0 -0
  53. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/prompt/action_template.py +0 -0
  54. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/prompt/field_validator.py +0 -0
  55. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/prompt/prompt_template.py +0 -0
  56. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/prompt/scored_template.py +0 -0
  57. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/schema/__init__.py +0 -0
  58. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/schema/action_node.py +0 -0
  59. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/schema/base_mixin.py +0 -0
  60. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/schema/base_node.py +0 -0
  61. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/schema/condition.py +0 -0
  62. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/schema/data_logger.py +0 -0
  63. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/schema/data_node.py +0 -0
  64. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/schema/prompt_template.py +0 -0
  65. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/schema/structure.py +0 -0
  66. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/session/__init__.py +0 -0
  67. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/session/session.py +0 -0
  68. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/tool/__init__.py +0 -0
  69. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/tool/manual.py +0 -0
  70. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/tool/tool_manager.py +0 -0
  71. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/__init__.py +0 -0
  72. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/__init__.py +0 -0
  73. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/langchain_/__init__.py +0 -0
  74. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/langchain_/documents.py +0 -0
  75. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/langchain_/langchain_bridge.py +0 -0
  76. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/llamaindex_/__init__.py +0 -0
  77. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/llamaindex_/index.py +0 -0
  78. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +0 -0
  79. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/llamaindex_/reader.py +0 -0
  80. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/llamaindex_/textnode.py +0 -0
  81. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/pydantic_/__init__.py +0 -0
  82. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +0 -0
  83. {lionagi-0.0.314/lionagi/tests → lionagi-0.0.316/lionagi/integrations/chunker}/__init__.py +0 -0
  84. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/config/__init__.py +0 -0
  85. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/config/mlx_configs.py +0 -0
  86. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/config/oai_configs.py +0 -0
  87. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/config/ollama_configs.py +0 -0
  88. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/config/openrouter_configs.py +0 -0
  89. {lionagi-0.0.314/lionagi/tests/test_core → lionagi-0.0.316/lionagi/integrations/loader}/__init__.py +0 -0
  90. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/provider/__init__.py +0 -0
  91. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/provider/litellm.py +0 -0
  92. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/provider/mistralai.py +0 -0
  93. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/provider/mlx_service.py +0 -0
  94. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/provider/oai.py +0 -0
  95. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/provider/ollama.py +0 -0
  96. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/provider/openrouter.py +0 -0
  97. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/provider/services.py +0 -0
  98. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/provider/transformers.py +0 -0
  99. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/libs/__init__.py +0 -0
  100. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/libs/ln_api.py +0 -0
  101. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/libs/ln_async.py +0 -0
  102. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/libs/ln_convert.py +0 -0
  103. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/libs/ln_dataframe.py +0 -0
  104. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/libs/ln_func_call.py +0 -0
  105. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/libs/ln_nested.py +0 -0
  106. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/libs/ln_parse.py +0 -0
  107. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/libs/sys_util.py +0 -0
  108. {lionagi-0.0.314/lionagi/tests/test_integrations → lionagi-0.0.316/lionagi/tests}/__init__.py +0 -0
  109. {lionagi-0.0.314/lionagi/tests/test_libs → lionagi-0.0.316/lionagi/tests/test_core}/__init__.py +0 -0
  110. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_core/test_base_branch.py +0 -0
  111. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_core/test_branch.py +0 -0
  112. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_core/test_chat_flow.py +0 -0
  113. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_core/test_mail_manager.py +0 -0
  114. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_core/test_prompts.py +0 -0
  115. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_core/test_session.py +0 -0
  116. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_core/test_session_base_util.py +0 -0
  117. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_core/test_tool_manager.py +0 -0
  118. /lionagi-0.0.314/lionagi/tests/test_libs/test_async.py → /lionagi-0.0.316/lionagi/tests/test_integrations/__init__.py +0 -0
  119. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_libs/test_api.py +0 -0
  120. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_libs/test_convert.py +0 -0
  121. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_libs/test_func_call.py +0 -0
  122. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_libs/test_nested.py +0 -0
  123. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_libs/test_parse.py +0 -0
  124. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi/tests/test_libs/test_sys_util.py +0 -0
  125. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi.egg-info/dependency_links.txt +0 -0
  126. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi.egg-info/requires.txt +0 -0
  127. {lionagi-0.0.314 → lionagi-0.0.316}/lionagi.egg-info/top_level.txt +0 -0
  128. {lionagi-0.0.314 → lionagi-0.0.316}/pyproject.toml +0 -0
  129. {lionagi-0.0.314 → lionagi-0.0.316}/setup.cfg +0 -0
  130. {lionagi-0.0.314 → lionagi-0.0.316}/setup.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lionagi
- Version: 0.0.314
+ Version: 0.0.316
  Summary: Towards automated general intelligence.
  Author: HaiyangLi
  Author-email: Haiyang Li <ocean@lionagi.ai>

{lionagi-0.0.314 → lionagi-0.0.316}/lionagi/__init__.py
@@ -8,6 +8,9 @@ from dotenv import load_dotenv

  from .core import direct, Branch, Session, func_to_tool
  from .integrations.provider.services import Services
+ from .integrations.chunker.chunk import chunk
+ from .integrations.loader.load import load
+

  logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)

lionagi-0.0.316/lionagi/core/direct/__init__.py (new file)
@@ -0,0 +1,10 @@
+ from .predict import predict
+ from .select import select
+ from .score import score
+ from .react import react
+ from .vote import vote
+ from .plan import plan
+ from .cot import chain_of_thoughts, chain_of_react
+
+
+ __all__ = ["predict", "select", "score", "vote", "react", "plan", "chain_of_thoughts", "chain_of_react"]

lionagi-0.0.316/lionagi/core/direct/cot.py (new file)
@@ -0,0 +1,88 @@
+ from typing import Callable
+ from lionagi.libs import convert
+ from ..tool import func_to_tool
+ from ..schema import Tool
+ from .predict import predict
+ from .plan import plan
+ from .react import react
+
+ from .utils import _process_tools
+
+
+ async def chain_of_thoughts(
+     sentence=None,
+     branch=None,
+     instruction=None,
+     reason=False,
+     confidence_score=False,
+     num_steps=3,
+     directive_kwargs={},
+     return_branch=False,
+     **kwargs
+ ):
+
+     out_, outs, answer, reasons, confidence_score = "", [], "", [], 0
+     if branch is not None:
+         out_ = await plan(sentence, branch=branch, instruction=instruction, num_steps=num_steps, **kwargs)
+     else:
+         out_, branch = await plan(sentence, instruction=instruction, branch=branch, num_steps=num_steps, return_branch=True, **kwargs)
+
+     for i in range(len(out_.plan)):
+         _out = await predict(branch=branch, instruction=out_.plan[f"step_{i+1}"], reason=reason, confidence_score=confidence_score, **directive_kwargs)
+         answer += _out.answer
+         if reason:
+             reasons.append(_out.reason)
+         if confidence_score:
+             confidence_score += _out.confidence_score
+         outs.append(_out)
+
+     setattr(out_, "chain_output", outs)
+     setattr(out_, "chain_answer", answer)
+
+     if reason:
+         setattr(out_, "chain_reasons", reasons)
+     if confidence_score:
+         setattr(out_, "chain_confidence_score", confidence_score/len(outs))
+
+     if return_branch:
+         return out_, branch
+
+     return out_
+
+
+ async def chain_of_react(
+     sentence=None,
+     branch=None,
+     instruction=None,
+     num_steps=3,
+     tools=None,
+     directive_system=None,
+     directive_kwargs={},
+     return_branch=False,
+     **kwargs
+ ):
+     out_, outs, reasons, actions, action_responses = "", [], [], [], []
+     if branch is not None:
+         out_ = await plan(sentence, branch=branch, instruction=instruction, num_steps=num_steps, **kwargs)
+     else:
+         out_, branch = await plan(sentence, instruction=instruction, branch=branch, num_steps=num_steps, return_branch=True, **kwargs)
+
+     _process_tools(tools, branch)
+
+     for i in range(len(out_.plan)):
+         _out = await react(branch=branch, system=directive_system, instruction=out_.plan[f"step_{i+1}"], **directive_kwargs)
+         outs.append(_out)
+         reasons.append(_out.reason)
+         actions.append(_out.actions)
+         if _out.action_needed:
+             action_responses.append(_out.action_response)
+
+     setattr(out_, "chain_output", convert.to_list(outs))
+     setattr(out_, "chain_reason", convert.to_list(reasons))
+     setattr(out_, "chain_actions", convert.to_list(actions))
+     setattr(out_, "chain_action_response", convert.to_list(action_responses))
+
+     if return_branch:
+         return out_, branch
+
+     return out_
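
The two helpers above compose the other directives: plan() drafts num_steps steps, then predict() (for chain_of_thoughts) or react() (for chain_of_react) runs once per step, and the per-step results are attached to the returned template as chain_* attributes. A minimal usage sketch, assuming a default LLM provider is configured via environment variables; the question text is illustrative:

import asyncio
from lionagi.core.direct import chain_of_thoughts

async def main():
    # Draft a 3-step plan, then run predict() once per step.
    out = await chain_of_thoughts(
        "How should a small team introduce CI to a Python monorepo?",
        num_steps=3,
        reason=True,
    )
    print(out.plan)          # parsed plan dict: {"step_1": {...}, ...}
    print(out.chain_answer)  # concatenated per-step predictions

asyncio.run(main())
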

lionagi-0.0.316/lionagi/core/direct/plan.py (new file)
@@ -0,0 +1,162 @@
+ # plan.py
+
+ from lionagi.libs import func_call, ParseUtil
+ from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
+ from ..prompt.scored_template import ScoredTemplate
+ from ..branch import Branch
+
+
+ class PlanTemplate(ScoredTemplate):
+     template_name: str = "default_plan"
+     sentence: str | list | dict = Field(
+         default_factory=str,
+         description="the given sentence(s) or context to generate a plan for",
+     )
+     plan: dict | str = Field(
+         default_factory=dict, description="the generated step by step plan, return as a dictionary following {step_n: {plan: ..., reason: ...}} format")
+     signature: str = "sentence -> plan"
+
+     def __init__(
+         self,
+         sentence=None,
+         instruction=None,
+         confidence_score=False,
+         reason=False,
+         num_step=3,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.sentence = sentence
+         self.task = f"Generate a {num_step}_step plan based on the given context. Instruction: {instruction}."
+
+         if reason:
+             self.output_fields.append("reason")
+
+         if confidence_score:
+             self.output_fields.append("confidence_score")
+
+
+ async def _plan(
+     sentence,
+     *,
+     instruction=None,
+     branch=None,
+     confidence_score=False,
+     reason=False,
+     retries=2,
+     delay=0.5,
+     backoff_factor=2,
+     default_value=None,
+     timeout=None,
+     branch_name=None,
+     system=None,
+     messages=None,
+     service=None,
+     sender=None,
+     llmconfig=None,
+     tools=None,
+     datalogger=None,
+     persist_path=None,
+     tool_manager=None,
+     return_branch=False,
+     **kwargs,
+ ):
+     if "temperature" not in kwargs:
+         kwargs["temperature"] = 0.1
+
+     instruction = instruction or ""
+
+     branch = branch or Branch(
+         name=branch_name,
+         system=system,
+         messages=messages,
+         service=service,
+         sender=sender,
+         llmconfig=llmconfig,
+         tools=tools,
+         datalogger=datalogger,
+         persist_path=persist_path,
+         tool_manager=tool_manager,
+     )
+
+     _template = PlanTemplate(
+         sentence=sentence,
+         instruction=instruction,
+         confidence_score=confidence_score,
+         reason=reason,
+     )
+
+     await func_call.rcall(
+         branch.chat,
+         prompt_template=_template,
+         retries=retries,
+         delay=delay,
+         backoff_factor=backoff_factor,
+         default=default_value,
+         timeout=timeout,
+         **kwargs,
+     )
+
+     _template.plan = ParseUtil.fuzzy_parse_json(_template.plan)
+
+     return (_template, branch) if return_branch else _template
+
+
+ async def plan(
+     sentence,
+     *,
+     instruction=None,
+     num_instances=1,
+     branch=None,
+     confidence_score=False,
+     reason=False,
+     retries=2,
+     delay=0.5,
+     backoff_factor=2,
+     default_value=None,
+     timeout=None,
+     branch_name=None,
+     system=None,
+     messages=None,
+     service=None,
+     sender=None,
+     llmconfig=None,
+     tools=None,
+     datalogger=None,
+     persist_path=None,
+     tool_manager=None,
+     return_branch=False,
+     **kwargs,
+ ):
+     async def _inner(i=0):
+         return await _plan(
+             sentence=sentence,
+             instruction=instruction,
+             branch=branch,
+             confidence_score=confidence_score,
+             reason=reason,
+             retries=retries,
+             delay=delay,
+             backoff_factor=backoff_factor,
+             default_value=default_value,
+             timeout=timeout,
+             branch_name=branch_name,
+             system=system,
+             messages=messages,
+             service=service,
+             sender=sender,
+             llmconfig=llmconfig,
+             tools=tools,
+             datalogger=datalogger,
+             persist_path=persist_path,
+             tool_manager=tool_manager,
+             return_branch=return_branch,
+             **kwargs,
+         )
+
+     if num_instances == 1:
+         return await _inner()
+
+     elif num_instances > 1:
+         return await func_call.alcall(range(num_instances), _inner)
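
plan() is a thin fan-out over _plan(): it builds (or reuses) a Branch, fills a PlanTemplate, retries branch.chat through func_call.rcall, fuzzy-parses the plan field into a dict, and with num_instances > 1 dispatches via func_call.alcall. A hedged single-call sketch, assuming a default Branch can be constructed from environment configuration; the prompt text is illustrative:

import asyncio
from lionagi.core.direct import plan

async def main():
    template, branch = await plan(
        "Ship a bug-fix release of a Python package",
        instruction="keep each step to one short sentence",
        reason=True,
        return_branch=True,  # also return the Branch that ran the chat
    )
    # template.plan has been run through ParseUtil.fuzzy_parse_json, giving
    # {"step_1": {"plan": ..., "reason": ...}, "step_2": {...}, ...}
    for step, detail in template.plan.items():
        print(step, detail)

asyncio.run(main())
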

{lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/predict.py
@@ -43,14 +43,15 @@ class PredictTemplate(ScoredTemplate):
          default_factory=int, description="the number of sentences to predict"
      )
      answer: str | list = Field(
-         default_factory=str, description="the predicted sentence(s)"
+         default_factory=str, description="the predicted sentence(s) or desired output"
      )
      signature: str = "sentence -> answer"

      def __init__(
          self,
          sentence=None,
-         num_sentences=None,
+         instruction=None,
+         num_sentences=1,
          confidence_score=False,
          reason=False,
          **kwargs,
@@ -67,9 +68,9 @@ class PredictTemplate(ScoredTemplate):
          """
          super().__init__(**kwargs)

-         self.sentence = sentence
+         self.sentence = sentence or ''
          self.num_sentences = num_sentences
-         self.task = f"predict the next {self.num_sentences} sentence(s)"
+         self.task = f"follow instruction to predict the next {self.num_sentences} sentence(s). Instruction: {instruction}."

          if reason:
              self.output_fields.append("reason")
@@ -82,6 +83,8 @@ async def predict(
      sentence=None,
      num_sentences=1,
      confidence_score=False,
+     instruction=None,
+     branch=None,
      reason=False,
      retries=2,
      delay=0.5,
@@ -128,7 +131,7 @@ async def predict(
      Returns:
          PredictTemplate: The predict template with the predicted sentence(s).
      """
-     branch = Branch(
+     branch = branch or Branch(
          name=branch_name,
          system=system,
          messages=messages,
@@ -142,6 +145,7 @@ async def predict(
      )

      predict_template = PredictTemplate(
+         instruction=instruction,
          sentence=sentence,
          num_sentences=num_sentences,
          confidence_score=confidence_score,
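
The predict() changes add an instruction string to the task prompt and let callers pass an existing branch instead of always constructing a new one. A hedged sketch, assuming a default Branch can be created from environment configuration; the sentence and instruction are illustrative:

import asyncio
from lionagi import Branch, direct

async def main():
    branch = Branch()
    out = await direct.predict(
        sentence="The rover crested the dune and",
        instruction="continue the scene in one neutral sentence",
        num_sentences=1,
        branch=branch,  # new in 0.0.316: reuse an existing conversation branch
    )
    print(out.answer)

asyncio.run(main())
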

{lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/react.py
@@ -3,11 +3,12 @@ from lionagi.libs import func_call, convert, AsyncUtil
  from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
  from ..prompt.action_template import ActionedTemplate
  from ..branch import Branch
+ from .utils import _process_tools


  class ReactTemplate(ActionedTemplate):
      template_name: str = "default_react"
-     sentence: str | list | dict = Field(
+     sentence: str | list | dict | None= Field(
          default_factory=str,
          description="the given sentence(s) to reason and take actions on",
      )
@@ -29,7 +30,7 @@ class ReactTemplate(ActionedTemplate):


  async def _react(
-     sentence,
+     sentence=None,
      *,
      instruction=None,
      branch=None,
@@ -58,6 +59,9 @@ async def _react(

      instruction = instruction or ""

+     if branch and tools:
+         _process_tools(tools, branch)
+
      branch = branch or Branch(
          name=branch_name,
          system=system,
@@ -109,7 +113,7 @@ async def _react(


  async def react(
-     sentence,
+     sentence=None,
      *,
      instruction=None,
      num_instances=1,
@@ -159,7 +163,7 @@ async def react(
              return_branch=return_branch,
              **kwargs,
          )
-
+
      if num_instances == 1:
          return await _inner()

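
react() now accepts sentence=None and, when both a branch and tools are supplied, registers the tools on that branch via _process_tools before chatting. A hedged sketch, assuming react() forwards tools= through to _react as the unchanged parts of its signature suggest; the multiply function and prompt are illustrative:

import asyncio
from lionagi import Branch, direct

def multiply(a: float, b: float) -> float:
    """Multiply two numbers."""
    return a * b

async def main():
    branch = Branch()
    out = await direct.react(
        instruction="What is 12.4 multiplied by 7? Use the tool.",
        branch=branch,
        tools=[multiply],  # plain callables get wrapped by func_to_tool
    )
    print(out.actions)

asyncio.run(main())
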

{lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/select.py
@@ -39,9 +39,7 @@ class SelectTemplate(ScoredTemplate):
      answer: Enum | str = Field(
          default_factory=str, description="selection from given choices"
      )
-     choices: list = Field(
-         default_factory=list, description="the given choices"
-     )
+     choices: list = Field(default_factory=list, description="the given choices")

      signature: str = "sentence -> answer"


lionagi-0.0.316/lionagi/core/direct/sentiment.py (new file)
@@ -0,0 +1 @@
+ # TODO: sentiment analysis

{lionagi-0.0.314 → lionagi-0.0.316}/lionagi/core/direct/utils.py
@@ -1,3 +1,6 @@
+ from typing import Callable
+ from ..tool import func_to_tool
+ from ..schema import Tool
  # import contextlib
  # from lionagi.libs import ParseUtil, StringMatch, convert, func_call

@@ -85,3 +88,20 @@
  # return _out

  # return out_ if len(out_) > 1 else out_[0]
+
+
+ def _process_tools(tool_obj, branch):
+     if isinstance(tool_obj, Callable):
+         _process_tool(tool_obj, branch)
+     else:
+         for i in tool_obj:
+             _process_tool(i, branch)
+
+
+ def _process_tool(tool_obj, branch):
+     if isinstance(tool_obj, Tool) and tool_obj.schema_["function"]["name"] not in branch.tool_manager.registry:
+         branch.register_tools(tool_obj)
+     if isinstance(tool_obj, Callable):
+         tool = func_to_tool(tool_obj)[0]
+         if tool.schema_["function"]["name"] not in branch.tool_manager.registry:
+             branch.register_tools(tool)
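
_process_tools/_process_tool are private helpers: a plain callable is wrapped with func_to_tool and registered on the branch, while a Tool whose name already sits in branch.tool_manager.registry is skipped. A hedged sketch of that dedup behaviour (the add function is illustrative):

from lionagi import Branch, func_to_tool
from lionagi.core.direct.utils import _process_tools

def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

branch = Branch()
# The callable is wrapped and registered once; the pre-built Tool with the
# same function name is then skipped by the registry check.
_process_tools([add, func_to_tool(add)[0]], branch)
print(list(branch.tool_manager.registry))
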

{lionagi-0.0.314 → lionagi-0.0.316}/lionagi/integrations/bridge/llamaindex_/node_parser.py
@@ -29,19 +29,18 @@ def get_llama_index_node_parser(node_parser: Any):
      import llama_index.core.node_parser

      if not isinstance(node_parser, str) and not issubclass(node_parser, NodeParser):
-         raise TypeError(f"node_parser must be a string or NodeParser.")
+         raise TypeError("node_parser must be a string or NodeParser.")

      if isinstance(node_parser, str):
          if node_parser == "CodeSplitter":
              SysUtil.check_import("tree_sitter_languages")

          try:
-             parser = getattr(llama_index.core.node_parser, node_parser)
-             return parser
+             return getattr(llama_index.core.node_parser, node_parser)
          except Exception as e:
              raise AttributeError(
                  f"llama_index_core has no such attribute:" f" {node_parser}, Error: {e}"
-             )
+             ) from e

      elif isinstance(node_parser, NodeParser):
          return node_parser
@@ -75,10 +74,8 @@ def llama_index_parse_node(
          parser = get_llama_index_node_parser(node_parser)
          try:
              parser = parser(*parser_args, **parser_kwargs)
-         except:
+         except Exception:
              parser = parser.from_defaults(*parser_args, **parser_kwargs)
-         nodes = parser.get_nodes_from_documents(documents)
-         return nodes
-
+         return parser.get_nodes_from_documents(documents)
      except Exception as e:
-         raise ValueError(f"Failed to parse. Error: {e}")
+         raise ValueError(f"Failed to parse. Error: {e}") from e
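
The node_parser cleanup keeps behaviour the same but chains exceptions with "from e" and returns the parsed nodes directly. A hedged sketch, assuming llama-index-core is installed and passing parser_args/parser_kwargs positionally the way the chunker bridge does; the document text is illustrative:

from llama_index.core import Document
from lionagi.integrations.bridge.llamaindex_.node_parser import llama_index_parse_node

docs = [Document(text="Some text to be split into nodes for indexing.")]

# "SentenceSplitter" is resolved by name from llama_index.core.node_parser;
# if the constructor call fails, .from_defaults() is tried before giving up.
nodes = llama_index_parse_node(docs, "SentenceSplitter", [], {})
print(len(nodes))
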

lionagi-0.0.316/lionagi/integrations/chunker/chunk.py (new file)
@@ -0,0 +1,175 @@
+ from typing import Union, Callable
+
+ from lionagi.libs import func_call
+ from lionagi.core.schema import DataNode
+ from ..bridge.langchain_.langchain_bridge import LangchainBridge
+ from ..bridge.llamaindex_.llama_index_bridge import LlamaIndexBridge
+
+
+ from ..loader.load_util import ChunkerType, file_to_chunks, _datanode_parser
+
+
+ def datanodes_convert(documents, chunker_type):
+
+     for i in range(len(documents)):
+         if type(documents[i]) == DataNode:
+             if chunker_type == ChunkerType.LLAMAINDEX:
+                 documents[i] = documents[i].to_llama_index()
+             elif chunker_type == ChunkerType.LANGCHAIN:
+                 documents[i] = documents[i].to_langchain()
+     return documents
+
+
+ def text_chunker(documents, args, kwargs):
+
+     def chunk_node(node):
+         chunks = file_to_chunks(node.to_dict(), *args, **kwargs)
+         func_call.lcall(chunks, lambda chunk: chunk.pop("node_id"))
+         return [DataNode.from_obj({**chunk}) for chunk in chunks]
+
+     return [chunk_node(doc) for doc in documents]
+
+
+ def chunk(
+     documents,
+     chunker,
+     chunker_type=ChunkerType.PLAIN,
+     chunker_args=None,
+     chunker_kwargs=None,
+     chunking_kwargs=None,
+     documents_convert_func=None,
+     to_datanode: bool | Callable = True,
+ ):
+
+     if chunker_args is None:
+         chunker_args = []
+     if chunker_kwargs is None:
+         chunker_kwargs = {}
+     if chunking_kwargs is None:
+         chunking_kwargs = {}
+
+     if chunker_type == ChunkerType.PLAIN:
+         return chunk_funcs[ChunkerType.PLAIN](
+             documents, chunker, chunker_args, chunker_kwargs
+         )
+
+     elif chunker_type == ChunkerType.LANGCHAIN:
+         return chunk_funcs[ChunkerType.LANGCHAIN](
+             documents,
+             documents_convert_func,
+             chunker,
+             chunker_args,
+             chunker_kwargs,
+             to_datanode,
+         )
+
+     elif chunker_type == ChunkerType.LLAMAINDEX:
+         return chunk_funcs[ChunkerType.LLAMAINDEX](
+             documents,
+             documents_convert_func,
+             chunker,
+             chunker_args,
+             chunker_kwargs,
+             to_datanode,
+         )
+
+     elif chunker_type == ChunkerType.SELFDEFINED:
+         return chunk_funcs[ChunkerType.SELFDEFINED](
+             documents,
+             chunker,
+             chunker_args,
+             chunker_kwargs,
+             chunking_kwargs,
+             to_datanode,
+         )
+
+     else:
+         raise ValueError(
+             f"{chunker_type} is not supported. Please choose from {list(ChunkerType)}"
+         )
+
+
+ def _self_defined_chunker(
+     documents,
+     chunker,
+     chunker_args,
+     chunker_kwargs,
+     chunking_kwargs,
+     to_datanode: bool | Callable,
+ ):
+     try:
+         splitter = chunker(*chunker_args, **chunker_kwargs)
+         nodes = splitter.split(documents, **chunking_kwargs)
+     except Exception as e:
+         raise ValueError(
+             f"Self defined chunker {chunker} is not valid. Error: {e}"
+         ) from e
+
+     if isinstance(to_datanode, bool) and to_datanode is True:
+         raise ValueError("Please define a valid parser to DataNode.")
+     elif isinstance(to_datanode, Callable):
+         nodes = _datanode_parser(nodes, to_datanode)
+     return nodes
+
+
+ def _llama_index_chunker(
+     documents,
+     documents_convert_func,
+     chunker,
+     chunker_args,
+     chunker_kwargs,
+     to_datanode: bool | Callable,
+ ):
+     if documents_convert_func:
+         documents = documents_convert_func(documents, "llama_index")
+     nodes = LlamaIndexBridge.llama_index_parse_node(
+         documents, chunker, chunker_args, chunker_kwargs
+     )
+
+     if isinstance(to_datanode, bool) and to_datanode is True:
+         nodes = [DataNode.from_llama_index(i) for i in nodes]
+     elif isinstance(to_datanode, Callable):
+         nodes = _datanode_parser(nodes, to_datanode)
+     return nodes
+
+
+ def _langchain_chunker(
+     documents,
+     documents_convert_func,
+     chunker,
+     chunker_args,
+     chunker_kwargs,
+     to_datanode: bool | Callable,
+ ):
+     if documents_convert_func:
+         documents = documents_convert_func(documents, "langchain")
+     nodes = LangchainBridge.langchain_text_splitter(
+         documents, chunker, chunker_args, chunker_kwargs
+     )
+     if isinstance(to_datanode, bool) and to_datanode is True:
+         if isinstance(documents, str):
+             nodes = [DataNode(content=i) for i in nodes]
+         else:
+             nodes = [DataNode.from_langchain(i) for i in nodes]
+     elif isinstance(to_datanode, Callable):
+         nodes = _datanode_parser(nodes, to_datanode)
+     return nodes
+
+
+ def _plain_chunker(documents, chunker, chunker_args, chunker_kwargs):
+     try:
+         if chunker == "text_chunker":
+             chunker = text_chunker
+         return chunker(documents, chunker_args, chunker_kwargs)
+     except Exception as e:
+         raise ValueError(
+             f"Reader {chunker} is currently not supported. Error: {e}"
+         ) from e
+
+
+ chunk_funcs = {
+     ChunkerType.PLAIN: _plain_chunker,
+     ChunkerType.LANGCHAIN: _langchain_chunker,
+     ChunkerType.LLAMAINDEX: _llama_index_chunker,
+     ChunkerType.SELFDEFINED: _self_defined_chunker,
+ }
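
chunk() dispatches on chunker_type: PLAIN routes the string "text_chunker" to the built-in text_chunker(), LANGCHAIN and LLAMAINDEX go through their bridges, and SELFDEFINED expects a splitter object with a .split() method. A hedged sketch of the plain path, assuming the defaults of file_to_chunks in load_util (not shown in this diff) are acceptable; the document content is illustrative:

from lionagi import chunk
from lionagi.core.schema import DataNode

docs = [DataNode(content="A long document body that should be split into pieces ...")]

# ChunkerType.PLAIN is the default; text_chunker() returns one list of
# DataNode chunks per input document.
chunked = chunk(docs, chunker="text_chunker")
print(len(chunked), len(chunked[0]))
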