lionagi 0.0.208__py3-none-any.whl → 0.0.210__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. lionagi/__init__.py +4 -6
  2. lionagi/api_service/base_endpoint.py +65 -0
  3. lionagi/api_service/base_rate_limiter.py +121 -0
  4. lionagi/api_service/base_service.py +146 -0
  5. lionagi/api_service/chat_completion.py +6 -0
  6. lionagi/api_service/embeddings.py +6 -0
  7. lionagi/api_service/payload_package.py +47 -0
  8. lionagi/api_service/status_tracker.py +29 -0
  9. lionagi/core/__init__.py +5 -9
  10. lionagi/core/branch.py +1191 -0
  11. lionagi/core/flow.py +423 -0
  12. lionagi/core/{instruction_set/instruction_set.py → instruction_set.py} +3 -3
  13. lionagi/core/session.py +872 -0
  14. lionagi/schema/__init__.py +5 -8
  15. lionagi/schema/base_schema.py +821 -0
  16. lionagi/{_services → services}/base_service.py +4 -4
  17. lionagi/{_services → services}/oai.py +4 -4
  18. lionagi/structures/graph.py +1 -1
  19. lionagi/structures/relationship.py +1 -1
  20. lionagi/structures/structure.py +1 -1
  21. lionagi/tools/tool_manager.py +0 -163
  22. lionagi/tools/tool_util.py +2 -1
  23. lionagi/utils/__init__.py +7 -14
  24. lionagi/utils/api_util.py +63 -2
  25. lionagi/utils/core_utils.py +338 -0
  26. lionagi/utils/sys_util.py +3 -3
  27. lionagi/version.py +1 -1
  28. {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/METADATA +28 -29
  29. lionagi-0.0.210.dist-info/RECORD +56 -0
  30. lionagi/_services/anthropic.py +0 -79
  31. lionagi/_services/anyscale.py +0 -0
  32. lionagi/_services/azure.py +0 -1
  33. lionagi/_services/bedrock.py +0 -0
  34. lionagi/_services/everlyai.py +0 -0
  35. lionagi/_services/gemini.py +0 -0
  36. lionagi/_services/gpt4all.py +0 -0
  37. lionagi/_services/huggingface.py +0 -0
  38. lionagi/_services/litellm.py +0 -33
  39. lionagi/_services/localai.py +0 -0
  40. lionagi/_services/openllm.py +0 -0
  41. lionagi/_services/openrouter.py +0 -44
  42. lionagi/_services/perplexity.py +0 -0
  43. lionagi/_services/predibase.py +0 -0
  44. lionagi/_services/rungpt.py +0 -0
  45. lionagi/_services/vllm.py +0 -0
  46. lionagi/_services/xinference.py +0 -0
  47. lionagi/agents/planner.py +0 -1
  48. lionagi/agents/prompter.py +0 -1
  49. lionagi/agents/scorer.py +0 -1
  50. lionagi/agents/summarizer.py +0 -1
  51. lionagi/agents/validator.py +0 -1
  52. lionagi/bridge/__init__.py +0 -22
  53. lionagi/bridge/langchain.py +0 -195
  54. lionagi/bridge/llama_index.py +0 -266
  55. lionagi/core/branch/__init__.py +0 -0
  56. lionagi/core/branch/branch.py +0 -841
  57. lionagi/core/branch/cluster.py +0 -1
  58. lionagi/core/branch/conversation.py +0 -787
  59. lionagi/core/core_util.py +0 -0
  60. lionagi/core/flow/__init__.py +0 -0
  61. lionagi/core/flow/flow.py +0 -19
  62. lionagi/core/flow/flow_util.py +0 -62
  63. lionagi/core/instruction_set/__init__.py +0 -0
  64. lionagi/core/messages/__init__.py +0 -0
  65. lionagi/core/sessions/__init__.py +0 -0
  66. lionagi/core/sessions/session.py +0 -504
  67. lionagi/datastores/__init__.py +0 -1
  68. lionagi/datastores/chroma.py +0 -1
  69. lionagi/datastores/deeplake.py +0 -1
  70. lionagi/datastores/elasticsearch.py +0 -1
  71. lionagi/datastores/lantern.py +0 -1
  72. lionagi/datastores/pinecone.py +0 -1
  73. lionagi/datastores/postgres.py +0 -1
  74. lionagi/datastores/qdrant.py +0 -1
  75. lionagi/loaders/__init__.py +0 -18
  76. lionagi/loaders/chunker.py +0 -166
  77. lionagi/loaders/load_util.py +0 -240
  78. lionagi/loaders/reader.py +0 -122
  79. lionagi/models/__init__.py +0 -0
  80. lionagi/models/base_model.py +0 -0
  81. lionagi/models/imodel.py +0 -53
  82. lionagi/schema/async_queue.py +0 -158
  83. lionagi/schema/base_condition.py +0 -1
  84. lionagi/schema/base_node.py +0 -422
  85. lionagi/schema/base_tool.py +0 -44
  86. lionagi/schema/data_logger.py +0 -126
  87. lionagi/schema/data_node.py +0 -88
  88. lionagi/schema/status_tracker.py +0 -37
  89. lionagi/tests/test_utils/test_encrypt_util.py +0 -323
  90. lionagi/utils/encrypt_util.py +0 -283
  91. lionagi/utils/url_util.py +0 -55
  92. lionagi-0.0.208.dist-info/RECORD +0 -106
  93. lionagi/{agents → api_service}/__init__.py +0 -0
  94. lionagi/core/{branch/branch_manager.py → branch_manager.py} +0 -0
  95. lionagi/core/{messages/messages.py → messages.py} +3 -3
  96. /lionagi/{_services → services}/__init__.py +0 -0
  97. /lionagi/{_services → services}/mistralai.py +0 -0
  98. /lionagi/{_services → services}/mlx_service.py +0 -0
  99. /lionagi/{_services → services}/ollama.py +0 -0
  100. /lionagi/{_services → services}/services.py +0 -0
  101. /lionagi/{_services → services}/transformers.py +0 -0
  102. {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/LICENSE +0 -0
  103. {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/WHEEL +0 -0
  104. {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/top_level.txt +0 -0
lionagi/core/flow.py ADDED
@@ -0,0 +1,423 @@
1
+ from typing import Any, Dict, List, Optional, Union
2
+ from lionagi.utils import as_dict, lcall, to_list, alcall, get_flattened_keys
3
+ from lionagi.schema import Tool
4
+ from lionagi.core.messages import Instruction, System
5
+
6
+
7
+ class ChatFlow:
8
+
9
+ @staticmethod
10
+ async def call_chatcompletion(branch, sender=None, with_sender=False, tokenizer_kwargs={}, **kwargs):
11
+ """
12
+ Asynchronously calls the chat completion service with the current message queue.
13
+
14
+ Args:
15
+ branch: The Branch instance calling the service.
16
+ sender (Optional[str]): The name of the sender to include in chat completions.
17
+ with_sender (bool): If True, includes sender information in messages.
18
+ tokenizer_kwargs (dict): Keyword arguments for the tokenizer used in chat completion.
19
+ **kwargs: Arbitrary keyword arguments for the chat completion service.
20
+
21
+ Examples:
22
+ >>> await ChatFlow.call_chatcompletion(branch, sender="user")
23
+ """
24
+ messages = branch.chat_messages if not with_sender else branch.chat_messages_with_sender
25
+ payload, completion = await branch.service.serve_chat(
26
+ messages=messages, tokenizer_kwargs=tokenizer_kwargs, **kwargs
27
+ )
28
+ if "choices" in completion:
29
+ add_msg_config = {"response":completion['choices'][0]}
30
+ if sender is not None:
31
+ add_msg_config["sender"] = sender
32
+
33
+ branch.logger.add_entry({"input": payload, "output": completion})
34
+ branch.add_message(**add_msg_config)
35
+ branch.status_tracker.num_tasks_succeeded += 1
36
+ else:
37
+ branch.status_tracker.num_tasks_failed += 1
38
+
39
+ @staticmethod
40
+ async def chat(
41
+ branch,
42
+ instruction: Union[Instruction, str],
43
+ context: Optional[Any] = None,
44
+ sender: Optional[str] = None,
45
+ system: Optional[Union[System, str, Dict[str, Any]]] = None,
46
+ tools: Union[bool, Tool, List[Tool], str, List[str]] = False,
47
+ out: bool = True,
48
+ invoke: bool = True,
49
+ **kwargs) -> Any:
50
+
51
+ """
52
+ a chat conversation with LLM, processing instructions and system messages, optionally invoking tools.
53
+
54
+ Args:
55
+ branch: The Branch instance to perform chat operations.
56
+ instruction (Union[Instruction, str]): The instruction for the chat.
57
+ context (Optional[Any]): Additional context for the chat.
58
+ sender (Optional[str]): The sender of the chat message.
59
+ system (Optional[Union[System, str, Dict[str, Any]]]): System message to be processed.
60
+ tools (Union[bool, Tool, List[Tool], str, List[str]]): Specifies tools to be invoked.
61
+ out (bool): If True, outputs the chat response.
62
+ invoke (bool): If True, invokes tools as part of the chat.
63
+ **kwargs: Arbitrary keyword arguments for chat completion.
64
+
65
+ Examples:
66
+ >>> await ChatFlow.chat(branch, "Ask about user preferences")
67
+ """
68
+ if system:
69
+ branch.change_first_system_message(system)
70
+ branch.add_message(instruction=instruction, context=context, sender=sender)
71
+
72
+ if 'tool_parsed' in kwargs:
73
+ kwargs.pop('tool_parsed')
74
+ tool_kwarg = {'tools': tools}
75
+ kwargs = {**tool_kwarg, **kwargs}
76
+ else:
77
+ if tools and branch.has_tools:
78
+ kwargs = branch.tool_manager._tool_parser(tools=tools, **kwargs)
79
+
80
+ config = {**branch.llmconfig, **kwargs}
81
+ if sender is not None:
82
+ config.update({"sender": sender})
83
+
84
+ await branch.call_chatcompletion(**config)
85
+
86
+ async def _output():
87
+ content_ = as_dict(branch.messages.content.iloc[-1])
88
+ if invoke:
89
+ try:
90
+ tool_uses = content_
91
+ func_calls = lcall(
92
+ [as_dict(i) for i in tool_uses["action_list"]],
93
+ branch.tool_manager.get_function_call
94
+ )
95
+
96
+ outs = await alcall(func_calls, branch.tool_manager.invoke)
97
+ outs = to_list(outs, flatten=True)
98
+
99
+ for out_, f in zip(outs, func_calls):
100
+ branch.add_message(
101
+ response={
102
+ "function": f[0],
103
+ "arguments": f[1],
104
+ "output": out_
105
+ }
106
+ )
107
+ except:
108
+ pass
109
+ if out:
110
+ if (
111
+ len(content_.items()) == 1
112
+ and len(get_flattened_keys(content_)) == 1
113
+ ):
114
+ key = get_flattened_keys(content_)[0]
115
+ return content_[key]
116
+ return content_
117
+
118
+ return await _output()
119
+
120
+ @staticmethod
121
+ async def ReAct(
122
+ branch,
123
+ instruction: Union[Instruction, str],
124
+ context = None,
125
+ sender = None,
126
+ system = None,
127
+ tools = None,
128
+ num_rounds: int = 1,
129
+ **kwargs
130
+ ):
131
+ """
132
+ Performs a reason-action cycle with optional tool invocation over multiple rounds.
133
+
134
+ Args:
135
+ branch: The Branch instance to perform ReAct operations.
136
+ instruction (Union[Instruction, str]): Initial instruction for the cycle.
137
+ context: Context relevant to the instruction.
138
+ sender (Optional[str]): Identifier for the message sender.
139
+ system: Initial system message or configuration.
140
+ tools: Tools to be registered or used during the cycle.
141
+ num_rounds (int): Number of reason-action cycles to perform.
142
+ **kwargs: Additional keyword arguments for customization.
143
+
144
+ Examples:
145
+ >>> await ChatFlow.ReAct(branch, "Analyze user feedback", num_rounds=2)
146
+ """
147
+ if tools is not None:
148
+ if isinstance(tools, list) and isinstance(tools[0], Tool):
149
+ branch.register_tools(tools)
150
+
151
+ if branch.tool_manager.registry == {}:
152
+ raise ValueError("No tools found, You need to register tools for ReAct (reason-action)")
153
+
154
+ else:
155
+ kwargs = branch.tool_manager._tool_parser(tools=True, **kwargs)
156
+
157
+ out = ''
158
+ i = 0
159
+ while i < num_rounds:
160
+ prompt = f"""you have {(num_rounds-i)*2} step left in current task. if available, integrate previous tool responses. perform reasoning and prepare action plan according to available tools only, apply divide and conquer technique.
161
+ """
162
+ instruct = {"Notice": prompt}
163
+
164
+ if i == 0:
165
+ instruct["Task"] = instruction
166
+ out = await branch.chat(
167
+ instruction=instruct, context=context,
168
+ system=system, sender=sender, **kwargs
169
+ )
170
+
171
+ elif i >0:
172
+ out = await branch.chat(
173
+ instruction=instruct, sender=sender, **kwargs
174
+ )
175
+
176
+ prompt = f"""
177
+ you have {(num_rounds-i)*2-1} step left in current task, invoke tool usage to perform actions
178
+ """
179
+ out = await branch.chat(prompt, tool_choice="auto", tool_parsed=True, sender=sender, **kwargs)
180
+
181
+ i += 1
182
+ if not branch._is_invoked():
183
+ return out
184
+
185
+ if branch._is_invoked():
186
+ prompt = """
187
+ present the final result to user
188
+ """
189
+ return await branch.chat(prompt, sender=sender, tool_parsed=True, **kwargs)
190
+ else:
191
+ return out
192
+
193
+ @staticmethod
194
+ async def auto_followup(
195
+ branch,
196
+ instruction: Union[Instruction, str],
197
+ context = None,
198
+ sender = None,
199
+ system = None,
200
+ tools: Union[bool, Tool, List[Tool], str, List[str], List[Dict]] = False,
201
+ max_followup: int = 3,
202
+ out=True,
203
+ **kwargs
204
+ ) -> None:
205
+ """
206
+ Automatically performs follow-up actions based on chat interactions and tool invocations.
207
+
208
+ Args:
209
+ branch: The Branch instance to perform follow-up operations.
210
+ instruction (Union[Instruction, str]): The initial instruction for follow-up.
211
+ context: Context relevant to the instruction.
212
+ sender (Optional[str]): Identifier for the message sender.
213
+ system: Initial system message or configuration.
214
+ tools: Specifies tools to be considered for follow-up actions.
215
+ max_followup (int): Maximum number of follow-up chats allowed.
216
+ out (bool): If True, outputs the result of the follow-up action.
217
+ **kwargs: Additional keyword arguments for follow-up customization.
218
+
219
+ Examples:
220
+ >>> await ChatFlow.auto_followup(branch, "Finalize report", max_followup=2)
221
+ """
222
+ if branch.tool_manager.registry != {} and tools:
223
+ kwargs = branch.tool_manager._tool_parser(tools=tools, **kwargs)
224
+
225
+ n_tries = 0
226
+ while (max_followup - n_tries) > 0:
227
+ prompt = f"""
228
+ In the current task you are allowed a maximum of another {max_followup-n_tries} followup chats.
229
+ if further actions are needed, invoke tools usage. If you are done, present the final result
230
+ to user without further tool usage
231
+ """
232
+ if n_tries > 0:
233
+ _out = await branch.chat(prompt, sender=sender, tool_choice="auto", tool_parsed=True, **kwargs)
234
+ n_tries += 1
235
+
236
+ if not branch._is_invoked():
237
+ return _out if out else None
238
+
239
+ elif n_tries == 0:
240
+ instruct = {"notice": prompt, "task": instruction}
241
+ out = await branch.chat(
242
+ instruct, context=context, system=system, sender=sender, tool_choice="auto",
243
+ tool_parsed=True, **kwargs
244
+ )
245
+ n_tries += 1
246
+
247
+ if not branch._is_invoked():
248
+ return _out if out else None
249
+
250
+ if branch._is_invoked():
251
+ """
252
+ In the current task, you are at your last step, present the final result to user
253
+ """
254
+ return await branch.chat(instruction, sender=sender, tool_parsed=True, **kwargs)
255
+
256
+ # async def followup(
257
+ # self,
258
+ # instruction: Union[Instruction, str],
259
+ # context = None,
260
+ # sender = None,
261
+ # system = None,
262
+ # tools: Union[bool, Tool, List[Tool], str, List[str], List[Dict]] = False,
263
+ # max_followup: int = 3,
264
+ # out=True,
265
+ # **kwargs
266
+ # ) -> None:
267
+
268
+ # """
269
+ # auto tool usages until LLM decides done. Then presents final results.
270
+ # """
271
+
272
+ # if self.tool_manager.registry != {} and tools:
273
+ # kwargs = self.tool_manager._tool_parser(tools=tools, **kwargs)
274
+
275
+ # n_tries = 0
276
+ # while (max_followup - n_tries) > 0:
277
+ # prompt = f"""
278
+ # In the current task you are allowed a maximum of another {max_followup-n_tries} followup chats.
279
+ # if further actions are needed, invoke tools usage. If you are done, present the final result
280
+ # to user without further tool usage.
281
+ # """
282
+ # if n_tries > 0:
283
+ # _out = await self.chat(prompt, sender=sender, tool_choice="auto", tool_parsed=True, **kwargs)
284
+ # n_tries += 1
285
+
286
+ # if not self._is_invoked():
287
+ # return _out if out else None
288
+
289
+ # elif n_tries == 0:
290
+ # instruct = {"notice": prompt, "task": instruction}
291
+ # out = await self.chat(
292
+ # instruct, context=context, system=system, sender=sender, tool_choice="auto",
293
+ # tool_parsed=True, **kwargs
294
+ # )
295
+ # n_tries += 1
296
+
297
+ # if not self._is_invoked():
298
+ # return _out if out else None
299
+
300
+ # async def auto_ReAct(
301
+ # self,
302
+ # instruction: Union[Instruction, str],
303
+ # context = None,
304
+ # sender = None,
305
+ # system = None,
306
+ # tools = None,
307
+ # max_rounds: int = 1,
308
+
309
+ # fallback: Optional[Callable] = None,
310
+ # fallback_kwargs: Optional[Dict] = None,
311
+ # **kwargs
312
+ # ):
313
+ # if tools is not None:
314
+ # if isinstance(tools, list) and isinstance(tools[0], Tool):
315
+ # self.register_tools(tools)
316
+
317
+ # if self.tool_manager.registry == {}:
318
+ # raise ValueError("No tools found, You need to register tools for ReAct (reason-action)")
319
+
320
+ # else:
321
+ # kwargs = self.tool_manager._tool_parser(tools=True, **kwargs)
322
+
323
+ # i = 0
324
+ # while i < max_rounds:
325
+ # prompt = f"""
326
+ # you have {(max_rounds-i)*2} step left in current task. reflect, perform
327
+ # reason for action plan according to available tools only, apply divide and conquer technique, retain from invoking functions
328
+ # """
329
+ # instruct = {"Notice": prompt}
330
+
331
+ # if i == 0:
332
+ # instruct["Task"] = instruction
333
+ # await self.chat(
334
+ # instruction=instruct, context=context,
335
+ # system=system, out=False, sender=sender, **kwargs
336
+ # )
337
+
338
+ # elif i >0:
339
+ # await self.chat(
340
+ # instruction=instruct, out=False, sender=sender, **kwargs
341
+ # )
342
+
343
+ # prompt = f"""
344
+ # you have {(max_rounds-i)*2-1} step left in current task, invoke tool usage to perform the action
345
+ # """
346
+ # await self.chat(prompt, tool_choice="auto", tool_parsed=True, out=False,sender=sender, **kwargs)
347
+
348
+ # i += 1
349
+
350
+ # if self._is_invoked():
351
+ # if fallback is not None:
352
+ # if asyncio.iscoroutinefunction(fallback):
353
+ # return await fallback(**fallback_kwargs)
354
+ # else:
355
+ # return fallback(**fallback_kwargs)
356
+ # prompt = """
357
+ # present the final result to user
358
+ # """
359
+ # return await self.chat(prompt, sender=sender, tool_parsed=True, **kwargs)
360
+
361
+
362
+ # from .sessions import Session
363
+
364
+ # def get_config(temperature, max_tokens, key_scheme, n):
365
+ # f = lambda i:{
366
+ # "temperature": temperature[i],
367
+ # "max_tokens": max_tokens[i],
368
+ # }
369
+ # return {
370
+ # "key": f"{key_scheme}{n+1}",
371
+ # "config": f(n)
372
+ # }
373
+
374
+ # async def run_workflow(
375
+ # session, prompts, temperature, max_tokens,
376
+ # key_scheme, num_prompts, context
377
+ # ):
378
+ # for i in range(num_prompts):
379
+ # key_, config_ = get_config(temperature, max_tokens, key_scheme, i)
380
+ # if i == 0:
381
+ # await session.initiate(instruction=prompts[key_], context=context, **config_)
382
+ # else:
383
+ # await session.followup(instruction=prompts[key_], **config_)
384
+
385
+ # return session
386
+
387
+ # async def run_auto_workflow(
388
+ # session, prompts, temperature, max_tokens,
389
+ # key_scheme, num_prompts, context
390
+ # ):
391
+ # for i in range(num_prompts):
392
+ # key_, config_ = get_config(temperature, max_tokens, key_scheme, i)
393
+ # if i == 0:
394
+ # await session.initiate(instruction=prompts[key_], context=context, **config_)
395
+ # else:
396
+ # await session.auto_followup(instruction=prompts[key_], **config_)
397
+
398
+ # return session
399
+
400
+ # async def run_session(
401
+ # prompts, dir, llmconfig, key_scheme, num_prompts,
402
+ # temperature, max_tokens, type_=None, tools=None
403
+ # ):
404
+ # prompts_ = prompts.copy()
405
+ # session = Session(
406
+ # system=prompts_.pop('system', 'You are a helpful assistant'),
407
+ # dir = dir,
408
+ # llmconfig = llmconfig
409
+ # )
410
+ # if tools:
411
+ # session.register_tools(tools)
412
+ # if type_ is None:
413
+ # session = await run_workflow(
414
+ # session, prompts_, temperature, max_tokens,
415
+ # key_scheme=key_scheme, num_prompts=num_prompts
416
+ # )
417
+ # elif type_ == 'auto':
418
+ # session = await run_auto_workflow(
419
+ # session, prompts_, temperature, max_tokens,
420
+ # key_scheme=key_scheme, num_prompts=num_prompts
421
+ # )
422
+
423
+ # return session
@@ -1,7 +1,7 @@
1
1
  from typing import List, Union
2
- from ...schema import Tool
3
- from ...structures import Relationship, Structure
4
- from ..messages.messages import Instruction
2
+ from lionagi.schema import Tool
3
+ from lionagi.structures import Relationship, Structure
4
+ from lionagi.core.messages import Instruction
5
5
 
6
6
 
7
7
  class InstructionSet(Structure):