lionagi 0.0.306__py3-none-any.whl → 0.0.308__py3-none-any.whl
- lionagi/__init__.py +2 -5
- lionagi/core/__init__.py +7 -5
- lionagi/core/agent/__init__.py +3 -0
- lionagi/core/agent/base_agent.py +10 -12
- lionagi/core/branch/__init__.py +4 -0
- lionagi/core/branch/base_branch.py +81 -81
- lionagi/core/branch/branch.py +16 -28
- lionagi/core/branch/branch_flow_mixin.py +3 -7
- lionagi/core/branch/executable_branch.py +86 -56
- lionagi/core/branch/util.py +77 -162
- lionagi/core/{flow/direct → direct}/__init__.py +1 -1
- lionagi/core/{flow/direct/predict.py → direct/parallel_predict.py} +39 -17
- lionagi/core/direct/parallel_react.py +0 -0
- lionagi/core/direct/parallel_score.py +0 -0
- lionagi/core/direct/parallel_select.py +0 -0
- lionagi/core/direct/parallel_sentiment.py +0 -0
- lionagi/core/direct/predict.py +174 -0
- lionagi/core/{flow/direct → direct}/react.py +2 -2
- lionagi/core/{flow/direct → direct}/score.py +28 -23
- lionagi/core/{flow/direct → direct}/select.py +48 -45
- lionagi/core/direct/utils.py +83 -0
- lionagi/core/flow/monoflow/ReAct.py +6 -5
- lionagi/core/flow/monoflow/__init__.py +9 -0
- lionagi/core/flow/monoflow/chat.py +10 -10
- lionagi/core/flow/monoflow/chat_mixin.py +11 -10
- lionagi/core/flow/monoflow/followup.py +6 -5
- lionagi/core/flow/polyflow/__init__.py +1 -0
- lionagi/core/flow/polyflow/chat.py +15 -3
- lionagi/core/mail/mail_manager.py +18 -19
- lionagi/core/mail/schema.py +5 -4
- lionagi/core/messages/schema.py +18 -20
- lionagi/core/prompt/__init__.py +0 -0
- lionagi/core/prompt/prompt_template.py +0 -0
- lionagi/core/schema/__init__.py +2 -2
- lionagi/core/schema/action_node.py +11 -3
- lionagi/core/schema/base_mixin.py +56 -59
- lionagi/core/schema/base_node.py +34 -37
- lionagi/core/schema/condition.py +24 -0
- lionagi/core/schema/data_logger.py +96 -99
- lionagi/core/schema/data_node.py +19 -19
- lionagi/core/schema/prompt_template.py +0 -0
- lionagi/core/schema/structure.py +171 -169
- lionagi/core/session/__init__.py +1 -3
- lionagi/core/session/session.py +196 -214
- lionagi/core/tool/tool_manager.py +95 -103
- lionagi/integrations/__init__.py +1 -3
- lionagi/integrations/bridge/langchain_/documents.py +17 -18
- lionagi/integrations/bridge/langchain_/langchain_bridge.py +14 -14
- lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +22 -22
- lionagi/integrations/bridge/llamaindex_/node_parser.py +12 -12
- lionagi/integrations/bridge/llamaindex_/reader.py +11 -11
- lionagi/integrations/bridge/llamaindex_/textnode.py +7 -7
- lionagi/integrations/config/openrouter_configs.py +0 -1
- lionagi/integrations/provider/oai.py +26 -26
- lionagi/integrations/provider/services.py +38 -38
- lionagi/libs/__init__.py +34 -1
- lionagi/libs/ln_api.py +211 -221
- lionagi/libs/ln_async.py +53 -60
- lionagi/libs/ln_convert.py +118 -120
- lionagi/libs/ln_dataframe.py +32 -33
- lionagi/libs/ln_func_call.py +334 -342
- lionagi/libs/ln_nested.py +99 -107
- lionagi/libs/ln_parse.py +161 -165
- lionagi/libs/sys_util.py +52 -52
- lionagi/tests/test_core/test_session.py +254 -266
- lionagi/tests/test_core/test_session_base_util.py +299 -300
- lionagi/tests/test_core/test_tool_manager.py +70 -74
- lionagi/tests/test_libs/test_nested.py +2 -7
- lionagi/tests/test_libs/test_parse.py +2 -2
- lionagi/version.py +1 -1
- {lionagi-0.0.306.dist-info → lionagi-0.0.308.dist-info}/METADATA +4 -2
- lionagi-0.0.308.dist-info/RECORD +115 -0
- lionagi/core/flow/direct/utils.py +0 -43
- lionagi-0.0.306.dist-info/RECORD +0 -106
- /lionagi/core/{flow/direct → direct}/sentiment.py +0 -0
- {lionagi-0.0.306.dist-info → lionagi-0.0.308.dist-info}/LICENSE +0 -0
- {lionagi-0.0.306.dist-info → lionagi-0.0.308.dist-info}/WHEEL +0 -0
- {lionagi-0.0.306.dist-info → lionagi-0.0.308.dist-info}/top_level.txt +0 -0
lionagi/integrations/bridge/llamaindex_/textnode.py
CHANGED
@@ -10,17 +10,17 @@ def to_llama_index_node(lion_node, node_type: Any = None, **kwargs: Any) -> Any:
     the expected Llama Index node schema, and then creates a Llama Index node object of the specified type.
 
     Args:
-
-
-
-
+        lion_node: The Lion node to convert. Must have a `to_dict` method.
+        node_type (Any, optional): The type of Llama Index node to create. Can be a string name of a node class
+            within the Llama Index schema or a class that inherits from `BaseNode`. Defaults to 'TextNode'.
+        **kwargs: Additional keyword arguments to be included in the Llama Index node's initialization.
 
     Returns:
-
+        Any: A new instance of the specified Llama Index node type populated with data from the Lion node.
 
     Raises:
-
-
+        TypeError: If `node_type` is neither a string nor a subclass of `BaseNode`.
+        AttributeError: If an error occurs due to an invalid node type or during the creation of the node object.
     """
 
     SysUtil.check_import("llama_index", pip_name="llama-index")
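The new docstring pins down the bridge function's contract. Below is a minimal usage sketch, assuming lionagi 0.0.308 and llama-index are installed; the import path is inferred from the file list above, and MiniNode is a hypothetical stand-in for a Lion node:

from lionagi.integrations.bridge.llamaindex_.textnode import to_llama_index_node

class MiniNode:
    # Hypothetical stand-in: per the docstring, only a to_dict method is
    # required; mapping fields onto the Llama Index schema is the bridge's job.
    def to_dict(self):
        return {"content": "hello world"}

# node_type defaults to 'TextNode'; per the Raises section, anything that is
# neither a string nor a BaseNode subclass raises TypeError.
text_node = to_llama_index_node(MiniNode(), node_type="TextNode")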
lionagi/integrations/provider/oai.py
CHANGED
@@ -8,19 +8,19 @@ class OpenAIService(BaseService):
     A service to interact with OpenAI's API endpoints.
 
     Attributes:
-
-
-
-
-
+        base_url (str): The base URL for the OpenAI API.
+        available_endpoints (list): A list of available API endpoints.
+        schema (dict): The schema configuration for the API.
+        key_scheme (str): The environment variable name for OpenAI API key.
+        token_encoding_name (str): The default token encoding scheme.
 
     Examples:
-
-
-
+        >>> service = OpenAIService(api_key="your_api_key")
+        >>> asyncio.run(service.serve("Hello, world!","chat/completions"))
+        (payload, completion)
 
-
-
+        >>> service = OpenAIService()
+        >>> asyncio.run(service.serve("Convert this text to speech.","audio_speech"))
     """
 
     base_url = "https://api.openai.com/v1/"
@@ -57,25 +57,25 @@ class OpenAIService(BaseService):
         Serves the input using the specified endpoint and method.
 
         Args:
-
-
-
-
+            input_: The input text to be processed.
+            endpoint: The API endpoint to use for processing.
+            method: The HTTP method to use for the request.
+            **kwargs: Additional keyword arguments to pass to the payload creation.
 
         Returns:
-
+            A tuple containing the payload and the completion assistant_response from the API.
 
         Raises:
-
+            ValueError: If the specified endpoint is not supported.
 
         Examples:
-
-
-
+            >>> service = OpenAIService(api_key="your_api_key")
+            >>> asyncio.run(service.serve("Hello, world!","chat/completions"))
+            (payload, completion)
 
-
-
+            >>> service = OpenAIService()
+            >>> asyncio.run(service.serve("Convert this text to speech.","audio_speech"))
+            ValueError: 'audio_speech' is currently not supported
         """
         if endpoint not in self.active_endpoint:
             await self.init_endpoint(endpoint)
@@ -89,14 +89,14 @@ class OpenAIService(BaseService):
         Serves the chat completion request with the given messages.
 
         Args:
-
-
+            messages: The messages to be included in the chat completion.
+            **kwargs: Additional keyword arguments for payload creation.
 
         Returns:
-
+            A tuple containing the payload and the completion assistant_response from the API.
 
         Raises:
-
+            Exception: If the API call fails.
         """
         if "chat/completions" not in self.active_endpoint:
             await self.init_endpoint("chat/completions")
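Together, the serve and serve_chat docstrings describe one async round trip. Here is a runnable sketch stitched from the doctest lines above, assuming lionagi 0.0.308 is installed and the OPENAI_API_KEY environment variable is set (per key_scheme):

import asyncio

from lionagi.integrations.provider.oai import OpenAIService

async def main():
    # The docstring example shows the service can be constructed without an
    # explicit api_key; it is documented to read the key_scheme env variable.
    service = OpenAIService()
    # serve() returns (payload, completion); unsupported endpoints such as
    # "audio_speech" raise ValueError per the Examples section.
    payload, completion = await service.serve("Hello, world!", "chat/completions")
    print(completion)

asyncio.run(main())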
lionagi/integrations/provider/services.py
CHANGED
@@ -6,15 +6,15 @@ class Services:
         A provider to interact with OpenAI's API endpoints.
 
         Attributes:
-
-
-
-
-
-
-
-
-
+            api_key (Optional[str]): The API key used for authentication.
+            schema (Dict[str, Any]): The schema defining the provider's endpoints.
+            status_tracker (StatusTracker): The object tracking the status of API calls.
+            endpoints (Dict[str, EndPoint]): A dictionary of endpoint objects.
+            base_url (str): The base URL for the OpenAI API.
+            available_endpoints (list): A list of available API endpoints, including
+                'chat/completions'
+            key_scheme (str): The environment variable name for API key.
+            token_encoding_name (str): The default token encoding scheme.
         """
 
         from lionagi.integrations.provider.oai import OpenAIService
@@ -27,15 +27,15 @@ class Services:
         A provider to interact with OpenRouter's API endpoints.
 
         Attributes:
-
-
-
-
-
-
-
-
-
+            api_key (Optional[str]): The API key used for authentication.
+            schema (Dict[str, Any]): The schema defining the provider's endpoints.
+            status_tracker (StatusTracker): The object tracking the status of API calls.
+            endpoints (Dict[str, EndPoint]): A dictionary of endpoint objects.
+            base_url (str): The base URL for the OpenAI API.
+            available_endpoints (list): A list of available API endpoints, including
+                'chat/completions'
+            key_scheme (str): The environment variable name for API key.
+            token_encoding_name (str): The default token encoding scheme.
         """
 
         from lionagi.integrations.provider.openrouter import OpenRouterService
@@ -48,24 +48,24 @@ class Services:
         A provider to interact with Transformers' pipeline
 
         Attributes:
-
-
-
-
-
-
-
-
+            task (str): The specific task to be performed by the transformer model.
+                Currently, only 'conversational' tasks are supported.
+            model (Union[str, Any]): Identifier for the transformer model to be used. This
+                can be a model name or a path to a model.
+            config (Union[str, Dict, Any]): Configuration for the transformer model. Can
+                include tokenizer information among others.
+            pipe (pipeline): The loaded transformer pipeline for the specified task, model,
+                and configuration.
 
         Warnings:
-
-
-
-
+            - Ensure the selected model is suitable for conversational tasks to avoid
+              unexpected behavior.
+            - As this provider heavily relies on external libraries (Hugging Face's
+              Transformers), ensure they are installed and updated to compatible versions.
 
         Dependencies:
-
-
+            - Requires the `transformers` library by Hugging Face and `asyncio` for
+              asynchronous operations.
         """
 
         from lionagi.integrations.provider.transformers import TransformersService
@@ -99,8 +99,8 @@ class Services:
         A provider to interact with Ollama
 
         Attributes:
-
-
+            model (str): name of the model to use
+            kwargs (Optional[Any]): additional kwargs for calling the model
         """
 
         from lionagi.integrations.provider.ollama import OllamaService
@@ -113,8 +113,8 @@ class Services:
         A provider to interact with Litellm
 
         Attributes:
-
-
+            model (str): name of the model to use
+            kwargs (Optional[Any]): additional kwargs for calling the model
         """
 
         from .litellm import LiteLLMService
@@ -127,8 +127,8 @@ class Services:
         A provider to interact with MlX
 
         Attributes:
-
-
+            model (str): name of the model to use
+            kwargs (Optional[Any]): additional kwargs for calling the model
         """
 
         from lionagi.integrations.provider.mlx_service import MlXService
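Each services.py hunk pairs a docstring with a deferred provider import, which suggests factory members on the Services class. A hedged sketch of the likely access pattern; the member names (OpenAI, Ollama) and the top-level Services export are assumptions based on that structure, not confirmed by the diff:

from lionagi import Services  # assumed re-export of the Services class

# Hypothetical factories: each would instantiate the provider imported in the
# corresponding hunk above.
oai_service = Services.OpenAI()
ollama_service = Services.Ollama(model="llama2")  # "model (str): name of the model to use"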
lionagi/libs/__init__.py
CHANGED
@@ -1 +1,34 @@
-from . import
+from lionagi.libs.sys_util import SysUtil
+from lionagi.libs.ln_async import AsyncUtil
+
+import lionagi.libs.ln_convert as convert
+import lionagi.libs.ln_dataframe as dataframe
+import lionagi.libs.ln_func_call as func_call
+from lionagi.libs.ln_func_call import CallDecorator
+import lionagi.libs.ln_nested as nested
+from lionagi.libs.ln_parse import ParseUtil, StringMatch
+
+from lionagi.libs.ln_api import (
+    APIUtil,
+    SimpleRateLimiter,
+    StatusTracker,
+    BaseService,
+    PayloadPackage,
+)
+
+__all__ = [
+    "SysUtil",
+    "convert",
+    "func_call",
+    "dataframe",
+    "nested",
+    "AsyncUtil",
+    "ParseUtil",
+    "StringMatch",
+    "APIUtil",
+    "BaseService",
+    "PayloadPackage",
+    "StatusTracker",
+    "SimpleRateLimiter",
+    "CallDecorator",
+]
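The rewritten __init__ replaces the old single-line relative import (shown truncated in this rendering) with an explicit public surface. A quick sanity sketch using only names visible in this hunk; SysUtil.check_import's call shape is confirmed by the textnode.py hunk earlier:

from lionagi.libs import SysUtil, AsyncUtil, convert, func_call, CallDecorator

# Call shape confirmed by the textnode.py hunk above.
SysUtil.check_import("llama_index", pip_name="llama-index")

# convert and func_call are module re-exports (ln_convert, ln_func_call);
# their contents are not shown in this diff.
print(convert.__name__, func_call.__name__)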