lionagi 0.12.2__py3-none-any.whl → 0.12.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. lionagi/config.py +123 -0
  2. lionagi/fields/file.py +1 -1
  3. lionagi/fields/reason.py +1 -1
  4. lionagi/libs/file/concat.py +1 -6
  5. lionagi/libs/file/concat_files.py +1 -5
  6. lionagi/libs/file/save.py +1 -1
  7. lionagi/libs/package/imports.py +8 -177
  8. lionagi/libs/parse.py +30 -0
  9. lionagi/libs/schema/load_pydantic_model_from_schema.py +259 -0
  10. lionagi/libs/token_transform/perplexity.py +2 -4
  11. lionagi/libs/token_transform/synthlang_/resources/frameworks/framework_options.json +46 -46
  12. lionagi/libs/token_transform/synthlang_/translate_to_synthlang.py +1 -1
  13. lionagi/operations/chat/chat.py +2 -2
  14. lionagi/operations/communicate/communicate.py +20 -5
  15. lionagi/operations/parse/parse.py +131 -43
  16. lionagi/protocols/generic/log.py +1 -2
  17. lionagi/protocols/generic/pile.py +18 -4
  18. lionagi/protocols/messages/assistant_response.py +20 -1
  19. lionagi/protocols/messages/templates/README.md +6 -10
  20. lionagi/service/connections/__init__.py +15 -0
  21. lionagi/service/connections/api_calling.py +230 -0
  22. lionagi/service/connections/endpoint.py +410 -0
  23. lionagi/service/connections/endpoint_config.py +137 -0
  24. lionagi/service/connections/header_factory.py +56 -0
  25. lionagi/service/connections/match_endpoint.py +49 -0
  26. lionagi/service/connections/providers/__init__.py +3 -0
  27. lionagi/service/connections/providers/anthropic_.py +87 -0
  28. lionagi/service/connections/providers/exa_.py +33 -0
  29. lionagi/service/connections/providers/oai_.py +166 -0
  30. lionagi/service/connections/providers/ollama_.py +122 -0
  31. lionagi/service/connections/providers/perplexity_.py +29 -0
  32. lionagi/service/imodel.py +36 -144
  33. lionagi/service/manager.py +1 -7
  34. lionagi/service/{endpoints/rate_limited_processor.py → rate_limited_processor.py} +4 -2
  35. lionagi/service/resilience.py +545 -0
  36. lionagi/service/third_party/README.md +71 -0
  37. lionagi/service/third_party/__init__.py +0 -0
  38. lionagi/service/third_party/anthropic_models.py +159 -0
  39. lionagi/service/third_party/exa_models.py +165 -0
  40. lionagi/service/third_party/openai_models.py +18241 -0
  41. lionagi/service/third_party/pplx_models.py +156 -0
  42. lionagi/service/types.py +5 -4
  43. lionagi/session/branch.py +12 -7
  44. lionagi/tools/file/reader.py +1 -1
  45. lionagi/tools/memory/tools.py +497 -0
  46. lionagi/utils.py +921 -123
  47. lionagi/version.py +1 -1
  48. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/METADATA +33 -16
  49. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/RECORD +53 -63
  50. lionagi/libs/file/create_path.py +0 -80
  51. lionagi/libs/file/file_util.py +0 -358
  52. lionagi/libs/parse/__init__.py +0 -3
  53. lionagi/libs/parse/fuzzy_parse_json.py +0 -117
  54. lionagi/libs/parse/to_dict.py +0 -336
  55. lionagi/libs/parse/to_json.py +0 -61
  56. lionagi/libs/parse/to_num.py +0 -378
  57. lionagi/libs/parse/to_xml.py +0 -57
  58. lionagi/libs/parse/xml_parser.py +0 -148
  59. lionagi/libs/schema/breakdown_pydantic_annotation.py +0 -48
  60. lionagi/service/endpoints/__init__.py +0 -3
  61. lionagi/service/endpoints/base.py +0 -706
  62. lionagi/service/endpoints/chat_completion.py +0 -116
  63. lionagi/service/endpoints/match_endpoint.py +0 -72
  64. lionagi/service/providers/__init__.py +0 -3
  65. lionagi/service/providers/anthropic_/__init__.py +0 -3
  66. lionagi/service/providers/anthropic_/messages.py +0 -99
  67. lionagi/service/providers/exa_/models.py +0 -3
  68. lionagi/service/providers/exa_/search.py +0 -80
  69. lionagi/service/providers/exa_/types.py +0 -7
  70. lionagi/service/providers/groq_/__init__.py +0 -3
  71. lionagi/service/providers/groq_/chat_completions.py +0 -56
  72. lionagi/service/providers/ollama_/__init__.py +0 -3
  73. lionagi/service/providers/ollama_/chat_completions.py +0 -134
  74. lionagi/service/providers/openai_/__init__.py +0 -3
  75. lionagi/service/providers/openai_/chat_completions.py +0 -101
  76. lionagi/service/providers/openai_/spec.py +0 -14
  77. lionagi/service/providers/openrouter_/__init__.py +0 -3
  78. lionagi/service/providers/openrouter_/chat_completions.py +0 -62
  79. lionagi/service/providers/perplexity_/__init__.py +0 -3
  80. lionagi/service/providers/perplexity_/chat_completions.py +0 -44
  81. lionagi/service/providers/perplexity_/models.py +0 -5
  82. lionagi/service/providers/types.py +0 -17
  83. /lionagi/{service/providers/exa_/__init__.py → py.typed} +0 -0
  84. /lionagi/service/{endpoints/token_calculator.py → token_calculator.py} +0 -0
  85. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/WHEEL +0 -0
  86. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/licenses/LICENSE +0 -0
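Taken together, the file list shows 0.12.4 replacing the litellm-backed lionagi/service/endpoints/* and lionagi/service/providers/* trees (removed below) with a new lionagi/service/connections/* package plus vendored request models under lionagi/service/third_party/. Import paths move accordingly; a hypothetical before/after, assuming the factory keeps its match_endpoint name in the new module:

    # 0.12.2 (removed in this release)
    from lionagi.service.endpoints.match_endpoint import match_endpoint

    # 0.12.4 (new location per the file list; function name assumed unchanged)
    from lionagi.service.connections.match_endpoint import match_endpoint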
lionagi/service/endpoints/chat_completion.py
@@ -1,116 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
- import warnings
- from collections.abc import AsyncGenerator
-
- from .base import EndPoint
-
- warnings.filterwarnings(
-     "ignore",
-     message=".*Valid config keys have changed in V2.*",
-     category=UserWarning,
-     module="pydantic._internal._config",
- )
-
-
- CHAT_COMPLETION_CONFIG = {
-     "endpoint": "chat/completions",
-     "method": "post",
-     "requires_tokens": True,
-     "openai_compatible": True,
-     "is_invokeable": True,
-     "is_streamable": True,
- }
-
-
- class ChatCompletionEndPoint(EndPoint):
-
-     def __init__(self, config: dict = CHAT_COMPLETION_CONFIG):
-         super().__init__(config)
-
-     async def _invoke(
-         self,
-         payload: dict,
-         headers: dict,
-         **kwargs,
-     ):
-         from lionagi.libs.package.imports import check_import
-
-         check_import("litellm")
-         import litellm  # type: ignore
-
-         litellm.drop_params = True
-         from litellm import acompletion  # type: ignore
-
-         provider = self.config.provider
-
-         if not provider in payload["model"]:
-             payload["model"] = f"{provider}/{payload['model']}"
-
-         api_key = None
-
-         if "Authorization" in headers:
-             api_key = headers.pop("Authorization").replace("Bearer ", "")
-
-         if "x-api-key" in headers:
-             api_key = headers.pop("x-api-key")
-
-         params = {
-             "api_key": api_key,
-             "base_url": self.config.base_url,
-             **payload,
-             **kwargs,
-         }
-         if headers:
-             params["extra_headers"] = headers
-         if not self.openai_compatible:
-             params.pop("base_url")
-
-         return await acompletion(**params)
-
-     async def _stream(
-         self,
-         payload: dict,
-         headers: dict,
-         **kwargs,
-     ) -> AsyncGenerator:
-         from lionagi.libs.package.imports import check_import
-
-         check_import("litellm")
-         import litellm  # type: ignore
-
-         litellm.drop_params = True
-         from litellm import acompletion  # type: ignore
-
-         provider = self.config.provider
-
-         if not provider in payload["model"]:
-             payload["model"] = f"{provider}/{payload['model']}"
-
-         api_key = None
-
-         if "Authorization" in headers:
-             api_key = headers.pop("Authorization").replace("Bearer ", "")
-
-         if "x-api-key" in headers:
-             api_key = headers.pop("x-api-key")
-
-         params = {
-             "api_key": api_key,
-             "base_url": self.config.base_url,
-             **payload,
-             **kwargs,
-         }
-         if headers:
-             params["extra_headers"] = headers
-         if not self.openai_compatible:
-             params.pop("base_url")
-
-         params["stream"] = True
-         async for i in await acompletion(**params):
-             yield i
-
-     @property
-     def allowed_roles(self):
-         return ["system", "user", "assistant"]
lionagi/service/endpoints/match_endpoint.py
@@ -1,72 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
-
- from .chat_completion import EndPoint
-
-
- def match_endpoint(
-     provider: str,
-     base_url: str,
-     endpoint: str,
-     endpoint_params: list[str] | None = None,
- ) -> EndPoint:
-
-     if endpoint in ["chat/completions", "chat", "messages"]:
-         from ..providers.openai_.chat_completions import (
-             OpenAIChatCompletionEndPoint,
-         )
-
-         if provider == "openai":
-             return OpenAIChatCompletionEndPoint()
-
-         if provider == "anthropic":
-             from ..providers.anthropic_.messages import (
-                 AnthropicChatCompletionEndPoint,
-             )
-
-             return AnthropicChatCompletionEndPoint()
-
-         if provider == "groq":
-             from ..providers.groq_.chat_completions import (
-                 GroqChatCompletionEndPoint,
-             )
-
-             return GroqChatCompletionEndPoint()
-
-         if provider == "perplexity":
-             from ..providers.perplexity_.chat_completions import (
-                 PerplexityChatCompletionEndPoint,
-             )
-
-             return PerplexityChatCompletionEndPoint()
-
-         if provider == "openrouter":
-             from ..providers.openrouter_.chat_completions import (
-                 OpenRouterChatCompletionEndPoint,
-             )
-
-             return OpenRouterChatCompletionEndPoint()
-
-         if provider == "ollama":
-             from ..providers.ollama_.chat_completions import (
-                 OllamaChatCompletionEndPoint,
-             )
-
-             return OllamaChatCompletionEndPoint()
-
-         return OpenAIChatCompletionEndPoint(
-             config={
-                 "provider": provider,
-                 "base_url": base_url,
-                 "endpoint": endpoint,
-                 "endpoint_params": endpoint_params,
-             }
-         )
-
-     if provider == "exa" and endpoint == "search":
-         from ..providers.exa_.search import ExaSearchEndPoint
-
-         return ExaSearchEndPoint()
-
-     raise ValueError(f"Unsupported endpoint: {endpoint}")
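As removed, match_endpoint dispatched on endpoint name then provider, with the chat branch always returning, so the exa branch and the ValueError were only reachable for non-chat endpoints. A hypothetical 0.12.2 usage (the "deepseek" provider is an example, not taken from the diff):

    from lionagi.service.endpoints.match_endpoint import match_endpoint

    # Dedicated provider class
    ep = match_endpoint(
        provider="anthropic",
        base_url="https://api.anthropic.com/v1",
        endpoint="messages",
    )

    # Unknown provider on a chat endpoint: falls through to the
    # OpenAI-compatible default, carrying the supplied base_url
    ep = match_endpoint(
        provider="deepseek",
        base_url="https://api.deepseek.com/v1",
        endpoint="chat/completions",
    )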
lionagi/service/providers/__init__.py
@@ -1,3 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
lionagi/service/providers/anthropic_/__init__.py
@@ -1,3 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
lionagi/service/providers/anthropic_/messages.py
@@ -1,99 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
-
- from lionagi.service.endpoints.chat_completion import ChatCompletionEndPoint
-
- CHAT_COMPLETION_CONFIG = {
-     "provider": "anthropic",
-     "endpoint": "messages",
-     "method": "post",
-     "requires_tokens": True,
-     "openai_compatible": False,
-     "is_invokeable": True,
-     "is_streamable": True,
-     "base_url": "https://api.anthropic.com/v1",
-     "api_version": "2023-06-01",
-     "required_kwargs": {
-         "messages",
-         "model",
-         "max_tokens",
-     },
-     "optional_kwargs": {
-         "metadata",
-         "stop_sequences",
-         "stream",
-         "system",
-         "temperature",
-         "tool_choice",
-         "tools",
-         "top_p",
-         "top_k",
-         "cache_control",
-     },
-     "allowed_roles": ["user", "assistant"],
- }
-
-
- class AnthropicChatCompletionEndPoint(ChatCompletionEndPoint):
-     """
-     Documentation: https://docs.anthropic.com/en/api/
-     """
-
-     def __init__(self, config: dict = CHAT_COMPLETION_CONFIG):
-         super().__init__(config)
-
-     def create_payload(self, **kwargs) -> dict:
-         payload = {}
-         cached = kwargs.get("cached", False)
-         headers = kwargs.get("headers", {})
-         for k, v in kwargs.items():
-             if k in self.acceptable_kwargs:
-                 payload[k] = v
-
-         for i in self.required_kwargs:
-             if i not in payload:
-                 raise ValueError(f"Missing required argument: {i}")
-
-         if "cache_control" in payload:
-             cache_control = payload.pop("cache_control")
-             if cache_control:
-                 cache_control = {"type": "ephemeral"}
-                 last_message = payload["messages"][-1]["content"]
-                 if isinstance(last_message, str):
-                     last_message = {
-                         "type": "text",
-                         "text": last_message,
-                         "cache_control": cache_control,
-                     }
-                 elif isinstance(last_message, list) and isinstance(
-                     last_message[-1], dict
-                 ):
-                     last_message[-1]["cache_control"] = cache_control
-                 payload["messages"][-1]["content"] = (
-                     [last_message]
-                     if not isinstance(last_message, list)
-                     else last_message
-                 )
-
-         first_message = payload["messages"][0]
-         system = None
-         if first_message.get("role") == "system":
-             system = first_message["content"]
-             system = [{"type": "text", "text": system}]
-             payload["messages"] = payload["messages"][1:]
-             payload["system"] = system
-
-         if "api_key" in kwargs:
-             headers["x-api-key"] = kwargs["api_key"]
-         headers["anthropic-version"] = kwargs.pop(
-             "api_version", self.config.api_version
-         )
-         if "content-type" not in kwargs:
-             headers["content-type"] = "application/json"
-
-         return {
-             "payload": payload,
-             "headers": headers,
-             "cached": cached,
-         }
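The cache_control branch above rewrites only the final message into Anthropic's content-block form. A worked before/after under that logic (the message text is an arbitrary example):

    # Input, with cache_control=True passed alongside the payload
    messages = [{"role": "user", "content": "Summarize this document."}]

    # After create_payload, the last message's string content has become a
    # single text block carrying the ephemeral cache marker:
    messages[-1]["content"] = [
        {
            "type": "text",
            "text": "Summarize this document.",
            "cache_control": {"type": "ephemeral"},
        }
    ]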
lionagi/service/providers/exa_/models.py
@@ -1,3 +0,0 @@
- from khive.providers.exa_ import ExaSearchRequest
-
- __all__ = ("ExaSearchRequest",)
lionagi/service/providers/exa_/search.py
@@ -1,80 +0,0 @@
- from typing import Literal
-
- from lionagi.service.endpoints.base import EndPoint
-
- from .models import ExaSearchRequest
-
- CATEGORY_OPTIONS = Literal[
-     "article",
-     "book",
-     "company",
-     "research paper",
-     "news",
-     "pdf",
-     "github",
-     "tweet",
-     "personal site",
-     "linkedin profile",
-     "financial report",
- ]
-
- SEARCH_CONFIG = {
-     "name": "search_exa",
-     "provider": "exa",
-     "base_url": "https://api.exa.ai",
-     "endpoint": "search",
-     "method": "post",
-     "openai_compatible": False,
-     "is_invokeable": False,
-     "requires_tokens": False,
-     "is_streamable": False,
-     "required_kwargs": {
-         "query",
-     },
-     "optional_kwargs": {
-         "category",
-         "contents",
-         "endCrawlDate",
-         "endPublishedDate",
-         "excludeDomains",
-         "excludeText",
-         "includeDomains",
-         "includeText",
-         "numResults",
-         "startCrawlDate",
-         "startPublishedDate",
-         "type",  # keyword, neural, auto
-         "useAutoPrompt",
-     },
-     "request_options": ExaSearchRequest,
- }
-
-
- class ExaSearchEndPoint(EndPoint):
-
-     def __init__(self, config: dict = SEARCH_CONFIG):
-         super().__init__(config)
-
-     def create_payload(
-         self, request_obj: "ExaSearchRequest" = None, **kwargs
-     ) -> dict:
-         if request_obj is not None:
-             kwargs.update(request_obj.to_dict(exclude_none=True))
-
-         payload = {}
-         is_cached = kwargs.get("is_cached", False)
-         headers = kwargs.get("headers", {})
-
-         for k, v in kwargs.items():
-             if k in self.acceptable_kwargs:
-                 payload[k] = v
-         if "api_key" in kwargs:
-             headers["x-api-key"] = kwargs["api_key"]
-         if "content-type" not in kwargs:
-             headers["content-type"] = "application/json"
-
-         return {
-             "payload": payload,
-             "headers": headers,
-             "is_cached": is_cached,
-         }
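create_payload filtered keyword arguments against the config's accepted keys and routed credentials into headers. A hypothetical call against the removed endpoint:

    endpoint = ExaSearchEndPoint()
    request = endpoint.create_payload(
        query="agent frameworks",   # the only required kwarg
        numResults=5,               # optional, copied into the payload
        api_key="<EXA_API_KEY>",    # not an accepted kwarg, so it lands
    )                               # only in the x-api-key header
    # request == {"payload": {...}, "headers": {...}, "is_cached": False}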
lionagi/service/providers/exa_/types.py
@@ -1,7 +0,0 @@
- from .models import ExaSearchRequest
- from .search import ExaSearchEndPoint
-
- __all__ = (
-     "ExaSearchRequest",
-     "ExaSearchEndPoint",
- )
lionagi/service/providers/groq_/__init__.py
@@ -1,3 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
lionagi/service/providers/groq_/chat_completions.py
@@ -1,56 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
-
- from lionagi.service.endpoints.chat_completion import ChatCompletionEndPoint
-
- CHAT_COMPLETION_CONFIG = {
-     "provider": "groq",
-     "base_url": "https://api.groq.com/openai/v1",
-     "endpoint": "chat/completions",
-     "method": "post",
-     "openai_compatible": True,
-     "is_invokeable": True,
-     "requires_tokens": True,
-     "is_streamable": True,
-     "required_kwargs": {
-         "messages",
-         "model",
-     },
-     "deprecated_kwargs": {
-         "max_tokens",
-         "function_call",
-         "functions",
-     },
-     "optional_kwargs": {
-         "store",
-         "reasoning_effort",
-         "metadata",
-         "frequency_penalty",
-         "max_completion_tokens",
-         "modalities",
-         "prediction",
-         "audio",
-         "presence_penalty",
-         "seed",
-         "service_tier",
-         "stop",
-         "stream",
-         "stream_options",
-         "temperature",
-         "top_p",
-         "tools",
-         "tool_choice",
-         "parallel_tool_calls",
-         "user",
-     },
- }
-
-
- class GroqChatCompletionEndPoint(ChatCompletionEndPoint):
-     """
-     Documentation: https://console.groq.com/docs/overview
-     """
-
-     def __init__(self, config: dict = CHAT_COMPLETION_CONFIG):
-         super().__init__(config)
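The Groq config separates deprecated_kwargs from optional_kwargs, steering callers toward max_completion_tokens and tools rather than max_tokens and functions/function_call. A hypothetical payload shaped to that config (the model id is an example, not from the diff):

    payload = {
        "model": "llama-3.1-8b-instant",
        "messages": [{"role": "user", "content": "Hello"}],
        "max_completion_tokens": 256,  # preferred over deprecated max_tokens
        "temperature": 0.7,
    }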
lionagi/service/providers/ollama_/__init__.py
@@ -1,3 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
lionagi/service/providers/ollama_/chat_completions.py
@@ -1,134 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0
-
- from collections.abc import AsyncGenerator
-
- from lionagi.service.endpoints.chat_completion import ChatCompletionEndPoint
-
- CHAT_COMPLETION_CONFIG = {
-     "provider": "ollama",
-     "base_url": "http://localhost:11434/v1",
-     "endpoint": "chat",
-     "method": "post",
-     "openai_compatible": True,
-     "is_invokeable": True,
-     "requires_tokens": True,
-     "is_streamable": True,
-     "required_kwargs": {
-         "messages",
-         "model",
-     },
-     "optional_kwargs": {
-         "frequency_penalty",
-         "presence_penalty",
-         "response_format",
-         "seed",
-         "stop",
-         "stream",
-         "stream_options",
-         "temperature",
-         "top_p",
-         "max_tokens",
-         "tools",
-         # "tool_choice",
-         # "logit_bias",
-         # "user",
-         # "n",
-         # "logprobs",
-     },
-     "allowed_roles": ["user", "assistant", "system"],
-     "invoke_with_endpoint": True,
- }
-
-
- class OllamaChatCompletionEndPoint(ChatCompletionEndPoint):
-     """
-     Documentation: https://platform.openai.com/docs/api-reference/chat/create
-     """
-
-     def __init__(self, config: dict = CHAT_COMPLETION_CONFIG):
-         from lionagi.libs.package.imports import check_import
-
-         check_import("openai")
-         check_import("ollama")
-
-         from ollama import list, pull  # type: ignore
-         from openai import AsyncOpenAI  # type: ignore
-
-         super().__init__(config)
-         self.client = AsyncOpenAI(
-             base_url=self.config.base_url,
-             api_key="ollama",
-         )
-         self._pull = pull
-         self._list = list
-
-     async def _invoke(
-         self,
-         payload: dict,
-         headers: dict,
-         **kwargs,
-     ):
-         self._check_model(payload["model"])
-         params = {**payload, **kwargs}
-         headers.pop("Authorization", None)
-         params["extra_headers"] = headers
-
-         if "response_format" in payload:
-             return await self.client.beta.chat.completions.parse(**params)
-         params.pop("response_format", None)
-         return await self.client.chat.completions.create(**params)
-
-     async def _stream(
-         self,
-         payload: dict,
-         headers: dict,
-         **kwargs,
-     ) -> AsyncGenerator:
-
-         self._check_model(payload["model"])
-         params = {**payload, **kwargs}
-         headers.pop("Authorization", None)
-         params["extra_headers"] = headers
-
-         async for chunk in self.client.beta.chat.completions.stream(**params):
-             yield chunk
-
-     @property
-     def allowed_roles(self):
-         return ["system", "user", "assistant"]
-
-     def _pull_model(self, model: str):
-         from tqdm import tqdm
-
-         current_digest, bars = "", {}
-         for progress in self._pull(model, stream=True):
-             digest = progress.get("digest", "")
-             if digest != current_digest and current_digest in bars:
-                 bars[current_digest].close()
-
-             if not digest:
-                 print(progress.get("status"))
-                 continue
-
-             if digest not in bars and (total := progress.get("total")):
-                 bars[digest] = tqdm(
-                     total=total,
-                     desc=f"pulling {digest[7:19]}",
-                     unit="B",
-                     unit_scale=True,
-                 )
-
-             if completed := progress.get("completed"):
-                 bars[digest].update(completed - bars[digest].n)
-
-             current_digest = digest
-
-     def _list_local_models(self) -> set:
-         response = self._list()
-         return {i.model for i in response.models}
-
-     def _check_model(self, model: str):
-         if model not in self._list_local_models():
-             self._pull_model(model)
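_check_model gave the Ollama endpoint its auto-pull behavior: every call first checks the local model list and streams a pull when the model is absent. A standalone sketch of that flow, assuming the same ollama client package used above (progress bars omitted; ensure_model is an illustrative name):

    import ollama

    def ensure_model(model: str) -> None:
        # Mirrors _list_local_models/_check_model above
        local = {m.model for m in ollama.list().models}
        if model not in local:
            # Streamed pull, as in _pull_model
            for progress in ollama.pull(model, stream=True):
                if status := progress.get("status"):
                    print(status)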
lionagi/service/providers/openai_/__init__.py
@@ -1,3 +0,0 @@
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
- #
- # SPDX-License-Identifier: Apache-2.0