lionagi 0.12.2__py3-none-any.whl → 0.12.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. lionagi/config.py +123 -0
  2. lionagi/fields/file.py +1 -1
  3. lionagi/fields/reason.py +1 -1
  4. lionagi/libs/file/concat.py +1 -6
  5. lionagi/libs/file/concat_files.py +1 -5
  6. lionagi/libs/file/save.py +1 -1
  7. lionagi/libs/package/imports.py +8 -177
  8. lionagi/libs/parse.py +30 -0
  9. lionagi/libs/schema/load_pydantic_model_from_schema.py +259 -0
  10. lionagi/libs/token_transform/perplexity.py +2 -4
  11. lionagi/libs/token_transform/synthlang_/resources/frameworks/framework_options.json +46 -46
  12. lionagi/libs/token_transform/synthlang_/translate_to_synthlang.py +1 -1
  13. lionagi/operations/chat/chat.py +2 -2
  14. lionagi/operations/communicate/communicate.py +20 -5
  15. lionagi/operations/parse/parse.py +131 -43
  16. lionagi/protocols/generic/log.py +1 -2
  17. lionagi/protocols/generic/pile.py +18 -4
  18. lionagi/protocols/messages/assistant_response.py +20 -1
  19. lionagi/protocols/messages/templates/README.md +6 -10
  20. lionagi/service/connections/__init__.py +15 -0
  21. lionagi/service/connections/api_calling.py +230 -0
  22. lionagi/service/connections/endpoint.py +410 -0
  23. lionagi/service/connections/endpoint_config.py +137 -0
  24. lionagi/service/connections/header_factory.py +56 -0
  25. lionagi/service/connections/match_endpoint.py +49 -0
  26. lionagi/service/connections/providers/__init__.py +3 -0
  27. lionagi/service/connections/providers/anthropic_.py +87 -0
  28. lionagi/service/connections/providers/exa_.py +33 -0
  29. lionagi/service/connections/providers/oai_.py +166 -0
  30. lionagi/service/connections/providers/ollama_.py +122 -0
  31. lionagi/service/connections/providers/perplexity_.py +29 -0
  32. lionagi/service/imodel.py +36 -144
  33. lionagi/service/manager.py +1 -7
  34. lionagi/service/{endpoints/rate_limited_processor.py → rate_limited_processor.py} +4 -2
  35. lionagi/service/resilience.py +545 -0
  36. lionagi/service/third_party/README.md +71 -0
  37. lionagi/service/third_party/__init__.py +0 -0
  38. lionagi/service/third_party/anthropic_models.py +159 -0
  39. lionagi/service/third_party/exa_models.py +165 -0
  40. lionagi/service/third_party/openai_models.py +18241 -0
  41. lionagi/service/third_party/pplx_models.py +156 -0
  42. lionagi/service/types.py +5 -4
  43. lionagi/session/branch.py +12 -7
  44. lionagi/tools/file/reader.py +1 -1
  45. lionagi/tools/memory/tools.py +497 -0
  46. lionagi/utils.py +921 -123
  47. lionagi/version.py +1 -1
  48. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/METADATA +33 -16
  49. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/RECORD +53 -63
  50. lionagi/libs/file/create_path.py +0 -80
  51. lionagi/libs/file/file_util.py +0 -358
  52. lionagi/libs/parse/__init__.py +0 -3
  53. lionagi/libs/parse/fuzzy_parse_json.py +0 -117
  54. lionagi/libs/parse/to_dict.py +0 -336
  55. lionagi/libs/parse/to_json.py +0 -61
  56. lionagi/libs/parse/to_num.py +0 -378
  57. lionagi/libs/parse/to_xml.py +0 -57
  58. lionagi/libs/parse/xml_parser.py +0 -148
  59. lionagi/libs/schema/breakdown_pydantic_annotation.py +0 -48
  60. lionagi/service/endpoints/__init__.py +0 -3
  61. lionagi/service/endpoints/base.py +0 -706
  62. lionagi/service/endpoints/chat_completion.py +0 -116
  63. lionagi/service/endpoints/match_endpoint.py +0 -72
  64. lionagi/service/providers/__init__.py +0 -3
  65. lionagi/service/providers/anthropic_/__init__.py +0 -3
  66. lionagi/service/providers/anthropic_/messages.py +0 -99
  67. lionagi/service/providers/exa_/models.py +0 -3
  68. lionagi/service/providers/exa_/search.py +0 -80
  69. lionagi/service/providers/exa_/types.py +0 -7
  70. lionagi/service/providers/groq_/__init__.py +0 -3
  71. lionagi/service/providers/groq_/chat_completions.py +0 -56
  72. lionagi/service/providers/ollama_/__init__.py +0 -3
  73. lionagi/service/providers/ollama_/chat_completions.py +0 -134
  74. lionagi/service/providers/openai_/__init__.py +0 -3
  75. lionagi/service/providers/openai_/chat_completions.py +0 -101
  76. lionagi/service/providers/openai_/spec.py +0 -14
  77. lionagi/service/providers/openrouter_/__init__.py +0 -3
  78. lionagi/service/providers/openrouter_/chat_completions.py +0 -62
  79. lionagi/service/providers/perplexity_/__init__.py +0 -3
  80. lionagi/service/providers/perplexity_/chat_completions.py +0 -44
  81. lionagi/service/providers/perplexity_/models.py +0 -5
  82. lionagi/service/providers/types.py +0 -17
  83. /lionagi/{service/providers/exa_/__init__.py → py.typed} +0 -0
  84. /lionagi/service/{endpoints/token_calculator.py → token_calculator.py} +0 -0
  85. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/WHEEL +0 -0
  86. {lionagi-0.12.2.dist-info → lionagi-0.12.4.dist-info}/licenses/LICENSE +0 -0
@@ -1,101 +0,0 @@
1
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
- from lionagi.service.endpoints.chat_completion import ChatCompletionEndPoint
6
-
7
- from .spec import reasoning_models, reasoning_not_support_params
8
-
9
- CHAT_COMPLETION_CONFIG = {
10
- "provider": "openai",
11
- "base_url": "https://api.openai.com/v1",
12
- "endpoint": "chat/completions",
13
- "method": "post",
14
- "openai_compatible": True,
15
- "is_invokeable": True,
16
- "requires_tokens": True,
17
- "is_streamable": True,
18
- "required_kwargs": {
19
- "messages",
20
- "model",
21
- },
22
- "deprecated_kwargs": {
23
- "max_tokens",
24
- "function_call",
25
- "functions",
26
- },
27
- "optional_kwargs": {
28
- "store",
29
- "reasoning_effort",
30
- "metadata",
31
- "frequency_penalty",
32
- "logit_bias",
33
- "logprobs",
34
- "top_logprobs",
35
- "max_completion_tokens",
36
- "n",
37
- "modalities",
38
- "prediction",
39
- "audio",
40
- "presence_penalty",
41
- "response_format",
42
- "seed",
43
- "service_tier",
44
- "stop",
45
- "stream",
46
- "stream_options",
47
- "temperature",
48
- "top_p",
49
- "tools",
50
- "tool_choice",
51
- "parallel_tool_calls",
52
- "user",
53
- },
54
- "allowed_roles": ["user", "assistant", "system", "developer", "tool"],
55
- }
56
-
57
-
58
- class OpenAIChatCompletionEndPoint(ChatCompletionEndPoint):
59
- """
60
- Documentation: https://platform.openai.com/docs/api-reference/chat/create
61
- """
62
-
63
- def __init__(self, config: dict = CHAT_COMPLETION_CONFIG):
64
- super().__init__(config)
65
-
66
- def create_payload(self, **kwargs) -> dict:
67
- """Generates a request payload (and headers) for this endpoint.
68
-
69
- Args:
70
- **kwargs:
71
- Arbitrary parameters passed by the caller.
72
-
73
- Returns:
74
- dict:
75
- A dictionary containing:
76
- - "payload": A dict with filtered parameters for the request.
77
- - "headers": A dict of additional headers (e.g., `Authorization`).
78
- - "is_cached": Whether the request is to be cached.
79
- """
80
- payload = {}
81
- is_cached = kwargs.get("is_cached", False)
82
- headers = kwargs.get("headers", {})
83
- for k, v in kwargs.items():
84
- if k in self.acceptable_kwargs:
85
- payload[k] = v
86
- if "api_key" in kwargs:
87
- headers["Authorization"] = f"Bearer {kwargs['api_key']}"
88
-
89
- if payload.get("model") in reasoning_models:
90
- for param in reasoning_not_support_params:
91
- payload.pop(param, None)
92
- if payload["messages"][0].get("role") == "system":
93
- payload["messages"][0]["role"] = "developer"
94
- else:
95
- payload.pop("reasoning_effort", None)
96
-
97
- return {
98
- "payload": payload,
99
- "headers": headers,
100
- "is_cached": is_cached,
101
- }
@@ -1,14 +0,0 @@
1
- reasoning_models = (
2
- "o3-mini-2025-01-31",
3
- "o3-mini",
4
- "o1",
5
- "o1-2024-12-17",
6
- )
7
-
8
- reasoning_not_support_params = (
9
- "temperature",
10
- "top_p",
11
- "logit_bias",
12
- "logprobs",
13
- "top_logprobs",
14
- )
@@ -1,3 +0,0 @@
1
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
@@ -1,62 +0,0 @@
1
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
- from lionagi.service.endpoints.chat_completion import ChatCompletionEndPoint
6
-
7
- CHAT_COMPLETION_CONFIG = {
8
- "provider": "openrouter",
9
- "base_url": "https://openrouter.ai/api/v1",
10
- "endpoint": "chat/completions",
11
- "method": "post",
12
- "openai_compatible": True,
13
- "is_invokeable": True,
14
- "requires_tokens": True,
15
- "is_streamable": True,
16
- "required_kwargs": {
17
- "messages",
18
- "model",
19
- },
20
- "deprecated_kwargs": {
21
- "max_tokens",
22
- "function_call",
23
- "functions",
24
- },
25
- "optional_kwargs": {
26
- "store",
27
- "reasoning_effort",
28
- "metadata",
29
- "frequency_penalty",
30
- "logit_bias",
31
- "logprobs",
32
- "top_logprobs",
33
- "max_completion_tokens",
34
- "n",
35
- "modalities",
36
- "prediction",
37
- "audio",
38
- "presence_penalty",
39
- "response_format",
40
- "seed",
41
- "service_tier",
42
- "stop",
43
- "stream",
44
- "stream_options",
45
- "temperature",
46
- "top_p",
47
- "tools",
48
- "tool_choice",
49
- "parallel_tool_calls",
50
- "user",
51
- },
52
- "allowed_roles": ["user", "assistant", "system"],
53
- }
54
-
55
-
56
- class OpenRouterChatCompletionEndPoint(ChatCompletionEndPoint):
57
- """
58
- Documentation: https://openrouter.ai/docs/quick-start#quick-start
59
- """
60
-
61
- def __init__(self, config: dict = CHAT_COMPLETION_CONFIG):
62
- super().__init__(config)
@@ -1,3 +0,0 @@
1
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
@@ -1,44 +0,0 @@
1
- # Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
- from lionagi.service.endpoints.chat_completion import ChatCompletionEndPoint
6
-
7
- from .models import PerplexityChatCompletionRequest
8
-
9
- CHAT_COMPLETION_CONFIG = {
10
- "name": "search_perplexity",
11
- "provider": "perplexity",
12
- "base_url": "https://api.perplexity.ai",
13
- "endpoint": "chat/completions",
14
- "method": "post",
15
- "openai_compatible": True,
16
- "is_invokeable": True,
17
- "requires_tokens": True,
18
- "is_streamable": True,
19
- "required_kwargs": {
20
- "messages",
21
- "model",
22
- },
23
- "optional_kwargs": {
24
- "max_tokens",
25
- "temperature",
26
- "top_p",
27
- "search_domain_filter",
28
- "return_images",
29
- "return_related_questions",
30
- "search_recency_filter",
31
- "top_k",
32
- "stream",
33
- "presence_penalty",
34
- "frequency_penalty",
35
- },
36
- "allowed_roles": ["user", "assistant"],
37
- "request_options": PerplexityChatCompletionRequest,
38
- }
39
-
40
-
41
- class PerplexityChatCompletionEndPoint(ChatCompletionEndPoint):
42
-
43
- def __init__(self, config: dict = CHAT_COMPLETION_CONFIG):
44
- super().__init__(config)
@@ -1,5 +0,0 @@
1
- from khive.providers.perplexity_ import (
2
- PerplexityChatRequest as PerplexityChatCompletionRequest,
3
- )
4
-
5
- __all__ = ("PerplexityChatCompletionRequest",)
@@ -1,17 +0,0 @@
1
- from .anthropic_.messages import AnthropicChatCompletionEndPoint
2
- from .exa_.models import ExaSearchRequest
3
- from .exa_.search import ExaSearchEndPoint
4
- from .groq_.chat_completions import GroqChatCompletionEndPoint
5
- from .openai_.chat_completions import OpenAIChatCompletionEndPoint
6
- from .openrouter_.chat_completions import OpenRouterChatCompletionEndPoint
7
- from .perplexity_.chat_completions import PerplexityChatCompletionEndPoint
8
-
9
- __all__ = (
10
- "AnthropicChatCompletionEndPoint",
11
- "ExaSearchEndPoint",
12
- "ExaSearchRequest",
13
- "GroqChatCompletionEndPoint",
14
- "OpenAIChatCompletionEndPoint",
15
- "OpenRouterChatCompletionEndPoint",
16
- "PerplexityChatCompletionEndPoint",
17
- )