lionagi 0.12.3__py3-none-any.whl → 0.12.5__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (74)
  1. lionagi/config.py +123 -0
  2. lionagi/libs/schema/load_pydantic_model_from_schema.py +259 -0
  3. lionagi/libs/token_transform/perplexity.py +2 -4
  4. lionagi/libs/token_transform/synthlang_/translate_to_synthlang.py +1 -1
  5. lionagi/operations/chat/chat.py +2 -2
  6. lionagi/operations/communicate/communicate.py +20 -5
  7. lionagi/operations/parse/parse.py +131 -43
  8. lionagi/protocols/generic/pile.py +94 -33
  9. lionagi/protocols/graph/node.py +25 -19
  10. lionagi/protocols/messages/assistant_response.py +20 -1
  11. lionagi/service/connections/__init__.py +15 -0
  12. lionagi/service/connections/api_calling.py +230 -0
  13. lionagi/service/connections/endpoint.py +410 -0
  14. lionagi/service/connections/endpoint_config.py +137 -0
  15. lionagi/service/connections/header_factory.py +56 -0
  16. lionagi/service/connections/match_endpoint.py +49 -0
  17. lionagi/service/connections/providers/__init__.py +3 -0
  18. lionagi/service/connections/providers/anthropic_.py +87 -0
  19. lionagi/service/connections/providers/exa_.py +33 -0
  20. lionagi/service/connections/providers/oai_.py +166 -0
  21. lionagi/service/connections/providers/ollama_.py +122 -0
  22. lionagi/service/connections/providers/perplexity_.py +29 -0
  23. lionagi/service/imodel.py +36 -144
  24. lionagi/service/manager.py +1 -7
  25. lionagi/service/{endpoints/rate_limited_processor.py → rate_limited_processor.py} +4 -2
  26. lionagi/service/resilience.py +545 -0
  27. lionagi/service/third_party/README.md +71 -0
  28. lionagi/service/third_party/anthropic_models.py +159 -0
  29. lionagi/service/{providers/exa_/models.py → third_party/exa_models.py} +18 -13
  30. lionagi/service/third_party/openai_models.py +18241 -0
  31. lionagi/service/third_party/pplx_models.py +156 -0
  32. lionagi/service/types.py +5 -4
  33. lionagi/session/branch.py +12 -7
  34. lionagi/tools/file/reader.py +1 -1
  35. lionagi/tools/memory/tools.py +497 -0
  36. lionagi/version.py +1 -1
  37. {lionagi-0.12.3.dist-info → lionagi-0.12.5.dist-info}/METADATA +17 -19
  38. {lionagi-0.12.3.dist-info → lionagi-0.12.5.dist-info}/RECORD +43 -54
  39. lionagi/adapters/__init__.py +0 -1
  40. lionagi/adapters/adapter.py +0 -120
  41. lionagi/adapters/json_adapter.py +0 -181
  42. lionagi/adapters/pandas_/csv_adapter.py +0 -94
  43. lionagi/adapters/pandas_/excel_adapter.py +0 -94
  44. lionagi/adapters/pandas_/pd_dataframe_adapter.py +0 -81
  45. lionagi/adapters/pandas_/pd_series_adapter.py +0 -57
  46. lionagi/adapters/toml_adapter.py +0 -204
  47. lionagi/adapters/types.py +0 -21
  48. lionagi/service/endpoints/__init__.py +0 -3
  49. lionagi/service/endpoints/base.py +0 -706
  50. lionagi/service/endpoints/chat_completion.py +0 -116
  51. lionagi/service/endpoints/match_endpoint.py +0 -72
  52. lionagi/service/providers/__init__.py +0 -3
  53. lionagi/service/providers/anthropic_/__init__.py +0 -3
  54. lionagi/service/providers/anthropic_/messages.py +0 -99
  55. lionagi/service/providers/exa_/search.py +0 -80
  56. lionagi/service/providers/exa_/types.py +0 -7
  57. lionagi/service/providers/groq_/__init__.py +0 -3
  58. lionagi/service/providers/groq_/chat_completions.py +0 -56
  59. lionagi/service/providers/ollama_/__init__.py +0 -3
  60. lionagi/service/providers/ollama_/chat_completions.py +0 -134
  61. lionagi/service/providers/openai_/__init__.py +0 -3
  62. lionagi/service/providers/openai_/chat_completions.py +0 -101
  63. lionagi/service/providers/openai_/spec.py +0 -14
  64. lionagi/service/providers/openrouter_/__init__.py +0 -3
  65. lionagi/service/providers/openrouter_/chat_completions.py +0 -62
  66. lionagi/service/providers/perplexity_/__init__.py +0 -3
  67. lionagi/service/providers/perplexity_/chat_completions.py +0 -44
  68. lionagi/service/providers/perplexity_/models.py +0 -144
  69. lionagi/service/providers/types.py +0 -17
  70. /lionagi/{adapters/pandas_/__init__.py → py.typed} +0 -0
  71. /lionagi/service/{providers/exa_ → third_party}/__init__.py +0 -0
  72. /lionagi/service/{endpoints/token_calculator.py → token_calculator.py} +0 -0
  73. {lionagi-0.12.3.dist-info → lionagi-0.12.5.dist-info}/WHEEL +0 -0
  74. {lionagi-0.12.3.dist-info → lionagi-0.12.5.dist-info}/licenses/LICENSE +0 -0
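
Most of this release is a restructuring of the service layer: the lionagi/service/endpoints/ and lionagi/service/providers/ packages are removed, and their roles are taken over by lionagi/service/connections/ (endpoint plumbing: Endpoint, EndpointConfig, header and endpoint matching) and lionagi/service/third_party/ (provider request/response models). A minimal sketch of the new import locations, taken directly from the provider modules shown below:

    # New 0.12.5 import locations (as used by the provider modules in this diff).
    # The corresponding 0.12.3 modules under lionagi.service.endpoints and
    # lionagi.service.providers no longer exist in this release.
    from lionagi.config import settings
    from lionagi.service.connections.endpoint import Endpoint
    from lionagi.service.connections.endpoint_config import EndpointConfig
    from lionagi.service.third_party.openai_models import CreateChatCompletionRequest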
lionagi/service/connections/providers/anthropic_.py (new file)
@@ -0,0 +1,87 @@
+ # Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from pydantic import BaseModel
+
+ from lionagi.config import settings
+ from lionagi.service.connections.endpoint import Endpoint
+ from lionagi.service.connections.endpoint_config import EndpointConfig
+ from lionagi.service.third_party.anthropic_models import CreateMessageRequest
+
+ ANTHROPIC_MESSAGES_ENDPOINT_CONFIG = EndpointConfig(
+     name="anthropic_messages",
+     provider="anthropic",
+     base_url="https://api.anthropic.com/v1",
+     endpoint="messages",
+     method="POST",
+     openai_compatible=False,
+     auth_type="x-api-key",
+     default_headers={"anthropic-version": "2023-06-01"},
+     api_key=settings.ANTHROPIC_API_KEY or "dummy-key-for-testing",
+     request_options=CreateMessageRequest,
+ )
+
+
+ class AnthropicMessagesEndpoint(Endpoint):
+     def __init__(
+         self,
+         config: EndpointConfig = ANTHROPIC_MESSAGES_ENDPOINT_CONFIG,
+         **kwargs,
+     ):
+         super().__init__(config, **kwargs)
+
+     def create_payload(
+         self,
+         request: dict | BaseModel,
+         extra_headers: dict | None = None,
+         **kwargs,
+     ):
+         # Extract system message before validation if present
+         request_dict = (
+             request if isinstance(request, dict) else request.model_dump()
+         )
+         system = None
+
+         if "messages" in request_dict and request_dict["messages"]:
+             first_message = request_dict["messages"][0]
+             if first_message.get("role") == "system":
+                 system = first_message["content"]
+                 # Remove system message before validation
+                 request_dict["messages"] = request_dict["messages"][1:]
+         request = request_dict
+
+         payload, headers = super().create_payload(
+             request, extra_headers=extra_headers, **kwargs
+         )
+
+         # Remove api_key from payload if present
+         payload.pop("api_key", None)
+
+         if "cache_control" in payload:
+             cache_control = payload.pop("cache_control")
+             if cache_control:
+                 cache_control = {"type": "ephemeral"}
+                 last_message = payload["messages"][-1]["content"]
+                 if isinstance(last_message, str):
+                     last_message = {
+                         "type": "text",
+                         "text": last_message,
+                         "cache_control": cache_control,
+                     }
+                 elif isinstance(last_message, list) and isinstance(
+                     last_message[-1], dict
+                 ):
+                     last_message[-1]["cache_control"] = cache_control
+                 payload["messages"][-1]["content"] = (
+                     [last_message]
+                     if not isinstance(last_message, list)
+                     else last_message
+                 )
+
+         # If we extracted a system message earlier, add it to payload
+         if system:
+             system = [{"type": "text", "text": system}]
+             payload["system"] = system
+
+         return (payload, headers)
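
AnthropicMessagesEndpoint.create_payload adapts OpenAI-style requests to the Anthropic Messages API: a leading system message is lifted out of messages into the top-level system field, any api_key that leaked into the payload is dropped, and a truthy cache_control flag is rewritten as an ephemeral cache_control entry on the last message. A minimal usage sketch, assuming Endpoint.create_payload accepts a plain dict; the model id is purely illustrative:

    endpoint = AnthropicMessagesEndpoint()
    payload, headers = endpoint.create_payload(
        {
            "model": "claude-3-5-sonnet-20241022",  # illustrative model id
            "max_tokens": 256,
            "messages": [
                {"role": "system", "content": "Answer tersely."},
                {"role": "user", "content": "Hello"},
            ],
        }
    )
    # The system turn is removed from payload["messages"] and re-attached as:
    #   payload["system"] == [{"type": "text", "text": "Answer tersely."}]
    # CreateMessageRequest (the request_options model) drives validation in the
    # parent class, so required fields such as max_tokens must be present.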
lionagi/service/connections/providers/exa_.py (new file)
@@ -0,0 +1,33 @@
+ # Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from __future__ import annotations
+
+ from lionagi.config import settings
+ from lionagi.service.connections.endpoint import Endpoint
+ from lionagi.service.connections.endpoint_config import EndpointConfig
+ from lionagi.service.third_party.exa_models import ExaSearchRequest
+
+ __all__ = ("ExaSearchEndpoint",)
+
+
+ ENDPOINT_CONFIG = EndpointConfig(
+     name="exa_search",
+     provider="exa",
+     base_url="https://api.exa.ai",
+     endpoint="search",
+     method="POST",
+     request_options=ExaSearchRequest,
+     api_key=settings.EXA_API_KEY or "dummy-key-for-testing",
+     timeout=120,
+     max_retries=3,
+     auth_type="x-api-key",
+     transport_type="http",
+     content_type="application/json",
+ )
+
+
+ class ExaSearchEndpoint(Endpoint):
+     def __init__(self, config=ENDPOINT_CONFIG, **kwargs):
+         super().__init__(config=config, **kwargs)
lionagi/service/connections/providers/oai_.py (new file)
@@ -0,0 +1,166 @@
+ # Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from pydantic import BaseModel
+
+ from lionagi.config import settings
+ from lionagi.service.connections.endpoint import Endpoint
+ from lionagi.service.connections.endpoint_config import EndpointConfig
+ from lionagi.service.third_party.openai_models import (
+     CreateChatCompletionRequest,
+     CreateResponse,
+ )
+
+ __all__ = (
+     "OpenaiChatEndpoint",
+     "OpenaiResponseEndpoint",
+     "OpenrouterChatEndpoint",
+     "OPENROUTER_GEMINI_ENDPOINT_CONFIG",
+ )
+
+
+ OPENAI_CHAT_ENDPOINT_CONFIG = EndpointConfig(
+     name="openai_chat",
+     provider="openai",
+     base_url="https://api.openai.com/v1",
+     endpoint="chat/completions",
+     kwargs={"model": "gpt-4o"},
+     api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
+     auth_type="bearer",
+     content_type="application/json",
+     method="POST",
+     requires_tokens=True,
+     request_options=CreateChatCompletionRequest,
+ )
+
+ OPENAI_RESPONSE_ENDPOINT_CONFIG = EndpointConfig(
+     name="openai_response",
+     provider="openai",
+     base_url="https://api.openai.com/v1",
+     endpoint="chat/completions",  # OpenAI responses API uses same endpoint
+     kwargs={"model": "gpt-4o"},
+     api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
+     auth_type="bearer",
+     content_type="application/json",
+     method="POST",
+     requires_tokens=True,
+     request_options=CreateResponse,
+ )
+
+ OPENROUTER_CHAT_ENDPOINT_CONFIG = EndpointConfig(
+     name="openrouter_chat",
+     provider="openrouter",
+     base_url="https://openrouter.ai/api/v1",
+     endpoint="chat/completions",
+     kwargs={"model": "google/gemini-2.5-flash-preview-05-20"},
+     api_key=settings.OPENROUTER_API_KEY or "dummy-key-for-testing",
+     auth_type="bearer",
+     content_type="application/json",
+     method="POST",
+     request_options=CreateChatCompletionRequest,
+ )
+
+ OPENROUTER_GEMINI_ENDPOINT_CONFIG = EndpointConfig(
+     name="openrouter_gemini",
+     provider="openrouter",
+     base_url="https://openrouter.ai/api/v1",
+     endpoint="chat/completions",
+     kwargs={"model": "google/gemini-2.5-flash-preview-05-20"},
+     api_key=settings.OPENROUTER_API_KEY or "dummy-key-for-testing",
+     auth_type="bearer",
+     content_type="application/json",
+     method="POST",
+ )
+
+ OPENAI_EMBEDDING_ENDPOINT_CONFIG = EndpointConfig(
+     name="openai_embed",
+     provider="openai",
+     base_url="https://api.openai.com/v1",
+     endpoint="embeddings",
+     kwargs={"model": "text-embedding-3-small"},
+     api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
+     auth_type="bearer",
+     content_type="application/json",
+     method="POST",
+ )
+
+ GROQ_CHAT_ENDPOINT_CONFIG = EndpointConfig(
+     name="groq_chat",
+     provider="groq",
+     base_url="https://api.groq.com/openai/v1",
+     endpoint="chat/completions",
+     api_key=settings.GROQ_API_KEY or "dummy-key-for-testing",
+     auth_type="bearer",
+     content_type="application/json",
+     method="POST",
+ )
+
+
+ REASONING_MODELS = (
+     "o3-mini-2025-01-31",
+     "o3-mini",
+     "o1",
+     "o1-2024-12-17",
+ )
+
+ REASONING_NOT_SUPPORT_PARAMS = (
+     "temperature",
+     "top_p",
+     "logit_bias",
+     "logprobs",
+     "top_logprobs",
+ )
+
+
+ class OpenaiChatEndpoint(Endpoint):
+     def __init__(self, config=OPENAI_CHAT_ENDPOINT_CONFIG, **kwargs):
+         super().__init__(config, **kwargs)
+
+     def create_payload(
+         self,
+         request: dict | BaseModel,
+         extra_headers: dict | None = None,
+         **kwargs,
+     ):
+         """Override to handle model-specific parameter filtering."""
+         payload, headers = super().create_payload(
+             request, extra_headers, **kwargs
+         )
+
+         # Handle reasoning models
+         model = payload.get("model")
+         if model in REASONING_MODELS:
+             # Remove unsupported parameters for reasoning models
+             for param in REASONING_NOT_SUPPORT_PARAMS:
+                 payload.pop(param, None)
+
+             # Convert system role to developer role for reasoning models
+             if "messages" in payload and payload["messages"]:
+                 if payload["messages"][0].get("role") == "system":
+                     payload["messages"][0]["role"] = "developer"
+         else:
+             # Remove reasoning_effort for non-reasoning models
+             payload.pop("reasoning_effort", None)
+
+         return (payload, headers)
+
+
+ class OpenaiResponseEndpoint(Endpoint):
+     def __init__(self, config=OPENAI_RESPONSE_ENDPOINT_CONFIG, **kwargs):
+         super().__init__(config, **kwargs)
+
+
+ class OpenrouterChatEndpoint(Endpoint):
+     def __init__(self, config=OPENROUTER_CHAT_ENDPOINT_CONFIG, **kwargs):
+         super().__init__(config, **kwargs)
+
+
+ class GroqChatEndpoint(Endpoint):
+     def __init__(self, config=GROQ_CHAT_ENDPOINT_CONFIG, **kwargs):
+         super().__init__(config, **kwargs)
+
+
+ class OpenaiEmbedEndpoint(Endpoint):
+     def __init__(self, config=OPENAI_EMBEDDING_ENDPOINT_CONFIG, **kwargs):
+         super().__init__(config, **kwargs)
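
OpenaiChatEndpoint.create_payload special-cases the models listed in REASONING_MODELS: sampling parameters they reject (temperature, top_p, logit_bias, logprobs, top_logprobs) are stripped and a leading system role is rewritten to developer; for every other model, reasoning_effort is dropped instead. A rough sketch of the effect, assuming the parent Endpoint.create_payload passes the validated request through unchanged:

    endpoint = OpenaiChatEndpoint()  # defaults to gpt-4o via OPENAI_CHAT_ENDPOINT_CONFIG
    payload, _ = endpoint.create_payload(
        {
            "model": "o3-mini",
            "temperature": 0.2,  # removed: not supported by reasoning models
            "messages": [
                {"role": "system", "content": "Be brief."},  # role becomes "developer"
                {"role": "user", "content": "What is 2 + 2?"},
            ],
        }
    )
    assert "temperature" not in payload
    assert payload["messages"][0]["role"] == "developer"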
lionagi/service/connections/providers/ollama_.py (new file)
@@ -0,0 +1,122 @@
+ # Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from pydantic import BaseModel
+
+ from lionagi.service.connections.endpoint import Endpoint
+ from lionagi.service.connections.endpoint_config import EndpointConfig
+ from lionagi.service.third_party.openai_models import (
+     CreateChatCompletionRequest,
+ )
+ from lionagi.utils import is_import_installed
+
+ _HAS_OLLAMA = is_import_installed("ollama")
+
+ OLLAMA_CHAT_ENDPOINT_CONFIG = EndpointConfig(
+     name="ollama_chat",
+     provider="ollama",
+     base_url="http://localhost:11434/v1",  # Ollama desktop client default
+     endpoint="chat/completions",  # Use full OpenAI-compatible endpoint
+     kwargs={},  # Empty kwargs, model will be provided at runtime
+     openai_compatible=False,  # Use HTTP transport
+     api_key=None,  # No API key needed
+     method="POST",
+     content_type="application/json",
+     auth_type="none",  # No authentication
+     default_headers={},  # No auth headers needed
+     request_options=CreateChatCompletionRequest,  # Use Pydantic model for validation
+ )
+
+
+ class OllamaChatEndpoint(Endpoint):
+     """
+     Documentation: https://platform.openai.com/docs/api-reference/chat/create
+     """
+
+     def __init__(self, config=OLLAMA_CHAT_ENDPOINT_CONFIG, **kwargs):
+         if not _HAS_OLLAMA:
+             raise ModuleNotFoundError(
+                 "ollama is not installed, please install it with `pip install lionagi[ollama]`"
+             )
+
+         # Override api_key for Ollama (not needed)
+         if "api_key" in kwargs:
+             kwargs.pop("api_key")
+
+         super().__init__(config, **kwargs)
+
+         from ollama import list as ollama_list  # type: ignore[import]
+         from ollama import pull as ollama_pull  # type: ignore[import]
+
+         self._pull = ollama_pull
+         self._list = ollama_list
+
+     def create_payload(
+         self,
+         request: dict | BaseModel,
+         extra_headers: dict | None = None,
+         **kwargs,
+     ):
+         """Override to handle Ollama-specific needs."""
+         payload, headers = super().create_payload(
+             request, extra_headers, **kwargs
+         )
+
+         # Ollama doesn't support reasoning_effort
+         payload.pop("reasoning_effort", None)
+
+         return (payload, headers)
+
+     async def call(
+         self, request: dict | BaseModel, cache_control: bool = False, **kwargs
+     ):
+         payload, _ = self.create_payload(request, **kwargs)
+
+         # Check if model exists and pull if needed
+         model = payload["model"]
+         self._check_model(model)
+
+         # The parent call method will handle headers internally
+         return await super().call(
+             payload, cache_control=cache_control, **kwargs
+         )
+
+     def _pull_model(self, model: str):
+         from tqdm import tqdm
+
+         current_digest, bars = "", {}
+         for progress in self._pull(model, stream=True):
+             digest = progress.get("digest", "")
+             if digest != current_digest and current_digest in bars:
+                 bars[current_digest].close()
+
+             if not digest:
+                 print(progress.get("status"))
+                 continue
+
+             if digest not in bars and (total := progress.get("total")):
+                 bars[digest] = tqdm(
+                     total=total,
+                     desc=f"pulling {digest[7:19]}",
+                     unit="B",
+                     unit_scale=True,
+                 )
+
+             if completed := progress.get("completed"):
+                 bars[digest].update(completed - bars[digest].n)
+
+             current_digest = digest
+
+     def _check_model(self, model: str):
+         try:
+             available_models = [i.model for i in self._list().models]
+
+             if model not in available_models:
+                 print(
+                     f"Model '{model}' not found locally. Pulling from Ollama registry..."
+                 )
+                 self._pull_model(model)
+                 print(f"Model '{model}' successfully pulled.")
+         except Exception as e:
+             print(f"Warning: Could not check/pull model '{model}': {e}")
lionagi/service/connections/providers/perplexity_.py (new file)
@@ -0,0 +1,29 @@
+ # Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from lionagi.config import settings
+ from lionagi.service.connections.endpoint import Endpoint
+ from lionagi.service.connections.endpoint_config import EndpointConfig
+ from lionagi.service.third_party.pplx_models import PerplexityChatRequest
+
+ __all__ = ("PerplexityChatEndpoint",)
+
+
+ ENDPOINT_CONFIG = EndpointConfig(
+     name="perplexity_chat",
+     provider="perplexity",
+     base_url="https://api.perplexity.ai",
+     endpoint="chat/completions",
+     method="POST",
+     kwargs={"model": "sonar"},
+     api_key=settings.PERPLEXITY_API_KEY or "dummy-key-for-testing",
+     auth_type="bearer",
+     content_type="application/json",
+     request_options=PerplexityChatRequest,
+ )
+
+
+ class PerplexityChatEndpoint(Endpoint):
+     def __init__(self, config=ENDPOINT_CONFIG, **kwargs):
+         super().__init__(config, **kwargs)