lionagi 0.14.8__py3-none-any.whl → 0.14.9__py3-none-any.whl
- lionagi/_errors.py +120 -11
- lionagi/_types.py +0 -6
- lionagi/config.py +3 -1
- lionagi/models/operable_model.py +8 -3
- lionagi/operations/flow.py +0 -1
- lionagi/protocols/generic/event.py +2 -0
- lionagi/protocols/generic/log.py +26 -10
- lionagi/protocols/operatives/step.py +1 -1
- lionagi/protocols/types.py +9 -1
- lionagi/service/__init__.py +22 -1
- lionagi/service/connections/api_calling.py +57 -2
- lionagi/service/connections/endpoint_config.py +1 -1
- lionagi/service/connections/header_factory.py +4 -2
- lionagi/service/connections/match_endpoint.py +10 -10
- lionagi/service/connections/providers/anthropic_.py +5 -2
- lionagi/service/connections/providers/claude_code_.py +13 -17
- lionagi/service/connections/providers/claude_code_cli.py +51 -16
- lionagi/service/connections/providers/exa_.py +5 -3
- lionagi/service/connections/providers/oai_.py +116 -81
- lionagi/service/connections/providers/ollama_.py +38 -18
- lionagi/service/connections/providers/perplexity_.py +36 -14
- lionagi/service/connections/providers/types.py +30 -0
- lionagi/service/hooks/__init__.py +25 -0
- lionagi/service/hooks/_types.py +52 -0
- lionagi/service/hooks/_utils.py +85 -0
- lionagi/service/hooks/hook_event.py +67 -0
- lionagi/service/hooks/hook_registry.py +221 -0
- lionagi/service/imodel.py +120 -34
- lionagi/service/third_party/claude_code.py +715 -0
- lionagi/service/third_party/openai_model_names.py +198 -0
- lionagi/service/third_party/pplx_models.py +16 -8
- lionagi/service/types.py +21 -0
- lionagi/session/branch.py +1 -4
- lionagi/tools/base.py +1 -3
- lionagi/utils.py +8 -2
- lionagi/version.py +1 -1
- {lionagi-0.14.8.dist-info → lionagi-0.14.9.dist-info}/METADATA +2 -2
- {lionagi-0.14.8.dist-info → lionagi-0.14.9.dist-info}/RECORD +40 -36
- lionagi/service/connections/providers/_claude_code/__init__.py +0 -3
- lionagi/service/connections/providers/_claude_code/models.py +0 -244
- lionagi/service/connections/providers/_claude_code/stream_cli.py +0 -359
- lionagi/service/third_party/openai_models.py +0 -18241
- {lionagi-0.14.8.dist-info → lionagi-0.14.9.dist-info}/WHEEL +0 -0
- {lionagi-0.14.8.dist-info → lionagi-0.14.9.dist-info}/licenses/LICENSE +0 -0
lionagi/service/connections/providers/claude_code_cli.py
@@ -4,48 +4,78 @@
 
 from __future__ import annotations
 
-from collections.abc import AsyncIterator
+from collections.abc import AsyncIterator, Callable
 
 from pydantic import BaseModel
 
 from lionagi.service.connections.endpoint import Endpoint, EndpointConfig
 from lionagi.utils import to_dict
 
-from .
-from ._claude_code.stream_cli import (
+from ...third_party.claude_code import (
     ClaudeChunk,
+    ClaudeCodeRequest,
     ClaudeSession,
-
+)
+from ...third_party.claude_code import log as cc_log
+from ...third_party.claude_code import (
     stream_claude_code_cli,
 )
 
-
+_get_config = lambda: EndpointConfig(
     name="claude_code_cli",
     provider="claude_code",
     base_url="internal",
     endpoint="query_cli",
-    api_key="dummy",
+    api_key="dummy-key",
     request_options=ClaudeCodeRequest,
     timeout=18000,  # 30 mins
 )
 
+ENDPOINT_CONFIG = _get_config()  # backward compatibility
+
+
+_CLAUDE_HANDLER_PARAMS = (
+    "on_thinking",
+    "on_text",
+    "on_tool_use",
+    "on_tool_result",
+    "on_system",
+    "on_final",
+)
+
+
+def _validate_handlers(handlers: dict[str, Callable | None], /) -> None:
+    if not isinstance(handlers, dict):
+        raise ValueError("Handlers must be a dictionary")
+    for k, v in handlers.items():
+        if k not in _CLAUDE_HANDLER_PARAMS:
+            raise ValueError(f"Invalid handler key: {k}")
+        if not (v is None or callable(v)):
+            raise ValueError(
+                f"Handler value must be callable or None, got {type(v)}"
+            )
+
 
 class ClaudeCodeCLIEndpoint(Endpoint):
-    def __init__(self, config: EndpointConfig =
+    def __init__(self, config: EndpointConfig = None, **kwargs):
+        config = config or _get_config()
         super().__init__(config=config, **kwargs)
 
     @property
     def claude_handlers(self):
-        handlers = {
-            "on_thinking": None,
-            "on_text": None,
-            "on_tool_use": None,
-            "on_tool_result": None,
-            "on_system": None,
-            "on_final": None,
-        }
+        handlers = {k: None for k in _CLAUDE_HANDLER_PARAMS}
         return self.config.kwargs.get("claude_handlers", handlers)
 
+    @claude_handlers.setter
+    def claude_handlers(self, value: dict):
+        _validate_handlers(value)
+        self.config.kwargs["claude_handlers"] = value
+
+    def update_handlers(self, **kwargs):
+        _validate_handlers(kwargs)
+        handlers = {**self.claude_handlers, **kwargs}
+        self.claude_handlers = handlers
+
     def create_payload(self, request: dict | BaseModel, **kwargs):
         req_dict = {**self.config.kwargs, **to_dict(request), **kwargs}
         messages = req_dict.pop("messages")
@@ -75,6 +105,8 @@ class ClaudeCodeCLIEndpoint(Endpoint):
             request, session, **self.claude_handlers, **kwargs
         ):
             if isinstance(chunk, dict):
+                if chunk.get("type") == "done":
+                    break
                 system = chunk
             responses.append(chunk)
 
@@ -92,7 +124,7 @@ class ClaudeCodeCLIEndpoint(Endpoint):
             responses.append(chunk)
             if isinstance(chunk, ClaudeSession):
                 break
-
+        cc_log.info(
             f"Session {session.session_id} finished with {len(responses)} chunks"
         )
         texts = []
@@ -102,4 +134,7 @@ class ClaudeCodeCLIEndpoint(Endpoint):
 
         texts.append(session.result)
         session.result = "\n".join(texts)
+        if request.cli_include_summary:
+            session.populate_summary()
+
         return to_dict(session, recursive=True)
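The new setter and `update_handlers` method let callers attach streaming callbacks after construction, with `_validate_handlers` rejecting unknown keys and non-callable values. A minimal usage sketch (the handler body is hypothetical; the endpoint class, handler keys, and `update_handlers` come from the hunks above):

from lionagi.service.connections.providers.claude_code_cli import (
    ClaudeCodeCLIEndpoint,
)

async def print_text(chunk) -> None:
    # Hypothetical callback: echo each text chunk as it streams.
    print(chunk)

endpoint = ClaudeCodeCLIEndpoint()  # config falls back to _get_config()
endpoint.update_handlers(on_text=print_text)  # merged into existing handlers
# endpoint.update_handlers(on_answer=...) would raise ValueError: invalid key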
lionagi/service/connections/providers/exa_.py
@@ -11,8 +11,7 @@ from lionagi.service.third_party.exa_models import ExaSearchRequest
 
 __all__ = ("ExaSearchEndpoint",)
 
-
-ENDPOINT_CONFIG = EndpointConfig(
+_get_config = lambda: EndpointConfig(
     name="exa_search",
     provider="exa",
     base_url="https://api.exa.ai",
@@ -27,7 +26,10 @@ ENDPOINT_CONFIG = EndpointConfig(
     content_type="application/json",
 )
 
+ENDPOINT_CONFIG = _get_config()  # backward compatibility
+
 
 class ExaSearchEndpoint(Endpoint):
-    def __init__(self, config=
+    def __init__(self, config: EndpointConfig = None, **kwargs):
+        config = config or _get_config()
         super().__init__(config=config, **kwargs)
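Replacing the module-level constant with a `_get_config` factory means every endpoint receives a fresh `EndpointConfig`, so mutating one instance's config can no longer leak into another. A minimal sketch of the resulting behavior (assuming `Endpoint` stores the config on `self.config`, as the claude_code_cli hunks above suggest):

from lionagi.service.connections.providers.exa_ import ExaSearchEndpoint

a = ExaSearchEndpoint()
b = ExaSearchEndpoint()
# Each constructor call builds its own EndpointConfig via _get_config(),
# so per-instance tweaks stay local to that instance.
assert a.config is not b.config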
lionagi/service/connections/providers/oai_.py
@@ -2,107 +2,126 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+"""
+OpenAI and OpenAI-compatible endpoint configurations.
+
+This module provides endpoint configurations for:
+- OpenAI (chat, response, embedding)
+- OpenRouter (OpenAI-compatible)
+- Groq (OpenAI-compatible)
+
+Each provider has a helper function (_get_*_config) that creates
+configurations with sensible defaults that can be overridden.
+"""
+
 from pydantic import BaseModel
 
 from lionagi.config import settings
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
-from lionagi.service.third_party.
-
-
+from lionagi.service.third_party.openai_model_names import (
+    REASONING_MODELS,
+    is_reasoning_model,
 )
 
 __all__ = (
     "OpenaiChatEndpoint",
     "OpenaiResponseEndpoint",
+    "OpenaiEmbedEndpoint",
     "OpenrouterChatEndpoint",
+    "GroqChatEndpoint",
+    "OPENAI_CHAT_ENDPOINT_CONFIG",
+    "OPENAI_RESPONSE_ENDPOINT_CONFIG",
+    "OPENAI_EMBEDDING_ENDPOINT_CONFIG",
+    "OPENROUTER_CHAT_ENDPOINT_CONFIG",
     "OPENROUTER_GEMINI_ENDPOINT_CONFIG",
+    "GROQ_CHAT_ENDPOINT_CONFIG",
+    "REASONING_MODELS",
+    "REASONING_NOT_SUPPORT_PARAMS",
 )
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+def _get_openai_config(**kwargs):
+    """Create OpenAI endpoint configuration with defaults."""
+    config = dict(
+        name="openai_chat",
+        provider="openai",
+        base_url="https://api.openai.com/v1",
+        endpoint="chat/completions",
+        kwargs={"model": settings.OPENAI_DEFAULT_MODEL},
+        api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
+        auth_type="bearer",
+        content_type="application/json",
+        method="POST",
+        requires_tokens=True,
+        # NOTE: OpenAI models have incorrect role literals, only use for param validation
+        # request_options=CreateChatCompletionRequest,
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+def _get_openrouter_config(**kwargs):
+    """Create OpenRouter endpoint configuration with defaults."""
+    config = dict(
+        name="openrouter_chat",
+        provider="openrouter",
+        base_url="https://openrouter.ai/api/v1",
+        endpoint="chat/completions",
+        kwargs={"model": "google/gemini-2.5-flash"},
+        api_key=settings.OPENROUTER_API_KEY or "dummy-key-for-testing",
+        auth_type="bearer",
+        content_type="application/json",
+        method="POST",
+        # NOTE: OpenRouter uses OpenAI-compatible format
+        # request_options=CreateChatCompletionRequest,
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+def _get_groq_config(**kwargs):
+    """Create Groq endpoint configuration with defaults."""
+    config = dict(
+        name="groq_chat",
+        provider="groq",
+        base_url="https://api.groq.com/openai/v1",
+        endpoint="chat/completions",
+        kwargs={"model": "llama-3.3-70b-versatile"},
+        api_key=settings.GROQ_API_KEY or "dummy-key-for-testing",
+        auth_type="bearer",
+        content_type="application/json",
+        method="POST",
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+# OpenAI endpoints
+OPENAI_CHAT_ENDPOINT_CONFIG = _get_openai_config()
+
+OPENAI_RESPONSE_ENDPOINT_CONFIG = _get_openai_config(
     name="openai_response",
-
-    base_url="https://api.openai.com/v1",
-    endpoint="chat/completions",  # OpenAI responses API uses same endpoint
-    kwargs={"model": "gpt-4o"},
-    api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    method="POST",
-    requires_tokens=True,
-    request_options=CreateResponse,
-)
-
-OPENROUTER_CHAT_ENDPOINT_CONFIG = EndpointConfig(
-    name="openrouter_chat",
-    provider="openrouter",
-    base_url="https://openrouter.ai/api/v1",
-    endpoint="chat/completions",
-    kwargs={"model": "google/gemini-2.5-flash"},
-    api_key=settings.OPENROUTER_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    method="POST",
-    request_options=CreateChatCompletionRequest,
+    endpoint="responses",
 )
 
-OPENAI_EMBEDDING_ENDPOINT_CONFIG =
+OPENAI_EMBEDDING_ENDPOINT_CONFIG = _get_openai_config(
     name="openai_embed",
-    provider="openai",
-    base_url="https://api.openai.com/v1",
     endpoint="embeddings",
     kwargs={"model": "text-embedding-3-small"},
-    api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    method="POST",
-)
-
-GROQ_CHAT_ENDPOINT_CONFIG = EndpointConfig(
-    name="groq_chat",
-    provider="groq",
-    base_url="https://api.groq.com/openai/v1",
-    endpoint="chat/completions",
-    api_key=settings.GROQ_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    method="POST",
 )
 
+# OpenRouter endpoints
+OPENROUTER_CHAT_ENDPOINT_CONFIG = _get_openrouter_config()
 
-
-    "
-    "
-    "o1-preview-2024-09-12",
-    "o1-pro",
-    "o1-pro-2025-03-19",
-    "o3-pro",
-    "o3-pro-2025-06-10",
-    "o3",
-    "o3-2025-04-16",
-    "o4-mini",
-    "o4-mini-2025-04-16",
-    "o3-mini",
-    "o3-mini-2025-01-31",
-    "o1-mini",
-    "o1-mini-2024-09-12",
+OPENROUTER_GEMINI_ENDPOINT_CONFIG = _get_openrouter_config(
+    name="openrouter_gemini",
+    kwargs={"model": "google/gemini-2.5-flash"},
 )
 
+# Groq endpoints
+GROQ_CHAT_ENDPOINT_CONFIG = _get_groq_config()
+
 REASONING_NOT_SUPPORT_PARAMS = (
     "temperature",
     "top_p",
@@ -113,7 +132,8 @@ REASONING_NOT_SUPPORT_PARAMS (
 
 
 class OpenaiChatEndpoint(Endpoint):
-    def __init__(self, config=
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_openai_config()
         super().__init__(config, **kwargs)
 
     def create_payload(
@@ -129,7 +149,11 @@ class OpenaiChatEndpoint(Endpoint):
 
         # Handle reasoning models
         model = payload.get("model")
-        if
+        if (
+            model
+            and is_reasoning_model(model)
+            and not model.startswith("gpt-5")
+        ):
             # Remove unsupported parameters for reasoning models
             for param in REASONING_NOT_SUPPORT_PARAMS:
                 payload.pop(param, None)
@@ -146,20 +170,31 @@
 
 
 class OpenaiResponseEndpoint(Endpoint):
-    def __init__(self, config=
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_openai_config(
+            name="openai_response",
+            endpoint="responses",
+        )
         super().__init__(config, **kwargs)
 
 
 class OpenrouterChatEndpoint(Endpoint):
-    def __init__(self, config=
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_openrouter_config()
         super().__init__(config, **kwargs)
 
 
 class GroqChatEndpoint(Endpoint):
-    def __init__(self, config=
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_groq_config()
         super().__init__(config, **kwargs)
 
 
 class OpenaiEmbedEndpoint(Endpoint):
-    def __init__(self, config=
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_openai_config(
+            name="openai_embed",
+            endpoint="embeddings",
+            kwargs={"model": "text-embedding-3-small"},
+        )
         super().__init__(config, **kwargs)
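The reworked `create_payload` consults `is_reasoning_model` instead of the old hard-coded model tuple, and models prefixed `gpt-5` keep their sampling parameters. A rough standalone equivalent of the pruning logic (the parameter list is abbreviated to the two `REASONING_NOT_SUPPORT_PARAMS` entries visible in the hunk above; the real tuple may be longer):

from lionagi.service.third_party.openai_model_names import is_reasoning_model

payload = {"model": "o3-mini", "temperature": 0.2, "top_p": 0.9}
model = payload.get("model")
if model and is_reasoning_model(model) and not model.startswith("gpt-5"):
    # Reasoning models reject sampling parameters, so drop them.
    for param in ("temperature", "top_p"):
        payload.pop(param, None)

print(payload)  # {'model': 'o3-mini'}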
lionagi/service/connections/providers/ollama_.py
@@ -2,31 +2,50 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+"""
+Ollama endpoint configuration.
+
+Ollama provides local model hosting with both native and OpenAI-compatible APIs.
+This module configures the OpenAI-compatible endpoint for consistency.
+"""
+
 from pydantic import BaseModel
 
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
-from lionagi.service.third_party.openai_models import (
-    CreateChatCompletionRequest,
-)
 from lionagi.utils import is_import_installed
 
+__all__ = (
+    "OllamaChatEndpoint",
+    "OLLAMA_CHAT_ENDPOINT_CONFIG",
+)
+
 _HAS_OLLAMA = is_import_installed("ollama")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+def _get_ollama_config(**kwargs):
+    """Create Ollama endpoint configuration with defaults."""
+    config = dict(
+        name="ollama_chat",
+        provider="ollama",
+        base_url="http://localhost:11434/v1",  # OpenAI-compatible endpoint
+        endpoint="chat/completions",
+        kwargs={},  # Model will be provided at runtime
+        openai_compatible=False,  # Use HTTP transport
+        api_key=None,  # No API key needed
+        method="POST",
+        content_type="application/json",
+        auth_type="none",  # No authentication
+        default_headers={},  # No auth headers needed
+        # NOTE: Not using request_options due to OpenAI model role literal issues
+        # request_options=CreateChatCompletionRequest,
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+# Default OpenAI-compatible configuration
+OLLAMA_CHAT_ENDPOINT_CONFIG = _get_ollama_config()
 
 
 class OllamaChatEndpoint(Endpoint):
@@ -34,7 +53,7 @@ class OllamaChatEndpoint(Endpoint):
     Documentation: https://platform.openai.com/docs/api-reference/chat/create
     """
 
-    def __init__(self, config=
+    def __init__(self, config=None, **kwargs):
         if not _HAS_OLLAMA:
             raise ModuleNotFoundError(
                 "ollama is not installed, please install it with `pip install lionagi[ollama]`"
@@ -44,6 +63,7 @@ class OllamaChatEndpoint(Endpoint):
         if "api_key" in kwargs:
             kwargs.pop("api_key")
 
+        config = config or _get_ollama_config()
         super().__init__(config, **kwargs)
 
         from ollama import list as ollama_list  # type: ignore[import]
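Since `_get_ollama_config` accepts overrides, pointing the endpoint at a non-default server is a one-liner. A sketch (the host address is made up; `_get_ollama_config` is a private helper, imported here purely for illustration):

from lionagi.service.connections.providers.ollama_ import _get_ollama_config

# Hypothetical remote Ollama host; the default is http://localhost:11434/v1.
config = _get_ollama_config(base_url="http://10.0.0.5:11434/v1")
# All other defaults (no auth, chat/completions endpoint) are preserved.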
lionagi/service/connections/providers/perplexity_.py
@@ -2,28 +2,50 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+"""
+Perplexity AI endpoint configuration.
+
+Perplexity provides real-time web search and Q&A capabilities through their Sonar API.
+This module configures endpoints for different Sonar model tiers.
+"""
+
 from lionagi.config import settings
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
 from lionagi.service.third_party.pplx_models import PerplexityChatRequest
 
-__all__ = (
+__all__ = (
+    "PerplexityChatEndpoint",
+    "PERPLEXITY_CHAT_ENDPOINT_CONFIG",
+)
 
 
-
-
-
-
-
-
-
-
-
-
-
-
+def _get_perplexity_config(**kwargs):
+    """Create Perplexity endpoint configuration with defaults."""
+    config = dict(
+        name="perplexity_chat",
+        provider="perplexity",
+        base_url="https://api.perplexity.ai",
+        endpoint="chat/completions",
+        method="POST",
+        kwargs={"model": "sonar"},  # Default to base sonar model
+        api_key=settings.PERPLEXITY_API_KEY or "dummy-key-for-testing",
+        auth_type="bearer",
+        content_type="application/json",
+        request_options=PerplexityChatRequest,
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+# Default configuration (users can specify model at runtime)
+PERPLEXITY_CHAT_ENDPOINT_CONFIG = _get_perplexity_config()
+
+# Legacy naming for backward compatibility
+ENDPOINT_CONFIG = PERPLEXITY_CHAT_ENDPOINT_CONFIG
 
 
 class PerplexityChatEndpoint(Endpoint):
-    def __init__(self, config=
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_perplexity_config()
         super().__init__(config, **kwargs)
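Because the defaults live in `_get_perplexity_config`, selecting a different Sonar tier only requires overriding `kwargs`. A sketch (the "sonar-pro" model name is an assumption; only "sonar" appears in the diff, and the private helper is imported purely for illustration):

from lionagi.service.connections.providers.perplexity_ import (
    _get_perplexity_config,
)

# Hypothetical higher tier; the configured default model is "sonar".
config = _get_perplexity_config(kwargs={"model": "sonar-pro"})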
lionagi/service/connections/providers/types.py (new file)
@@ -0,0 +1,30 @@
+from .anthropic_ import AnthropicMessagesEndpoint
+from .claude_code_ import ClaudeCodeEndpoint, ClaudeCodeRequest
+from .claude_code_cli import ClaudeCodeCLIEndpoint
+from .exa_ import ExaSearchEndpoint, ExaSearchRequest
+from .oai_ import (
+    GroqChatEndpoint,
+    OpenaiChatEndpoint,
+    OpenaiEmbedEndpoint,
+    OpenaiResponseEndpoint,
+    OpenrouterChatEndpoint,
+)
+from .ollama_ import OllamaChatEndpoint
+from .perplexity_ import PerplexityChatEndpoint, PerplexityChatRequest
+
+__all__ = (
+    "AnthropicMessagesEndpoint",
+    "ClaudeCodeEndpoint",
+    "ClaudeCodeRequest",
+    "ClaudeCodeCLIEndpoint",
+    "ExaSearchEndpoint",
+    "ExaSearchRequest",
+    "OpenaiChatEndpoint",
+    "OpenaiEmbedEndpoint",
+    "OpenaiResponseEndpoint",
+    "OpenrouterChatEndpoint",
+    "GroqChatEndpoint",
+    "OllamaChatEndpoint",
+    "PerplexityChatEndpoint",
+    "PerplexityChatRequest",
+)
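The new aggregator module gives callers a single import surface for every provider endpoint, for example:

from lionagi.service.connections.providers.types import (
    ExaSearchEndpoint,
    OpenaiChatEndpoint,
)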
lionagi/service/hooks/__init__.py (new file)
@@ -0,0 +1,25 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+from lionagi.protocols.types import DataLogger
+
+from ._types import AssosiatedEventInfo, HookDict, HookEventTypes
+from .hook_event import HookEvent
+from .hook_registry import HookRegistry
+
+global_hook_logger = DataLogger(
+    persist_dir="./data/logs",
+    subfolder="hooks",
+    file_prefix="hook",
+    capacity=1000,
+)
+
+
+__all__ = (
+    "HookEventTypes",
+    "HookDict",
+    "AssosiatedEventInfo",
+    "HookEvent",
+    "HookRegistry",
+    "global_hook_logger",
+)
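`global_hook_logger` is a module-level `DataLogger` shared by the hooks subsystem, and the three hook lifecycle stages are defined in `._types` (shown in the next file). A minimal sketch of the public names this package exposes (no behavior beyond what the diff shows is assumed):

from lionagi.service.hooks import HookEventTypes, global_hook_logger

# The three lifecycle stages defined in lionagi/service/hooks/_types.py:
stages = [
    HookEventTypes.PreEventCreate,  # "pre_event_create"
    HookEventTypes.PreInvokation,   # "pre_invokation"
    HookEventTypes.PostInvokation,  # "post_invokation"
]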
lionagi/service/hooks/_types.py (new file)
@@ -0,0 +1,52 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+from collections.abc import Awaitable, Callable
+from typing import TypeVar
+
+from typing_extensions import TypedDict
+
+from lionagi.utils import StringEnum
+
+SC = TypeVar("SC")  # streaming chunk type
+
+__all__ = (
+    "HookEventTypes",
+    "ALLOWED_HOOKS_TYPES",
+    "HookDict",
+    "StreamHandlers",
+    "AssosiatedEventInfo",
+)
+
+
+class HookEventTypes(StringEnum):
+    PreEventCreate = "pre_event_create"
+    PreInvokation = "pre_invokation"
+    PostInvokation = "post_invokation"
+
+
+ALLOWED_HOOKS_TYPES = HookEventTypes.allowed()
+
+
+class HookDict(TypedDict):
+    pre_event_create: Callable | None
+    pre_invokation: Callable | None
+    post_invokation: Callable | None
+
+
+StreamHandlers = dict[str, Callable[[SC], Awaitable[None]]]
+
+
+class AssosiatedEventInfo(TypedDict, total=False):
+    """Information about the event associated with the hook."""
+
+    lion_class: str
+    """Full qualified name of the event class."""
+
+    event_id: str
+    """ID of the event."""
+
+    event_created_at: float
+    """Creation timestamp of the event."""
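A `HookDict` maps each lifecycle stage to an optional callable. A minimal sketch of populating one (the handler body is hypothetical; the exact call signature is defined by `HookRegistry`, which this diff does not show):

from lionagi.service.hooks import HookDict

async def audit(*args, **kwargs) -> None:
    # Hypothetical pre-create handler; real signatures come from HookRegistry.
    ...

hooks: HookDict = {
    "pre_event_create": audit,
    "pre_invokation": None,
    "post_invokation": None,
}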