lionagi 0.14.8__py3-none-any.whl → 0.14.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/_errors.py +120 -11
- lionagi/_types.py +0 -6
- lionagi/config.py +3 -1
- lionagi/fields/reason.py +1 -1
- lionagi/libs/concurrency/throttle.py +79 -0
- lionagi/libs/parse.py +2 -1
- lionagi/libs/unstructured/__init__.py +0 -0
- lionagi/libs/unstructured/pdf_to_image.py +45 -0
- lionagi/libs/unstructured/read_image_to_base64.py +33 -0
- lionagi/libs/validate/to_num.py +378 -0
- lionagi/libs/validate/xml_parser.py +203 -0
- lionagi/models/operable_model.py +8 -3
- lionagi/operations/flow.py +0 -1
- lionagi/protocols/generic/event.py +2 -0
- lionagi/protocols/generic/log.py +26 -10
- lionagi/protocols/operatives/step.py +1 -1
- lionagi/protocols/types.py +9 -1
- lionagi/service/__init__.py +22 -1
- lionagi/service/connections/api_calling.py +57 -2
- lionagi/service/connections/endpoint_config.py +1 -1
- lionagi/service/connections/header_factory.py +4 -2
- lionagi/service/connections/match_endpoint.py +10 -10
- lionagi/service/connections/providers/anthropic_.py +5 -2
- lionagi/service/connections/providers/claude_code_.py +13 -17
- lionagi/service/connections/providers/claude_code_cli.py +51 -16
- lionagi/service/connections/providers/exa_.py +5 -3
- lionagi/service/connections/providers/oai_.py +116 -81
- lionagi/service/connections/providers/ollama_.py +38 -18
- lionagi/service/connections/providers/perplexity_.py +36 -14
- lionagi/service/connections/providers/types.py +30 -0
- lionagi/service/hooks/__init__.py +25 -0
- lionagi/service/hooks/_types.py +52 -0
- lionagi/service/hooks/_utils.py +85 -0
- lionagi/service/hooks/hook_event.py +67 -0
- lionagi/service/hooks/hook_registry.py +221 -0
- lionagi/service/imodel.py +120 -34
- lionagi/service/third_party/claude_code.py +715 -0
- lionagi/service/third_party/openai_model_names.py +198 -0
- lionagi/service/third_party/pplx_models.py +16 -8
- lionagi/service/types.py +21 -0
- lionagi/session/branch.py +1 -4
- lionagi/tools/base.py +1 -3
- lionagi/tools/file/reader.py +1 -1
- lionagi/tools/memory/tools.py +2 -2
- lionagi/utils.py +12 -775
- lionagi/version.py +1 -1
- {lionagi-0.14.8.dist-info → lionagi-0.14.10.dist-info}/METADATA +6 -2
- {lionagi-0.14.8.dist-info → lionagi-0.14.10.dist-info}/RECORD +50 -40
- lionagi/service/connections/providers/_claude_code/__init__.py +0 -3
- lionagi/service/connections/providers/_claude_code/models.py +0 -244
- lionagi/service/connections/providers/_claude_code/stream_cli.py +0 -359
- lionagi/service/third_party/openai_models.py +0 -18241
- {lionagi-0.14.8.dist-info → lionagi-0.14.10.dist-info}/WHEEL +0 -0
- {lionagi-0.14.8.dist-info → lionagi-0.14.10.dist-info}/licenses/LICENSE +0 -0
```diff
--- a/lionagi/service/connections/providers/oai_.py
+++ b/lionagi/service/connections/providers/oai_.py
@@ -2,107 +2,126 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+"""
+OpenAI and OpenAI-compatible endpoint configurations.
+
+This module provides endpoint configurations for:
+- OpenAI (chat, response, embedding)
+- OpenRouter (OpenAI-compatible)
+- Groq (OpenAI-compatible)
+
+Each provider has a helper function (_get_*_config) that creates
+configurations with sensible defaults that can be overridden.
+"""
+
 from pydantic import BaseModel
 
 from lionagi.config import settings
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
-from lionagi.service.third_party.openai_models import (
-    CreateChatCompletionRequest,
-    CreateResponse,
+from lionagi.service.third_party.openai_model_names import (
+    REASONING_MODELS,
+    is_reasoning_model,
 )
 
 __all__ = (
     "OpenaiChatEndpoint",
     "OpenaiResponseEndpoint",
+    "OpenaiEmbedEndpoint",
     "OpenrouterChatEndpoint",
+    "GroqChatEndpoint",
+    "OPENAI_CHAT_ENDPOINT_CONFIG",
+    "OPENAI_RESPONSE_ENDPOINT_CONFIG",
+    "OPENAI_EMBEDDING_ENDPOINT_CONFIG",
+    "OPENROUTER_CHAT_ENDPOINT_CONFIG",
     "OPENROUTER_GEMINI_ENDPOINT_CONFIG",
+    "GROQ_CHAT_ENDPOINT_CONFIG",
+    "REASONING_MODELS",
+    "REASONING_NOT_SUPPORT_PARAMS",
 )
 
 
-OPENAI_CHAT_ENDPOINT_CONFIG = EndpointConfig(
-    name="openai_chat",
-    provider="openai",
-    base_url="https://api.openai.com/v1",
-    endpoint="chat/completions",
-    kwargs={"model": settings.OPENAI_DEFAULT_MODEL},
-    api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    method="POST",
-    requires_tokens=True,
-    request_options=CreateChatCompletionRequest,
-)
-
-OPENAI_RESPONSE_ENDPOINT_CONFIG = EndpointConfig(
+def _get_openai_config(**kwargs):
+    """Create OpenAI endpoint configuration with defaults."""
+    config = dict(
+        name="openai_chat",
+        provider="openai",
+        base_url="https://api.openai.com/v1",
+        endpoint="chat/completions",
+        kwargs={"model": settings.OPENAI_DEFAULT_MODEL},
+        api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
+        auth_type="bearer",
+        content_type="application/json",
+        method="POST",
+        requires_tokens=True,
+        # NOTE: OpenAI models have incorrect role literals, only use for param validation
+        # request_options=CreateChatCompletionRequest,
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+def _get_openrouter_config(**kwargs):
+    """Create OpenRouter endpoint configuration with defaults."""
+    config = dict(
+        name="openrouter_chat",
+        provider="openrouter",
+        base_url="https://openrouter.ai/api/v1",
+        endpoint="chat/completions",
+        kwargs={"model": "google/gemini-2.5-flash"},
+        api_key=settings.OPENROUTER_API_KEY or "dummy-key-for-testing",
+        auth_type="bearer",
+        content_type="application/json",
+        method="POST",
+        # NOTE: OpenRouter uses OpenAI-compatible format
+        # request_options=CreateChatCompletionRequest,
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+def _get_groq_config(**kwargs):
+    """Create Groq endpoint configuration with defaults."""
+    config = dict(
+        name="groq_chat",
+        provider="groq",
+        base_url="https://api.groq.com/openai/v1",
+        endpoint="chat/completions",
+        kwargs={"model": "llama-3.3-70b-versatile"},
+        api_key=settings.GROQ_API_KEY or "dummy-key-for-testing",
+        auth_type="bearer",
+        content_type="application/json",
+        method="POST",
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+# OpenAI endpoints
+OPENAI_CHAT_ENDPOINT_CONFIG = _get_openai_config()
+
+OPENAI_RESPONSE_ENDPOINT_CONFIG = _get_openai_config(
     name="openai_response",
-    provider="openai",
-    base_url="https://api.openai.com/v1",
-    endpoint="chat/completions",  # OpenAI responses API uses same endpoint
-    kwargs={"model": "gpt-4o"},
-    api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    method="POST",
-    requires_tokens=True,
-    request_options=CreateResponse,
-)
-
-OPENROUTER_CHAT_ENDPOINT_CONFIG = EndpointConfig(
-    name="openrouter_chat",
-    provider="openrouter",
-    base_url="https://openrouter.ai/api/v1",
-    endpoint="chat/completions",
-    kwargs={"model": "google/gemini-2.5-flash"},
-    api_key=settings.OPENROUTER_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    method="POST",
-    request_options=CreateChatCompletionRequest,
+    endpoint="responses",
 )
 
-OPENAI_EMBEDDING_ENDPOINT_CONFIG = EndpointConfig(
+OPENAI_EMBEDDING_ENDPOINT_CONFIG = _get_openai_config(
     name="openai_embed",
-    provider="openai",
-    base_url="https://api.openai.com/v1",
     endpoint="embeddings",
     kwargs={"model": "text-embedding-3-small"},
-    api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    method="POST",
-)
-
-GROQ_CHAT_ENDPOINT_CONFIG = EndpointConfig(
-    name="groq_chat",
-    provider="groq",
-    base_url="https://api.groq.com/openai/v1",
-    endpoint="chat/completions",
-    api_key=settings.GROQ_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    method="POST",
 )
 
+# OpenRouter endpoints
+OPENROUTER_CHAT_ENDPOINT_CONFIG = _get_openrouter_config()
 
-REASONING_MODELS = (
-    "o1",
-    "o1-preview",
-    "o1-preview-2024-09-12",
-    "o1-pro",
-    "o1-pro-2025-03-19",
-    "o3-pro",
-    "o3-pro-2025-06-10",
-    "o3",
-    "o3-2025-04-16",
-    "o4-mini",
-    "o4-mini-2025-04-16",
-    "o3-mini",
-    "o3-mini-2025-01-31",
-    "o1-mini",
-    "o1-mini-2024-09-12",
+OPENROUTER_GEMINI_ENDPOINT_CONFIG = _get_openrouter_config(
+    name="openrouter_gemini",
+    kwargs={"model": "google/gemini-2.5-flash"},
 )
 
+# Groq endpoints
+GROQ_CHAT_ENDPOINT_CONFIG = _get_groq_config()
+
 REASONING_NOT_SUPPORT_PARAMS = (
     "temperature",
     "top_p",
@@ -113,7 +132,8 @@ REASONING_NOT_SUPPORT_PARAMS = (
 
 
 class OpenaiChatEndpoint(Endpoint):
-    def __init__(self, config=OPENAI_CHAT_ENDPOINT_CONFIG, **kwargs):
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_openai_config()
         super().__init__(config, **kwargs)
 
     def create_payload(
@@ -129,7 +149,11 @@ class OpenaiChatEndpoint(Endpoint):
 
         # Handle reasoning models
         model = payload.get("model")
-        if model in REASONING_MODELS:
+        if (
+            model
+            and is_reasoning_model(model)
+            and not model.startswith("gpt-5")
+        ):
             # Remove unsupported parameters for reasoning models
             for param in REASONING_NOT_SUPPORT_PARAMS:
                 payload.pop(param, None)
```
```diff
--- a/lionagi/service/connections/providers/oai_.py
+++ b/lionagi/service/connections/providers/oai_.py
@@ -146,20 +170,31 @@ class OpenaiChatEndpoint(Endpoint):
 
 
 class OpenaiResponseEndpoint(Endpoint):
-    def __init__(self, config=OPENAI_RESPONSE_ENDPOINT_CONFIG, **kwargs):
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_openai_config(
+            name="openai_response",
+            endpoint="responses",
+        )
         super().__init__(config, **kwargs)
 
 
 class OpenrouterChatEndpoint(Endpoint):
-    def __init__(self, config=OPENROUTER_CHAT_ENDPOINT_CONFIG, **kwargs):
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_openrouter_config()
         super().__init__(config, **kwargs)
 
 
 class GroqChatEndpoint(Endpoint):
-    def __init__(self, config=GROQ_CHAT_ENDPOINT_CONFIG, **kwargs):
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_groq_config()
         super().__init__(config, **kwargs)
 
 
 class OpenaiEmbedEndpoint(Endpoint):
-    def __init__(self, config=OPENAI_EMBEDDING_ENDPOINT_CONFIG, **kwargs):
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_openai_config(
+            name="openai_embed",
+            endpoint="embeddings",
+            kwargs={"model": "text-embedding-3-small"},
+        )
         super().__init__(config, **kwargs)
```
```diff
--- a/lionagi/service/connections/providers/ollama_.py
+++ b/lionagi/service/connections/providers/ollama_.py
@@ -2,31 +2,50 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+"""
+Ollama endpoint configuration.
+
+Ollama provides local model hosting with both native and OpenAI-compatible APIs.
+This module configures the OpenAI-compatible endpoint for consistency.
+"""
+
 from pydantic import BaseModel
 
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
-from lionagi.service.third_party.openai_models import (
-    CreateChatCompletionRequest,
-)
 from lionagi.utils import is_import_installed
 
+__all__ = (
+    "OllamaChatEndpoint",
+    "OLLAMA_CHAT_ENDPOINT_CONFIG",
+)
+
 _HAS_OLLAMA = is_import_installed("ollama")
 
-OLLAMA_CHAT_ENDPOINT_CONFIG = EndpointConfig(
-    name="ollama_chat",
-    provider="ollama",
-    base_url="http://localhost:11434/v1",
-    endpoint="chat/completions",
-    kwargs={},
-    openai_compatible=False,
-    api_key=None,
-    method="POST",
-    content_type="application/json",
-    auth_type="none",
-    default_headers={},
-    request_options=CreateChatCompletionRequest,
-)
+
+def _get_ollama_config(**kwargs):
+    """Create Ollama endpoint configuration with defaults."""
+    config = dict(
+        name="ollama_chat",
+        provider="ollama",
+        base_url="http://localhost:11434/v1",  # OpenAI-compatible endpoint
+        endpoint="chat/completions",
+        kwargs={},  # Model will be provided at runtime
+        openai_compatible=False,  # Use HTTP transport
+        api_key=None,  # No API key needed
+        method="POST",
+        content_type="application/json",
+        auth_type="none",  # No authentication
+        default_headers={},  # No auth headers needed
+        # NOTE: Not using request_options due to OpenAI model role literal issues
+        # request_options=CreateChatCompletionRequest,
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+# Default OpenAI-compatible configuration
+OLLAMA_CHAT_ENDPOINT_CONFIG = _get_ollama_config()
 
 
 class OllamaChatEndpoint(Endpoint):
@@ -34,7 +53,7 @@ class OllamaChatEndpoint(Endpoint):
     Documentation: https://platform.openai.com/docs/api-reference/chat/create
     """
 
-    def __init__(self, config=OLLAMA_CHAT_ENDPOINT_CONFIG, **kwargs):
+    def __init__(self, config=None, **kwargs):
         if not _HAS_OLLAMA:
             raise ModuleNotFoundError(
                 "ollama is not installed, please install it with `pip install lionagi[ollama]`"
@@ -44,6 +63,7 @@ class OllamaChatEndpoint(Endpoint):
         if "api_key" in kwargs:
            kwargs.pop("api_key")
 
+        config = config or _get_ollama_config()
         super().__init__(config, **kwargs)
 
         from ollama import list as ollama_list  # type: ignore[import]
```
```diff
--- a/lionagi/service/connections/providers/perplexity_.py
+++ b/lionagi/service/connections/providers/perplexity_.py
@@ -2,28 +2,50 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+"""
+Perplexity AI endpoint configuration.
+
+Perplexity provides real-time web search and Q&A capabilities through their Sonar API.
+This module configures endpoints for different Sonar model tiers.
+"""
+
 from lionagi.config import settings
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
 from lionagi.service.third_party.pplx_models import PerplexityChatRequest
 
-__all__ = ("PerplexityChatEndpoint",)
+__all__ = (
+    "PerplexityChatEndpoint",
+    "PERPLEXITY_CHAT_ENDPOINT_CONFIG",
+)
 
 
-ENDPOINT_CONFIG = EndpointConfig(
-    name="perplexity_chat",
-    provider="perplexity",
-    base_url="https://api.perplexity.ai",
-    endpoint="chat/completions",
-    method="POST",
-    kwargs={"model": "sonar"},
-    api_key=settings.PERPLEXITY_API_KEY or "dummy-key-for-testing",
-    auth_type="bearer",
-    content_type="application/json",
-    request_options=PerplexityChatRequest,
-)
+def _get_perplexity_config(**kwargs):
+    """Create Perplexity endpoint configuration with defaults."""
+    config = dict(
+        name="perplexity_chat",
+        provider="perplexity",
+        base_url="https://api.perplexity.ai",
+        endpoint="chat/completions",
+        method="POST",
+        kwargs={"model": "sonar"},  # Default to base sonar model
+        api_key=settings.PERPLEXITY_API_KEY or "dummy-key-for-testing",
+        auth_type="bearer",
+        content_type="application/json",
+        request_options=PerplexityChatRequest,
+    )
+    config.update(kwargs)
+    return EndpointConfig(**config)
+
+
+# Default configuration (users can specify model at runtime)
+PERPLEXITY_CHAT_ENDPOINT_CONFIG = _get_perplexity_config()
+
+# Legacy naming for backward compatibility
+ENDPOINT_CONFIG = PERPLEXITY_CHAT_ENDPOINT_CONFIG
 
 
 class PerplexityChatEndpoint(Endpoint):
-    def __init__(self, config=ENDPOINT_CONFIG, **kwargs):
+    def __init__(self, config=None, **kwargs):
+        config = config or _get_perplexity_config()
         super().__init__(config, **kwargs)
```
```diff
--- /dev/null
+++ b/lionagi/service/connections/providers/types.py
@@ -0,0 +1,30 @@
+from .anthropic_ import AnthropicMessagesEndpoint
+from .claude_code_ import ClaudeCodeEndpoint, ClaudeCodeRequest
+from .claude_code_cli import ClaudeCodeCLIEndpoint
+from .exa_ import ExaSearchEndpoint, ExaSearchRequest
+from .oai_ import (
+    GroqChatEndpoint,
+    OpenaiChatEndpoint,
+    OpenaiEmbedEndpoint,
+    OpenaiResponseEndpoint,
+    OpenrouterChatEndpoint,
+)
+from .ollama_ import OllamaChatEndpoint
+from .perplexity_ import PerplexityChatEndpoint, PerplexityChatRequest
+
+__all__ = (
+    "AnthropicMessagesEndpoint",
+    "ClaudeCodeEndpoint",
+    "ClaudeCodeRequest",
+    "ClaudeCodeCLIEndpoint",
+    "ExaSearchEndpoint",
+    "ExaSearchRequest",
+    "OpenaiChatEndpoint",
+    "OpenaiEmbedEndpoint",
+    "OpenaiResponseEndpoint",
+    "OpenrouterChatEndpoint",
+    "GroqChatEndpoint",
+    "OllamaChatEndpoint",
+    "PerplexityChatEndpoint",
+    "PerplexityChatRequest",
+)
```
```diff
--- /dev/null
+++ b/lionagi/service/hooks/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+from lionagi.protocols.types import DataLogger
+
+from ._types import AssosiatedEventInfo, HookDict, HookEventTypes
+from .hook_event import HookEvent
+from .hook_registry import HookRegistry
+
+global_hook_logger = DataLogger(
+    persist_dir="./data/logs",
+    subfolder="hooks",
+    file_prefix="hook",
+    capacity=1000,
+)
+
+
+__all__ = (
+    "HookEventTypes",
+    "HookDict",
+    "AssosiatedEventInfo",
+    "HookEvent",
+    "HookRegistry",
+    "global_hook_logger",
+)
```
```diff
--- /dev/null
+++ b/lionagi/service/hooks/_types.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+from collections.abc import Awaitable, Callable
+from typing import TypeVar
+
+from typing_extensions import TypedDict
+
+from lionagi.utils import StringEnum
+
+SC = TypeVar("SC")  # streaming chunk type
+
+__all__ = (
+    "HookEventTypes",
+    "ALLOWED_HOOKS_TYPES",
+    "HookDict",
+    "StreamHandlers",
+    "AssosiatedEventInfo",
+)
+
+
+class HookEventTypes(StringEnum):
+    PreEventCreate = "pre_event_create"
+    PreInvokation = "pre_invokation"
+    PostInvokation = "post_invokation"
+
+
+ALLOWED_HOOKS_TYPES = HookEventTypes.allowed()
+
+
+class HookDict(TypedDict):
+    pre_event_create: Callable | None
+    pre_invokation: Callable | None
+    post_invokation: Callable | None
+
+
+StreamHandlers = dict[str, Callable[[SC], Awaitable[None]]]
+
+
+class AssosiatedEventInfo(TypedDict, total=False):
+    """Information about the event associated with the hook."""
+
+    lion_class: str
+    """Full qualified name of the event class."""
+
+    event_id: str
+    """ID of the event."""
+
+    event_created_at: float
+    """Creation timestamp of the event."""
```
```diff
--- /dev/null
+++ b/lionagi/service/hooks/_utils.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+from anyio import sleep
+
+from lionagi._errors import ValidationError
+from lionagi.utils import is_coro_func
+
+from ._types import ALLOWED_HOOKS_TYPES, HookEventTypes
+
+__all__ = (
+    "get_handler",
+    "validate_hooks",
+    "validate_stream_handlers",
+)
+
+
+def get_handler(d_: dict, k: str | type, get: bool = False, /):
+    handler = d_.get(k)
+    if handler is None and not get:
+        return None
+
+    if handler is not None:
+        if not is_coro_func(handler):
+
+            async def _func(x):
+                await sleep(0)
+                return handler(x)
+
+            return _func
+        return handler
+
+    async def _func(x):
+        await sleep(0)
+        return x
+
+    return _func
+
+
+def validate_hooks(kw):
+    """Validate that all hooks are callable."""
+    if not isinstance(kw, dict):
+        raise ValidationError.from_value(
+            kw,
+            expected="A dictionary of hooks",
+            message="Hooks must be a dictionary of callable functions",
+        )
+    for k, v in kw.items():
+        if not isinstance(k, HookEventTypes) or k not in ALLOWED_HOOKS_TYPES:
+            raise ValidationError.from_value(
+                k,
+                expected=f"One of {ALLOWED_HOOKS_TYPES}",
+                message=f"Hook key must be one of {ALLOWED_HOOKS_TYPES}, got {k}",
+            )
+        if not callable(v):
+            raise ValidationError.from_value(
+                v,
+                expected="A callable function",
+                message=f"Hook for {k} must be callable, got {type(v)}",
+            )
+
+
+def validate_stream_handlers(kw):
+    """Validate that all stream handlers are callable."""
+    if not isinstance(kw, dict):
+        raise ValidationError.from_value(
+            kw,
+            expected="A dictionary of stream handlers",
+            message="Stream handlers must be a dictionary of callable functions",
+        )
+    for k, v in kw.items():
+        if not isinstance(k, str | type):
+            raise ValidationError.from_value(
+                k,
+                expected="A name or type of the chunk being handled",
+                message=f"Stream handler key must be a string or type, got {type(k)}",
+            )
+        if not callable(v):
+            raise ValidationError.from_value(
+                v,
+                expected="A callable function",
+                message=f"Stream handler for {k} must be callable, got {type(v)}",
+            )
```
```diff
--- /dev/null
+++ b/lionagi/service/hooks/hook_event.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+from typing import Any
+
+import anyio
+from pydantic import Field, PrivateAttr
+
+from lionagi.libs.concurrency import fail_after, get_cancelled_exc_class
+from lionagi.protocols.types import Event, EventStatus
+
+from ._types import AssosiatedEventInfo, HookEventTypes
+from .hook_registry import HookRegistry
+
+
+class HookEvent(Event):
+    registry: HookRegistry = Field(..., exclude=True)
+    hook_type: HookEventTypes
+    exit: bool = Field(False, exclude=True)
+    timeout: int = Field(30, exclude=True)
+    params: dict[str, Any] = Field(default_factory=dict, exclude=True)
+    event_like: Event | type[Event] = Field(..., exclude=True)
+    _should_exit: bool = PrivateAttr(False)
+    _exit_cause: BaseException | None = PrivateAttr(None)
+
+    assosiated_event_info: AssosiatedEventInfo | None = None
+
+    async def invoke(self):
+        start = anyio.current_time()
+        self.execution.status = EventStatus.PROCESSING
+        try:
+            with fail_after(self.timeout):
+                (res, se, st), meta = await self.registry.call(
+                    self.event_like,
+                    hook_type=self.hook_type,
+                    exit=self.exit,
+                    **self.params,
+                )
+
+            self.assosiated_event_info = AssosiatedEventInfo(**meta)
+            self._should_exit = se
+            self.execution.status = st
+            if isinstance(res, tuple) and len(res) == 2:
+                self.execution.response = None
+                self.execution.error = str(res[1])
+                self._exit_cause = res[1]
+                raise res[1]
+            if isinstance(res, Exception):
+                self.execution.response = None
+                self.execution.error = str(res)
+                self._exit_cause = res
+            else:
+                self.execution.response = res
+                self.execution.error = None
+        except get_cancelled_exc_class():
+            raise
+
+        except Exception as e:
+            self.execution.status = EventStatus.FAILED
+            self.execution.response = None
+            self.execution.error = str(e)
+            self._should_exit = True
+
+        finally:
+            self.execution.duration = anyio.current_time() - start
```