lionagi 0.17.10__py3-none-any.whl → 0.18.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- lionagi/__init__.py +1 -2
- lionagi/_class_registry.py +1 -2
- lionagi/_errors.py +1 -2
- lionagi/adapters/async_postgres_adapter.py +2 -10
- lionagi/config.py +1 -2
- lionagi/fields/action.py +1 -2
- lionagi/fields/base.py +3 -0
- lionagi/fields/code.py +3 -0
- lionagi/fields/file.py +3 -0
- lionagi/fields/instruct.py +1 -2
- lionagi/fields/reason.py +1 -2
- lionagi/fields/research.py +3 -0
- lionagi/libs/__init__.py +1 -2
- lionagi/libs/file/__init__.py +1 -2
- lionagi/libs/file/chunk.py +1 -2
- lionagi/libs/file/process.py +1 -2
- lionagi/libs/schema/__init__.py +1 -2
- lionagi/libs/schema/as_readable.py +1 -2
- lionagi/libs/schema/extract_code_block.py +1 -2
- lionagi/libs/schema/extract_docstring.py +1 -2
- lionagi/libs/schema/function_to_schema.py +1 -2
- lionagi/libs/schema/load_pydantic_model_from_schema.py +1 -2
- lionagi/libs/schema/minimal_yaml.py +98 -0
- lionagi/libs/validate/__init__.py +1 -2
- lionagi/libs/validate/common_field_validators.py +1 -2
- lionagi/libs/validate/validate_boolean.py +1 -2
- lionagi/ln/fuzzy/_string_similarity.py +1 -2
- lionagi/ln/types.py +32 -5
- lionagi/models/__init__.py +1 -2
- lionagi/models/field_model.py +9 -1
- lionagi/models/hashable_model.py +4 -2
- lionagi/models/model_params.py +1 -2
- lionagi/models/operable_model.py +1 -2
- lionagi/models/schema_model.py +1 -2
- lionagi/operations/ReAct/ReAct.py +475 -239
- lionagi/operations/ReAct/__init__.py +1 -2
- lionagi/operations/ReAct/utils.py +4 -2
- lionagi/operations/__init__.py +1 -2
- lionagi/operations/act/__init__.py +2 -0
- lionagi/operations/act/act.py +206 -0
- lionagi/operations/brainstorm/__init__.py +1 -2
- lionagi/operations/brainstorm/brainstorm.py +1 -2
- lionagi/operations/brainstorm/prompt.py +1 -2
- lionagi/operations/builder.py +1 -2
- lionagi/operations/chat/__init__.py +1 -2
- lionagi/operations/chat/chat.py +131 -116
- lionagi/operations/communicate/communicate.py +102 -44
- lionagi/operations/flow.py +5 -6
- lionagi/operations/instruct/__init__.py +1 -2
- lionagi/operations/instruct/instruct.py +1 -2
- lionagi/operations/interpret/__init__.py +1 -2
- lionagi/operations/interpret/interpret.py +66 -22
- lionagi/operations/operate/__init__.py +1 -2
- lionagi/operations/operate/operate.py +213 -108
- lionagi/operations/parse/__init__.py +1 -2
- lionagi/operations/parse/parse.py +171 -144
- lionagi/operations/plan/__init__.py +1 -2
- lionagi/operations/plan/plan.py +1 -2
- lionagi/operations/plan/prompt.py +1 -2
- lionagi/operations/select/__init__.py +1 -2
- lionagi/operations/select/select.py +79 -19
- lionagi/operations/select/utils.py +2 -3
- lionagi/operations/types.py +120 -25
- lionagi/operations/utils.py +1 -2
- lionagi/protocols/__init__.py +1 -2
- lionagi/protocols/_concepts.py +1 -2
- lionagi/protocols/action/__init__.py +1 -2
- lionagi/protocols/action/function_calling.py +3 -20
- lionagi/protocols/action/manager.py +34 -4
- lionagi/protocols/action/tool.py +1 -2
- lionagi/protocols/contracts.py +1 -2
- lionagi/protocols/forms/__init__.py +1 -2
- lionagi/protocols/forms/base.py +1 -2
- lionagi/protocols/forms/flow.py +1 -2
- lionagi/protocols/forms/form.py +1 -2
- lionagi/protocols/forms/report.py +1 -2
- lionagi/protocols/generic/__init__.py +1 -2
- lionagi/protocols/generic/element.py +17 -65
- lionagi/protocols/generic/event.py +1 -2
- lionagi/protocols/generic/log.py +17 -14
- lionagi/protocols/generic/pile.py +3 -4
- lionagi/protocols/generic/processor.py +1 -2
- lionagi/protocols/generic/progression.py +1 -2
- lionagi/protocols/graph/__init__.py +1 -2
- lionagi/protocols/graph/edge.py +1 -2
- lionagi/protocols/graph/graph.py +1 -2
- lionagi/protocols/graph/node.py +1 -2
- lionagi/protocols/ids.py +1 -2
- lionagi/protocols/mail/__init__.py +1 -2
- lionagi/protocols/mail/exchange.py +1 -2
- lionagi/protocols/mail/mail.py +1 -2
- lionagi/protocols/mail/mailbox.py +1 -2
- lionagi/protocols/mail/manager.py +1 -2
- lionagi/protocols/mail/package.py +1 -2
- lionagi/protocols/messages/__init__.py +28 -2
- lionagi/protocols/messages/action_request.py +87 -186
- lionagi/protocols/messages/action_response.py +74 -133
- lionagi/protocols/messages/assistant_response.py +131 -161
- lionagi/protocols/messages/base.py +27 -20
- lionagi/protocols/messages/instruction.py +281 -626
- lionagi/protocols/messages/manager.py +113 -64
- lionagi/protocols/messages/message.py +88 -199
- lionagi/protocols/messages/system.py +53 -125
- lionagi/protocols/operatives/__init__.py +1 -2
- lionagi/protocols/operatives/operative.py +1 -2
- lionagi/protocols/operatives/step.py +1 -2
- lionagi/protocols/types.py +1 -4
- lionagi/service/connections/__init__.py +1 -2
- lionagi/service/connections/api_calling.py +1 -2
- lionagi/service/connections/endpoint.py +1 -10
- lionagi/service/connections/endpoint_config.py +1 -2
- lionagi/service/connections/header_factory.py +1 -2
- lionagi/service/connections/match_endpoint.py +1 -2
- lionagi/service/connections/mcp/__init__.py +1 -2
- lionagi/service/connections/mcp/wrapper.py +1 -2
- lionagi/service/connections/providers/__init__.py +1 -2
- lionagi/service/connections/providers/anthropic_.py +1 -2
- lionagi/service/connections/providers/claude_code_cli.py +1 -2
- lionagi/service/connections/providers/exa_.py +1 -2
- lionagi/service/connections/providers/nvidia_nim_.py +2 -27
- lionagi/service/connections/providers/oai_.py +30 -96
- lionagi/service/connections/providers/ollama_.py +4 -4
- lionagi/service/connections/providers/perplexity_.py +1 -2
- lionagi/service/hooks/__init__.py +1 -1
- lionagi/service/hooks/_types.py +1 -1
- lionagi/service/hooks/_utils.py +1 -1
- lionagi/service/hooks/hook_event.py +1 -1
- lionagi/service/hooks/hook_registry.py +1 -1
- lionagi/service/hooks/hooked_event.py +3 -4
- lionagi/service/imodel.py +1 -2
- lionagi/service/manager.py +1 -2
- lionagi/service/rate_limited_processor.py +1 -2
- lionagi/service/resilience.py +1 -2
- lionagi/service/third_party/anthropic_models.py +1 -2
- lionagi/service/third_party/claude_code.py +4 -4
- lionagi/service/third_party/openai_models.py +433 -0
- lionagi/service/token_calculator.py +1 -2
- lionagi/session/__init__.py +1 -2
- lionagi/session/branch.py +171 -180
- lionagi/session/session.py +4 -11
- lionagi/tools/__init__.py +1 -2
- lionagi/tools/base.py +1 -2
- lionagi/tools/file/__init__.py +1 -2
- lionagi/tools/file/reader.py +3 -4
- lionagi/tools/types.py +1 -2
- lionagi/utils.py +1 -2
- lionagi/version.py +1 -1
- {lionagi-0.17.10.dist-info → lionagi-0.18.0.dist-info}/METADATA +1 -2
- lionagi-0.18.0.dist-info/RECORD +191 -0
- lionagi/operations/_act/__init__.py +0 -3
- lionagi/operations/_act/act.py +0 -87
- lionagi/protocols/messages/templates/README.md +0 -28
- lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
- lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
- lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
- lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
- lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
- lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
- lionagi/service/connections/providers/types.py +0 -28
- lionagi/service/third_party/openai_model_names.py +0 -198
- lionagi/service/types.py +0 -59
- lionagi-0.17.10.dist-info/RECORD +0 -199
- {lionagi-0.17.10.dist-info → lionagi-0.18.0.dist-info}/WHEEL +0 -0
- {lionagi-0.17.10.dist-info → lionagi-0.18.0.dist-info}/licenses/LICENSE +0 -0
lionagi/protocols/messages/system.py
CHANGED

```diff
@@ -1,145 +1,73 @@
-# Copyright (c) 2023
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
+from dataclasses import dataclass
 from datetime import datetime
 from typing import Any
 
-from pydantic import
-from typing_extensions import Self, override
+from pydantic import Field, field_validator
 
 from .base import SenderRecipient
-from .message import MessageRole, RoledMessage
+from .message import MessageContent, MessageRole, RoledMessage
 
-__all__ = ("System",)
-
-) -> dict:
-    """
-    Insert optional datetime string into the system message content.
-        If True, embed current time. If str, use as time. If None, omit.
-        system_message (str):
-            The main system message text.
-
-    Returns:
-        dict: The combined system content.
-    """
-    content: dict = {"system_message": system_message}
-    if system_datetime:
-        if isinstance(system_datetime, str):
-            content["system_datetime"] = system_datetime
-        else:
-            content["system_datetime"] = datetime.now().isoformat(
-                timespec="minutes"
-            )
-    return content
-
-
-class System(RoledMessage):
-    """
-    A specialized message that sets a *system-level* context or policy.
-    Usually the first in a conversation, instructing the AI about general
-    constraints or identity.
-    """
-
-    template: str | Template | None = jinja_env.get_template(
-        "system_message.jinja2"
-    )
-
-    @
-        template=None,
-        system: Any = None,
-        **kwargs,
-    ) -> Self:
-        """
-        Construct a system message with optional datetime annotation.
-
-        Args:
-            system_message (str):
-                The main text instructing the AI about behavior/identity.
-            system_datetime (bool|str, optional):
-                If True or str, embed a time reference. If str, it is used directly.
-            sender (SenderRecipient, optional):
-                Typically `MessageRole.SYSTEM`.
-            recipient (SenderRecipient, optional):
-                Typically `MessageRole.ASSISTANT`.
-            template (Template|str|None):
-                An optional custom template for rendering.
-            system (Any):
-                Alias for `system_message` (deprecated).
-            **kwargs:
-                Additional content merged into the final dict.
-        """
-            "as they are alias, and `system` is deprecated"
-        )
-        system_message = system_message or system
-
-        content = format_system_content(
-            system_datetime=system_datetime, system_message=system_message
-        )
-
-        params = {
-            "role": MessageRole.SYSTEM,
-            "content": content,
-            "sender": sender or MessageRole.SYSTEM,
-            "recipient": recipient or MessageRole.ASSISTANT,
-        }
-        if template:
-            params["template"] = template
-        return cls(**params)
-
-        system_datetime: bool | str = None,
-        template: Template | str | None = None,
-        **kwargs,
-    ) -> NoReturn:
-        """
-        Adjust fields of this system message.
-            system_message
-                New system message text.
-            sender (SenderRecipient):
-                Updated sender or role.
-            recipient (SenderRecipient):
-                Updated recipient or role.
-            system_datetime (bool|str):
-                If set, embed new datetime info.
-            template (Template|str|None):
-                New template override.
-            **kwargs:
-                Additional fields for self.content.
-        """
-        if any([system_message, system_message]):
-            self.content = format_system_content(
-                system_datetime=system_datetime, system_message=system_message
-            )
-        super().update(
-            sender=sender, recipient=recipient, template=template, **kwargs
-        )
+
+@dataclass(slots=True)
+class SystemContent(MessageContent):
+    """Content for system messages.
+
+    Fields:
+        system_message: Main system instruction text
+        system_datetime: Optional datetime string
+    """
+
+    system_message: str = (
+        "You are a helpful AI assistant. Let's think step by step."
+    )
+    system_datetime: str | None = None
+
+    @property
+    def rendered(self) -> str:
+        """Render system message with optional datetime."""
+        parts = []
+        if self.system_datetime:
+            parts.append(f"System Time: {self.system_datetime}")
+        parts.append(self.system_message)
+        return "\n\n".join(parts)
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> "SystemContent":
+        """Construct SystemContent from dictionary."""
+        system_message = data.get(
+            "system_message",
+            cls.__dataclass_fields__["system_message"].default,
+        )
+        system_datetime = data.get("system_datetime")
+
+        # Handle datetime generation
+        if system_datetime is True:
+            system_datetime = datetime.now().isoformat(timespec="minutes")
+        elif system_datetime is False or system_datetime is None:
+            system_datetime = None
+
+        return cls(
+            system_message=system_message, system_datetime=system_datetime
+        )
+
+
+class System(RoledMessage):
+    """System-level message setting context or policy for the conversation."""
+
+    role: MessageRole = MessageRole.SYSTEM
+    content: SystemContent = Field(default_factory=SystemContent)
+    sender: SenderRecipient | None = MessageRole.SYSTEM
+    recipient: SenderRecipient | None = MessageRole.ASSISTANT
+
+    @field_validator("content", mode="before")
+    def _validate_content(cls, v):
+        if v is None:
+            return SystemContent()
+        if isinstance(v, dict):
+            return SystemContent.from_dict(v)
+        if isinstance(v, SystemContent):
+            return v
+        raise TypeError("content must be dict or SystemContent instance")
```
lionagi/protocols/types.py
CHANGED

```diff
@@ -1,5 +1,4 @@
-# Copyright (c) 2023
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from ._concepts import Collective, Communicatable, Condition, Manager
@@ -26,7 +25,6 @@ from .mail.manager import MailManager
 from .messages.base import (
     MESSAGE_FIELDS,
     MessageField,
-    MessageFlag,
     MessageRole,
     validate_sender_recipient,
 )
@@ -83,7 +81,6 @@ __all__ = (
     "PackageCategory",
     "MESSAGE_FIELDS",
     "MessageField",
-    "MessageFlag",
     "MessageRole",
     "validate_sender_recipient",
     "ActionRequest",
```
lionagi/service/connections/endpoint.py
CHANGED

```diff
@@ -1,5 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 import asyncio
@@ -123,10 +122,7 @@ class Endpoint:
             # Validate the filtered payload
             payload = self.config.validate_payload(filtered_payload)
         else:
-            # If no request_options, we still need to remove obvious non-API params
-            # These are parameters that are never part of any API payload
             non_api_params = {
-                "task",
                 "provider",
                 "base_url",
                 "endpoint",
@@ -134,13 +130,9 @@ class Endpoint:
                 "api_key",
                 "queue_capacity",
                 "capacity_refresh_time",
-                "interval",
-                "limit_requests",
-                "limit_tokens",
                 "invoke_with_endpoint",
                 "extra_headers",
                 "headers",
-                "cache_control",
                 "include_token_usage_to_model",
                 "chat_model",
                 "imodel",
@@ -149,7 +141,6 @@ class Endpoint:
                 "aggregation_count",
                 "action_strategy",
                 "parse_model",
-                "reason",
                 "actions",
                 "return_operative",
                 "operative_model",
```
lionagi/service/connections/providers/nvidia_nim_.py
CHANGED

```diff
@@ -1,24 +1,6 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
-"""
-NVIDIA NIM endpoint configurations.
-
-This module provides endpoint configurations for NVIDIA NIM (NVIDIA Inference Microservices),
-which offers GPU-accelerated inference for various AI models through an OpenAI-compatible API.
-
-NVIDIA NIM features:
-- OpenAI-compatible API endpoints
-- GPU-accelerated inference
-- Support for various open-source models (Llama, Mistral, etc.)
-- Both cloud-hosted and self-hosted options
-- Free tier with 1000 credits for development
-
-API Documentation: https://docs.nvidia.com/nim/
-Build Portal: https://build.nvidia.com/
-"""
-
 from lionagi.config import settings
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
@@ -69,15 +51,8 @@ NVIDIA_NIM_EMBED_ENDPOINT_CONFIG = _get_nvidia_nim_config(
 class NvidiaNimChatEndpoint(Endpoint):
     """NVIDIA NIM chat completion endpoint.
 
-    Supports various open-source models including:
-    - meta/llama3-8b-instruct
-    - meta/llama3-70b-instruct
-    - meta/llama3.1-405b-instruct
-    - mistralai/mixtral-8x7b-instruct-v0.1
-    - google/gemma-7b
-    - And many more...
-
     Get your API key from: https://build.nvidia.com/
+    API Documentation: https://docs.nvidia.com/nim/
     """
 
     def __init__(self, config=None, **kwargs):
```
lionagi/service/connections/providers/oai_.py
CHANGED

```diff
@@ -1,5 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 """
@@ -19,45 +18,21 @@ from pydantic import BaseModel
 from lionagi.config import settings
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
-from lionagi.service.third_party.
-    is_reasoning_model,
-)
-
-__all__ = (
-    "OpenaiChatEndpoint",
-    "OpenaiResponseEndpoint",
-    "OpenaiEmbedEndpoint",
-    "OpenrouterChatEndpoint",
-    "GroqChatEndpoint",
-    "OPENAI_CHAT_ENDPOINT_CONFIG",
-    "OPENAI_RESPONSE_ENDPOINT_CONFIG",
-    "OPENAI_EMBEDDING_ENDPOINT_CONFIG",
-    "OPENROUTER_CHAT_ENDPOINT_CONFIG",
-    "OPENROUTER_GEMINI_ENDPOINT_CONFIG",
-    "GROQ_CHAT_ENDPOINT_CONFIG",
-    "REASONING_MODELS",
-    "REASONING_NOT_SUPPORT_PARAMS",
+from lionagi.service.third_party.openai_models import (
+    OpenAIChatCompletionsRequest,
 )
 
 
-def
-        content_type="application/json",
-        method="POST",
-        requires_tokens=True,
-        # NOTE: OpenAI models have incorrect role literals, only use for param validation
-        # request_options=CreateChatCompletionRequest,
-    )
-    config.update(kwargs)
+def _get_oai_config(**kw):
+    config = {
+        "provider": "openai",
+        "base_url": "https://api.openai.com/v1",
+        "auth_type": "bearer",
+        "content_type": "application/json",
+        "method": "POST",
+        "api_key": settings.OPENAI_API_KEY or "dummy-key-for-testing",
+    }
+    config.update(kw)
     return EndpointConfig(**config)
 
 
@@ -73,8 +48,7 @@ def _get_openrouter_config(**kwargs):
         auth_type="bearer",
         content_type="application/json",
         method="POST",
-
-        # request_options=CreateChatCompletionRequest,
+        request_options=OpenAIChatCompletionsRequest,
     )
     config.update(kwargs)
     return EndpointConfig(**config)
@@ -92,48 +66,21 @@ def _get_groq_config(**kwargs):
         auth_type="bearer",
         content_type="application/json",
         method="POST",
+        request_options=OpenAIChatCompletionsRequest,
     )
     config.update(kwargs)
     return EndpointConfig(**config)
 
-# OpenAI endpoints
-OPENAI_CHAT_ENDPOINT_CONFIG = _get_openai_config()
-
-OPENAI_RESPONSE_ENDPOINT_CONFIG = _get_openai_config(
-    name="openai_response",
-    endpoint="responses",
-)
-
-OPENAI_EMBEDDING_ENDPOINT_CONFIG = _get_openai_config(
-    name="openai_embed",
-    endpoint="embeddings",
-    kwargs={"model": "text-embedding-3-small"},
-)
-
-# OpenRouter endpoints
-OPENROUTER_CHAT_ENDPOINT_CONFIG = _get_openrouter_config()
-
-OPENROUTER_GEMINI_ENDPOINT_CONFIG = _get_openrouter_config(
-    name="openrouter_gemini",
-    kwargs={"model": "google/gemini-2.5-flash"},
-)
-
-# Groq endpoints
-GROQ_CHAT_ENDPOINT_CONFIG = _get_groq_config()
-
-REASONING_NOT_SUPPORT_PARAMS = (
-    "temperature",
-    "top_p",
-    "logit_bias",
-    "logprobs",
-    "top_logprobs",
-)
-
-
 class OpenaiChatEndpoint(Endpoint):
     def __init__(self, config=None, **kwargs):
-        config = config or
+        config = config or _get_oai_config(
+            name="oai_chat",
+            endpoint="chat/completions",
+            request_options=OpenAIChatCompletionsRequest,
+            kwargs={"model": settings.OPENAI_DEFAULT_MODEL},
+            requires_tokens=True,
+        )
         super().__init__(config, **kwargs)
 
     def create_payload(
@@ -146,34 +93,20 @@ class OpenaiChatEndpoint(Endpoint):
         payload, headers = super().create_payload(
            request, extra_headers, **kwargs
         )
-            model
-            and is_reasoning_model(model)
-            and not model.startswith("gpt-5")
-        ):
-            # Remove unsupported parameters for reasoning models
-            for param in REASONING_NOT_SUPPORT_PARAMS:
-                payload.pop(param, None)
-
-            # Convert system role to developer role for reasoning models
-            if "messages" in payload and payload["messages"]:
-                if payload["messages"][0].get("role") == "system":
-                    payload["messages"][0]["role"] = "developer"
-        else:
-            # Remove reasoning_effort for non-reasoning models
-            payload.pop("reasoning_effort", None)
+        # Convert system role to developer role for reasoning models
+        if "messages" in payload and payload["messages"]:
+            if payload["messages"][0].get("role") == "system":
+                payload["messages"][0]["role"] = "developer"
 
         return (payload, headers)
 
 
 class OpenaiResponseEndpoint(Endpoint):
     def __init__(self, config=None, **kwargs):
-        config = config or
+        config = config or _get_oai_config(
            name="openai_response",
            endpoint="responses",
+            requires_tokens=True,
         )
         super().__init__(config, **kwargs)
 
@@ -192,9 +125,10 @@ class GroqChatEndpoint(Endpoint):
 
 class OpenaiEmbedEndpoint(Endpoint):
     def __init__(self, config=None, **kwargs):
-        config = config or
+        config = config or _get_oai_config(
            name="openai_embed",
            endpoint="embeddings",
            kwargs={"model": "text-embedding-3-small"},
+            requires_tokens=True,
         )
         super().__init__(config, **kwargs)
```
lionagi/service/connections/providers/ollama_.py
CHANGED

```diff
@@ -1,5 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 """
@@ -25,6 +24,8 @@ _HAS_OLLAMA = is_import_installed("ollama")
 
 def _get_ollama_config(**kwargs):
     """Create Ollama endpoint configuration with defaults."""
+    from ...third_party.openai_models import OpenAIChatCompletionsRequest
+
     config = dict(
         name="ollama_chat",
         provider="ollama",
@@ -37,8 +38,7 @@ def _get_ollama_config(**kwargs):
         content_type="application/json",
         auth_type="none",  # No authentication
         default_headers={},  # No auth headers needed
-
-        # request_options=CreateChatCompletionRequest,
+        request_options=OpenAIChatCompletionsRequest,
     )
     config.update(kwargs)
     return EndpointConfig(**config)
```
lionagi/service/hooks/_types.py
CHANGED
lionagi/service/hooks/_utils.py
CHANGED