lionagi 0.17.10__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164)
  1. lionagi/__init__.py +1 -2
  2. lionagi/_class_registry.py +1 -2
  3. lionagi/_errors.py +1 -2
  4. lionagi/adapters/async_postgres_adapter.py +2 -10
  5. lionagi/config.py +1 -2
  6. lionagi/fields/action.py +1 -2
  7. lionagi/fields/base.py +3 -0
  8. lionagi/fields/code.py +3 -0
  9. lionagi/fields/file.py +3 -0
  10. lionagi/fields/instruct.py +1 -2
  11. lionagi/fields/reason.py +1 -2
  12. lionagi/fields/research.py +3 -0
  13. lionagi/libs/__init__.py +1 -2
  14. lionagi/libs/file/__init__.py +1 -2
  15. lionagi/libs/file/chunk.py +1 -2
  16. lionagi/libs/file/process.py +1 -2
  17. lionagi/libs/schema/__init__.py +1 -2
  18. lionagi/libs/schema/as_readable.py +1 -2
  19. lionagi/libs/schema/extract_code_block.py +1 -2
  20. lionagi/libs/schema/extract_docstring.py +1 -2
  21. lionagi/libs/schema/function_to_schema.py +1 -2
  22. lionagi/libs/schema/load_pydantic_model_from_schema.py +1 -2
  23. lionagi/libs/schema/minimal_yaml.py +98 -0
  24. lionagi/libs/validate/__init__.py +1 -2
  25. lionagi/libs/validate/common_field_validators.py +1 -2
  26. lionagi/libs/validate/validate_boolean.py +1 -2
  27. lionagi/ln/fuzzy/_string_similarity.py +1 -2
  28. lionagi/ln/types.py +32 -5
  29. lionagi/models/__init__.py +1 -2
  30. lionagi/models/field_model.py +9 -1
  31. lionagi/models/hashable_model.py +4 -2
  32. lionagi/models/model_params.py +1 -2
  33. lionagi/models/operable_model.py +1 -2
  34. lionagi/models/schema_model.py +1 -2
  35. lionagi/operations/ReAct/ReAct.py +475 -239
  36. lionagi/operations/ReAct/__init__.py +1 -2
  37. lionagi/operations/ReAct/utils.py +4 -2
  38. lionagi/operations/__init__.py +1 -2
  39. lionagi/operations/act/__init__.py +2 -0
  40. lionagi/operations/act/act.py +206 -0
  41. lionagi/operations/brainstorm/__init__.py +1 -2
  42. lionagi/operations/brainstorm/brainstorm.py +1 -2
  43. lionagi/operations/brainstorm/prompt.py +1 -2
  44. lionagi/operations/builder.py +1 -2
  45. lionagi/operations/chat/__init__.py +1 -2
  46. lionagi/operations/chat/chat.py +131 -116
  47. lionagi/operations/communicate/communicate.py +102 -44
  48. lionagi/operations/flow.py +5 -6
  49. lionagi/operations/instruct/__init__.py +1 -2
  50. lionagi/operations/instruct/instruct.py +1 -2
  51. lionagi/operations/interpret/__init__.py +1 -2
  52. lionagi/operations/interpret/interpret.py +66 -22
  53. lionagi/operations/operate/__init__.py +1 -2
  54. lionagi/operations/operate/operate.py +213 -108
  55. lionagi/operations/parse/__init__.py +1 -2
  56. lionagi/operations/parse/parse.py +171 -144
  57. lionagi/operations/plan/__init__.py +1 -2
  58. lionagi/operations/plan/plan.py +1 -2
  59. lionagi/operations/plan/prompt.py +1 -2
  60. lionagi/operations/select/__init__.py +1 -2
  61. lionagi/operations/select/select.py +79 -19
  62. lionagi/operations/select/utils.py +2 -3
  63. lionagi/operations/types.py +120 -25
  64. lionagi/operations/utils.py +1 -2
  65. lionagi/protocols/__init__.py +1 -2
  66. lionagi/protocols/_concepts.py +1 -2
  67. lionagi/protocols/action/__init__.py +1 -2
  68. lionagi/protocols/action/function_calling.py +3 -20
  69. lionagi/protocols/action/manager.py +34 -4
  70. lionagi/protocols/action/tool.py +1 -2
  71. lionagi/protocols/contracts.py +1 -2
  72. lionagi/protocols/forms/__init__.py +1 -2
  73. lionagi/protocols/forms/base.py +1 -2
  74. lionagi/protocols/forms/flow.py +1 -2
  75. lionagi/protocols/forms/form.py +1 -2
  76. lionagi/protocols/forms/report.py +1 -2
  77. lionagi/protocols/generic/__init__.py +1 -2
  78. lionagi/protocols/generic/element.py +17 -65
  79. lionagi/protocols/generic/event.py +1 -2
  80. lionagi/protocols/generic/log.py +17 -14
  81. lionagi/protocols/generic/pile.py +3 -4
  82. lionagi/protocols/generic/processor.py +1 -2
  83. lionagi/protocols/generic/progression.py +1 -2
  84. lionagi/protocols/graph/__init__.py +1 -2
  85. lionagi/protocols/graph/edge.py +1 -2
  86. lionagi/protocols/graph/graph.py +1 -2
  87. lionagi/protocols/graph/node.py +1 -2
  88. lionagi/protocols/ids.py +1 -2
  89. lionagi/protocols/mail/__init__.py +1 -2
  90. lionagi/protocols/mail/exchange.py +1 -2
  91. lionagi/protocols/mail/mail.py +1 -2
  92. lionagi/protocols/mail/mailbox.py +1 -2
  93. lionagi/protocols/mail/manager.py +1 -2
  94. lionagi/protocols/mail/package.py +1 -2
  95. lionagi/protocols/messages/__init__.py +28 -2
  96. lionagi/protocols/messages/action_request.py +87 -186
  97. lionagi/protocols/messages/action_response.py +74 -133
  98. lionagi/protocols/messages/assistant_response.py +131 -161
  99. lionagi/protocols/messages/base.py +27 -20
  100. lionagi/protocols/messages/instruction.py +281 -626
  101. lionagi/protocols/messages/manager.py +113 -64
  102. lionagi/protocols/messages/message.py +88 -199
  103. lionagi/protocols/messages/system.py +53 -125
  104. lionagi/protocols/operatives/__init__.py +1 -2
  105. lionagi/protocols/operatives/operative.py +1 -2
  106. lionagi/protocols/operatives/step.py +1 -2
  107. lionagi/protocols/types.py +1 -4
  108. lionagi/service/connections/__init__.py +1 -2
  109. lionagi/service/connections/api_calling.py +1 -2
  110. lionagi/service/connections/endpoint.py +1 -10
  111. lionagi/service/connections/endpoint_config.py +1 -2
  112. lionagi/service/connections/header_factory.py +1 -2
  113. lionagi/service/connections/match_endpoint.py +1 -2
  114. lionagi/service/connections/mcp/__init__.py +1 -2
  115. lionagi/service/connections/mcp/wrapper.py +1 -2
  116. lionagi/service/connections/providers/__init__.py +1 -2
  117. lionagi/service/connections/providers/anthropic_.py +1 -2
  118. lionagi/service/connections/providers/claude_code_cli.py +1 -2
  119. lionagi/service/connections/providers/exa_.py +1 -2
  120. lionagi/service/connections/providers/nvidia_nim_.py +2 -27
  121. lionagi/service/connections/providers/oai_.py +30 -96
  122. lionagi/service/connections/providers/ollama_.py +4 -4
  123. lionagi/service/connections/providers/perplexity_.py +1 -2
  124. lionagi/service/hooks/__init__.py +1 -1
  125. lionagi/service/hooks/_types.py +1 -1
  126. lionagi/service/hooks/_utils.py +1 -1
  127. lionagi/service/hooks/hook_event.py +1 -1
  128. lionagi/service/hooks/hook_registry.py +1 -1
  129. lionagi/service/hooks/hooked_event.py +3 -4
  130. lionagi/service/imodel.py +1 -2
  131. lionagi/service/manager.py +1 -2
  132. lionagi/service/rate_limited_processor.py +1 -2
  133. lionagi/service/resilience.py +1 -2
  134. lionagi/service/third_party/anthropic_models.py +1 -2
  135. lionagi/service/third_party/claude_code.py +4 -4
  136. lionagi/service/third_party/openai_models.py +433 -0
  137. lionagi/service/token_calculator.py +1 -2
  138. lionagi/session/__init__.py +1 -2
  139. lionagi/session/branch.py +171 -180
  140. lionagi/session/session.py +4 -11
  141. lionagi/tools/__init__.py +1 -2
  142. lionagi/tools/base.py +1 -2
  143. lionagi/tools/file/__init__.py +1 -2
  144. lionagi/tools/file/reader.py +3 -4
  145. lionagi/tools/types.py +1 -2
  146. lionagi/utils.py +1 -2
  147. lionagi/version.py +1 -1
  148. {lionagi-0.17.10.dist-info → lionagi-0.18.0.dist-info}/METADATA +1 -2
  149. lionagi-0.18.0.dist-info/RECORD +191 -0
  150. lionagi/operations/_act/__init__.py +0 -3
  151. lionagi/operations/_act/act.py +0 -87
  152. lionagi/protocols/messages/templates/README.md +0 -28
  153. lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
  154. lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
  155. lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
  156. lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
  157. lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
  158. lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
  159. lionagi/service/connections/providers/types.py +0 -28
  160. lionagi/service/third_party/openai_model_names.py +0 -198
  161. lionagi/service/types.py +0 -59
  162. lionagi-0.17.10.dist-info/RECORD +0 -199
  163. {lionagi-0.17.10.dist-info → lionagi-0.18.0.dist-info}/WHEEL +0 -0
  164. {lionagi-0.17.10.dist-info → lionagi-0.18.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,145 +1,73 @@
-# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
+from dataclasses import dataclass
 from datetime import datetime
-from typing import Any, NoReturn
+from typing import Any
 
-from pydantic import JsonValue
-from typing_extensions import Self, override
+from pydantic import Field, field_validator
 
 from .base import SenderRecipient
-from .message import MessageRole, RoledMessage, Template, jinja_env
+from .message import MessageContent, MessageRole, RoledMessage
 
-__all__ = ("System",)
 
+@dataclass(slots=True)
+class SystemContent(MessageContent):
+    """Content for system messages.
 
-def format_system_content(
-    system_datetime: bool | str | None,
-    system_message: str,
-) -> dict:
+    Fields:
+        system_message: Main system instruction text
+        system_datetime: Optional datetime string
     """
-    Insert optional datetime string into the system message content.
 
-    Args:
-        system_datetime (bool|str|None):
-            If True, embed current time. If str, use as time. If None, omit.
-        system_message (str):
-            The main system message text.
-
-    Returns:
-        dict: The combined system content.
-    """
-    content: dict = {"system_message": system_message}
-    if system_datetime:
-        if isinstance(system_datetime, str):
-            content["system_datetime"] = system_datetime
-        else:
-            content["system_datetime"] = datetime.now().isoformat(
-                timespec="minutes"
-            )
-    return content
-
-
-class System(RoledMessage):
-    """
-    A specialized message that sets a *system-level* context or policy.
-    Usually the first in a conversation, instructing the AI about general
-    constraints or identity.
-    """
-
-    template: str | Template | None = jinja_env.get_template(
-        "system_message.jinja2"
+    system_message: str = (
+        "You are a helpful AI assistant. Let's think step by step."
     )
+    system_datetime: str | None = None
 
-    @override
-    @classmethod
-    def create(
-        cls,
-        system_message="You are a helpful AI assistant. Let's think step by step.",
-        system_datetime=None,
-        sender: SenderRecipient = None,
-        recipient: SenderRecipient = None,
-        template=None,
-        system: Any = None,
-        **kwargs,
-    ) -> Self:
-        """
-        Construct a system message with optional datetime annotation.
-
-        Args:
-            system_message (str):
-                The main text instructing the AI about behavior/identity.
-            system_datetime (bool|str, optional):
-                If True or str, embed a time reference. If str, it is used directly.
-            sender (SenderRecipient, optional):
-                Typically `MessageRole.SYSTEM`.
-            recipient (SenderRecipient, optional):
-                Typically `MessageRole.ASSISTANT`.
-            template (Template|str|None):
-                An optional custom template for rendering.
-            system (Any):
-                Alias for `system_message` (deprecated).
-            **kwargs:
-                Additional content merged into the final dict.
+    @property
+    def rendered(self) -> str:
+        """Render system message with optional datetime."""
+        parts = []
+        if self.system_datetime:
+            parts.append(f"System Time: {self.system_datetime}")
+        parts.append(self.system_message)
+        return "\n\n".join(parts)
 
-        Returns:
-            System: A newly created system-level message.
-        """
-        if system and system_message:
-            raise ValueError(
-                "Cannot provide both system and system_message arguments."
-                "as they are alias, and `system` is deprecated"
-            )
-        system_message = system_message or system
-
-        content = format_system_content(
-            system_datetime=system_datetime, system_message=system_message
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> "SystemContent":
+        """Construct SystemContent from dictionary."""
+        system_message = data.get(
+            "system_message",
+            cls.__dataclass_fields__["system_message"].default,
         )
-        content.update(kwargs)
-        params = {
-            "role": MessageRole.SYSTEM,
-            "content": content,
-            "sender": sender or MessageRole.SYSTEM,
-            "recipient": recipient or MessageRole.ASSISTANT,
-        }
-        if template:
-            params["template"] = template
-        return cls(**params)
+        system_datetime = data.get("system_datetime")
 
-    def update(
-        self,
-        system_message: JsonValue = None,
-        sender: SenderRecipient = None,
-        recipient: SenderRecipient = None,
-        system_datetime: bool | str = None,
-        template: Template | str | None = None,
-        **kwargs,
-    ) -> NoReturn:
-        """
-        Adjust fields of this system message.
+        # Handle datetime generation
+        if system_datetime is True:
+            system_datetime = datetime.now().isoformat(timespec="minutes")
+        elif system_datetime is False or system_datetime is None:
+            system_datetime = None
 
-        Args:
-            system_message (JsonValue):
-                New system message text.
-            sender (SenderRecipient):
-                Updated sender or role.
-            recipient (SenderRecipient):
-                Updated recipient or role.
-            system_datetime (bool|str):
-                If set, embed new datetime info.
-            template (Template|str|None):
-                New template override.
-            **kwargs:
-                Additional fields for self.content.
-        """
-        if any([system_message, system_message]):
-            self.content = format_system_content(
-                system_datetime=system_datetime, system_message=system_message
-            )
-        super().update(
-            sender=sender, recipient=recipient, template=template, **kwargs
+        return cls(
+            system_message=system_message, system_datetime=system_datetime
        )
 
 
-# File: lionagi/protocols/messages/system.py
+class System(RoledMessage):
+    """System-level message setting context or policy for the conversation."""
+
+    role: MessageRole = MessageRole.SYSTEM
+    content: SystemContent = Field(default_factory=SystemContent)
+    sender: SenderRecipient | None = MessageRole.SYSTEM
+    recipient: SenderRecipient | None = MessageRole.ASSISTANT
+
+    @field_validator("content", mode="before")
+    def _validate_content(cls, v):
+        if v is None:
+            return SystemContent()
+        if isinstance(v, dict):
+            return SystemContent.from_dict(v)
+        if isinstance(v, SystemContent):
+            return v
+        raise TypeError("content must be dict or SystemContent instance")
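
Note: the hunk above (lionagi/protocols/messages/system.py) replaces the Jinja-template-based System message with a typed SystemContent dataclass. The following is a minimal usage sketch of the new API, illustrative only; it assumes the import path matches the file shown and that the message base class supplies defaults for its remaining fields.

# Illustrative sketch only; assumes the module layout shown in the hunk above.
from lionagi.protocols.messages.system import System, SystemContent

# A plain dict is coerced by the "content" field validator via
# SystemContent.from_dict; system_datetime=True stamps the current time.
msg = System(content={
    "system_message": "You are a terse code reviewer.",
    "system_datetime": True,
})
print(msg.content.rendered)  # "System Time: <now>" blank line, then the message

# A ready-made SystemContent also passes validation; any other type raises TypeError.
msg = System(content=SystemContent(system_message="You are a helpful AI assistant."))
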
@@ -1,3 +1,2 @@
-# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0

@@ -1,5 +1,4 @@
-# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from typing import Any

@@ -1,5 +1,4 @@
-# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from pydantic import BaseModel

@@ -1,5 +1,4 @@
-# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from ._concepts import Collective, Communicatable, Condition, Manager

@@ -26,7 +25,6 @@ from .mail.manager import MailManager
 from .messages.base import (
     MESSAGE_FIELDS,
     MessageField,
-    MessageFlag,
     MessageRole,
     validate_sender_recipient,
 )

@@ -83,7 +81,6 @@ __all__ = (
     "PackageCategory",
     "MESSAGE_FIELDS",
     "MessageField",
-    "MessageFlag",
     "MessageRole",
     "validate_sender_recipient",
     "ActionRequest",

@@ -1,5 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from .endpoint import Endpoint

@@ -1,5 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 import logging

@@ -1,5 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 import asyncio

@@ -123,10 +122,7 @@ class Endpoint:
             # Validate the filtered payload
             payload = self.config.validate_payload(filtered_payload)
         else:
-            # If no request_options, we still need to remove obvious non-API params
-            # These are parameters that are never part of any API payload
             non_api_params = {
-                "task",
                 "provider",
                 "base_url",
                 "endpoint",

@@ -134,13 +130,9 @@
                 "api_key",
                 "queue_capacity",
                 "capacity_refresh_time",
-                "interval",
-                "limit_requests",
-                "limit_tokens",
                 "invoke_with_endpoint",
                 "extra_headers",
                 "headers",
-                "cache_control",
                 "include_token_usage_to_model",
                 "chat_model",
                 "imodel",

@@ -149,7 +141,6 @@
                 "aggregation_count",
                 "action_strategy",
                 "parse_model",
-                "reason",
                 "actions",
                 "return_operative",
                 "operative_model",

@@ -1,24 +1,6 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
-"""
-NVIDIA NIM endpoint configurations.
-
-This module provides endpoint configurations for NVIDIA NIM (NVIDIA Inference Microservices),
-which offers GPU-accelerated inference for various AI models through an OpenAI-compatible API.
-
-NVIDIA NIM features:
-- OpenAI-compatible API endpoints
-- GPU-accelerated inference
-- Support for various open-source models (Llama, Mistral, etc.)
-- Both cloud-hosted and self-hosted options
-- Free tier with 1000 credits for development
-
-API Documentation: https://docs.nvidia.com/nim/
-Build Portal: https://build.nvidia.com/
-"""
-
 from lionagi.config import settings
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig

@@ -69,15 +51,8 @@ NVIDIA_NIM_EMBED_ENDPOINT_CONFIG = _get_nvidia_nim_config(
 class NvidiaNimChatEndpoint(Endpoint):
     """NVIDIA NIM chat completion endpoint.
 
-    Supports various open-source models including:
-    - meta/llama3-8b-instruct
-    - meta/llama3-70b-instruct
-    - meta/llama3.1-405b-instruct
-    - mistralai/mixtral-8x7b-instruct-v0.1
-    - google/gemma-7b
-    - And many more...
-
     Get your API key from: https://build.nvidia.com/
+    API Documentation: https://docs.nvidia.com/nim/
     """
 
     def __init__(self, config=None, **kwargs):

@@ -1,5 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 """

@@ -19,45 +18,21 @@ from pydantic import BaseModel
 from lionagi.config import settings
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
-from lionagi.service.third_party.openai_model_names import (
-    REASONING_MODELS,
-    is_reasoning_model,
-)
-
-__all__ = (
-    "OpenaiChatEndpoint",
-    "OpenaiResponseEndpoint",
-    "OpenaiEmbedEndpoint",
-    "OpenrouterChatEndpoint",
-    "GroqChatEndpoint",
-    "OPENAI_CHAT_ENDPOINT_CONFIG",
-    "OPENAI_RESPONSE_ENDPOINT_CONFIG",
-    "OPENAI_EMBEDDING_ENDPOINT_CONFIG",
-    "OPENROUTER_CHAT_ENDPOINT_CONFIG",
-    "OPENROUTER_GEMINI_ENDPOINT_CONFIG",
-    "GROQ_CHAT_ENDPOINT_CONFIG",
-    "REASONING_MODELS",
-    "REASONING_NOT_SUPPORT_PARAMS",
+from lionagi.service.third_party.openai_models import (
+    OpenAIChatCompletionsRequest,
 )
 
 
-def _get_openai_config(**kwargs):
-    """Create OpenAI endpoint configuration with defaults."""
-    config = dict(
-        name="openai_chat",
-        provider="openai",
-        base_url="https://api.openai.com/v1",
-        endpoint="chat/completions",
-        kwargs={"model": settings.OPENAI_DEFAULT_MODEL},
-        api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
-        auth_type="bearer",
-        content_type="application/json",
-        method="POST",
-        requires_tokens=True,
-        # NOTE: OpenAI models have incorrect role literals, only use for param validation
-        # request_options=CreateChatCompletionRequest,
-    )
-    config.update(kwargs)
+def _get_oai_config(**kw):
+    config = {
+        "provider": "openai",
+        "base_url": "https://api.openai.com/v1",
+        "auth_type": "bearer",
+        "content_type": "application/json",
+        "method": "POST",
+        "api_key": settings.OPENAI_API_KEY or "dummy-key-for-testing",
+    }
+    config.update(kw)
     return EndpointConfig(**config)
 
 

@@ -73,8 +48,7 @@ def _get_openrouter_config(**kwargs):
         auth_type="bearer",
         content_type="application/json",
         method="POST",
-        # NOTE: OpenRouter uses OpenAI-compatible format
-        # request_options=CreateChatCompletionRequest,
+        request_options=OpenAIChatCompletionsRequest,
     )
     config.update(kwargs)
     return EndpointConfig(**config)

@@ -92,48 +66,21 @@ def _get_groq_config(**kwargs):
         auth_type="bearer",
         content_type="application/json",
         method="POST",
+        request_options=OpenAIChatCompletionsRequest,
     )
     config.update(kwargs)
     return EndpointConfig(**config)
 
 
-# OpenAI endpoints
-OPENAI_CHAT_ENDPOINT_CONFIG = _get_openai_config()
-
-OPENAI_RESPONSE_ENDPOINT_CONFIG = _get_openai_config(
-    name="openai_response",
-    endpoint="responses",
-)
-
-OPENAI_EMBEDDING_ENDPOINT_CONFIG = _get_openai_config(
-    name="openai_embed",
-    endpoint="embeddings",
-    kwargs={"model": "text-embedding-3-small"},
-)
-
-# OpenRouter endpoints
-OPENROUTER_CHAT_ENDPOINT_CONFIG = _get_openrouter_config()
-
-OPENROUTER_GEMINI_ENDPOINT_CONFIG = _get_openrouter_config(
-    name="openrouter_gemini",
-    kwargs={"model": "google/gemini-2.5-flash"},
-)
-
-# Groq endpoints
-GROQ_CHAT_ENDPOINT_CONFIG = _get_groq_config()
-
-REASONING_NOT_SUPPORT_PARAMS = (
-    "temperature",
-    "top_p",
-    "logit_bias",
-    "logprobs",
-    "top_logprobs",
-)
-
-
 class OpenaiChatEndpoint(Endpoint):
     def __init__(self, config=None, **kwargs):
-        config = config or _get_openai_config()
+        config = config or _get_oai_config(
+            name="oai_chat",
+            endpoint="chat/completions",
+            request_options=OpenAIChatCompletionsRequest,
+            kwargs={"model": settings.OPENAI_DEFAULT_MODEL},
+            requires_tokens=True,
+        )
         super().__init__(config, **kwargs)
 
     def create_payload(

@@ -146,34 +93,20 @@ class OpenaiChatEndpoint(Endpoint):
         payload, headers = super().create_payload(
             request, extra_headers, **kwargs
         )
-
-        # Handle reasoning models
-        model = payload.get("model")
-        if (
-            model
-            and is_reasoning_model(model)
-            and not model.startswith("gpt-5")
-        ):
-            # Remove unsupported parameters for reasoning models
-            for param in REASONING_NOT_SUPPORT_PARAMS:
-                payload.pop(param, None)
-
-            # Convert system role to developer role for reasoning models
-            if "messages" in payload and payload["messages"]:
-                if payload["messages"][0].get("role") == "system":
-                    payload["messages"][0]["role"] = "developer"
-        else:
-            # Remove reasoning_effort for non-reasoning models
-            payload.pop("reasoning_effort", None)
+        # Convert system role to developer role for reasoning models
+        if "messages" in payload and payload["messages"]:
+            if payload["messages"][0].get("role") == "system":
+                payload["messages"][0]["role"] = "developer"
 
         return (payload, headers)
 
 
 class OpenaiResponseEndpoint(Endpoint):
     def __init__(self, config=None, **kwargs):
-        config = config or _get_openai_config(
+        config = config or _get_oai_config(
             name="openai_response",
             endpoint="responses",
+            requires_tokens=True,
         )
         super().__init__(config, **kwargs)
 

@@ -192,9 +125,10 @@ class GroqChatEndpoint(Endpoint):
 
 class OpenaiEmbedEndpoint(Endpoint):
     def __init__(self, config=None, **kwargs):
-        config = config or _get_openai_config(
+        config = config or _get_oai_config(
             name="openai_embed",
             endpoint="embeddings",
             kwargs={"model": "text-embedding-3-small"},
+            requires_tokens=True,
         )
         super().__init__(config, **kwargs)
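
Note: across these hunks the module-level *_ENDPOINT_CONFIG constants and the reasoning-model parameter stripping are dropped; each OpenAI-style endpoint now builds its EndpointConfig through the private _get_oai_config helper and validates payloads with the Pydantic OpenAIChatCompletionsRequest model from lionagi/service/third_party/openai_models.py. A hedged sketch of how that could look to a caller, mirroring the constructor defaults above (the concrete model name is a placeholder, not taken from the diff):

# Illustrative sketch only; mirrors the hunks above rather than documented API.
from lionagi.service.connections.providers.oai_ import (
    OpenaiChatEndpoint,
    _get_oai_config,
)
from lionagi.service.third_party.openai_models import OpenAIChatCompletionsRequest

# Default construction: the endpoint assembles its own config via _get_oai_config.
chat = OpenaiChatEndpoint()

# Or supply an explicit config; request_options keeps payload validation
# against the OpenAIChatCompletionsRequest schema.
config = _get_oai_config(
    name="oai_chat",
    endpoint="chat/completions",
    request_options=OpenAIChatCompletionsRequest,
    kwargs={"model": "gpt-4.1-mini"},  # placeholder model name
    requires_tokens=True,
)
chat = OpenaiChatEndpoint(config=config)
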
@@ -1,5 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 """

@@ -25,6 +24,8 @@ _HAS_OLLAMA = is_import_installed("ollama")
 
 def _get_ollama_config(**kwargs):
     """Create Ollama endpoint configuration with defaults."""
+    from ...third_party.openai_models import OpenAIChatCompletionsRequest
+
     config = dict(
         name="ollama_chat",
         provider="ollama",

@@ -37,8 +38,7 @@ def _get_ollama_config(**kwargs):
         content_type="application/json",
         auth_type="none",  # No authentication
         default_headers={},  # No auth headers needed
-        # NOTE: Not using request_options due to OpenAI model role literal issues
-        # request_options=CreateChatCompletionRequest,
+        request_options=OpenAIChatCompletionsRequest,
     )
     config.update(kwargs)
     return EndpointConfig(**config)

@@ -1,5 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
-#
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 """

@@ -1,4 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from ._types import AssosiatedEventInfo, HookDict, HookEventTypes

@@ -1,4 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from __future__ import annotations

@@ -1,4 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from __future__ import annotations

@@ -1,4 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from __future__ import annotations

@@ -1,4 +1,4 @@
-# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
 from __future__ import annotations