lionagi 0.17.11__py3-none-any.whl → 0.18.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. lionagi/_errors.py +0 -5
  2. lionagi/fields.py +83 -0
  3. lionagi/libs/schema/minimal_yaml.py +98 -0
  4. lionagi/ln/__init__.py +3 -1
  5. lionagi/ln/concurrency/primitives.py +4 -4
  6. lionagi/ln/concurrency/task.py +1 -0
  7. lionagi/ln/types.py +32 -5
  8. lionagi/models/field_model.py +21 -4
  9. lionagi/models/hashable_model.py +2 -3
  10. lionagi/operations/ReAct/ReAct.py +475 -238
  11. lionagi/operations/ReAct/utils.py +3 -0
  12. lionagi/operations/act/act.py +206 -0
  13. lionagi/operations/builder.py +5 -7
  14. lionagi/operations/chat/chat.py +130 -114
  15. lionagi/operations/communicate/communicate.py +101 -42
  16. lionagi/operations/fields.py +380 -0
  17. lionagi/operations/flow.py +8 -10
  18. lionagi/operations/interpret/interpret.py +65 -20
  19. lionagi/operations/node.py +4 -4
  20. lionagi/operations/operate/operate.py +216 -108
  21. lionagi/{protocols/operatives → operations/operate}/operative.py +4 -5
  22. lionagi/{protocols/operatives → operations/operate}/step.py +34 -39
  23. lionagi/operations/parse/parse.py +170 -142
  24. lionagi/operations/select/select.py +79 -18
  25. lionagi/operations/select/utils.py +8 -2
  26. lionagi/operations/types.py +119 -23
  27. lionagi/protocols/action/manager.py +5 -6
  28. lionagi/protocols/contracts.py +2 -2
  29. lionagi/protocols/generic/__init__.py +22 -0
  30. lionagi/protocols/generic/element.py +36 -127
  31. lionagi/protocols/generic/log.py +3 -2
  32. lionagi/protocols/generic/pile.py +9 -10
  33. lionagi/protocols/generic/progression.py +23 -22
  34. lionagi/protocols/graph/edge.py +6 -5
  35. lionagi/protocols/ids.py +6 -49
  36. lionagi/protocols/messages/__init__.py +29 -0
  37. lionagi/protocols/messages/action_request.py +86 -184
  38. lionagi/protocols/messages/action_response.py +73 -131
  39. lionagi/protocols/messages/assistant_response.py +130 -159
  40. lionagi/protocols/messages/base.py +31 -22
  41. lionagi/protocols/messages/instruction.py +280 -625
  42. lionagi/protocols/messages/manager.py +112 -62
  43. lionagi/protocols/messages/message.py +87 -197
  44. lionagi/protocols/messages/system.py +52 -123
  45. lionagi/protocols/types.py +1 -13
  46. lionagi/service/connections/__init__.py +3 -0
  47. lionagi/service/connections/endpoint.py +0 -8
  48. lionagi/service/connections/providers/claude_code_cli.py +3 -2
  49. lionagi/service/connections/providers/oai_.py +29 -94
  50. lionagi/service/connections/providers/ollama_.py +3 -2
  51. lionagi/service/hooks/_types.py +1 -1
  52. lionagi/service/hooks/_utils.py +1 -1
  53. lionagi/service/hooks/hook_event.py +3 -8
  54. lionagi/service/hooks/hook_registry.py +5 -5
  55. lionagi/service/hooks/hooked_event.py +63 -3
  56. lionagi/service/imodel.py +24 -20
  57. lionagi/service/third_party/claude_code.py +3 -3
  58. lionagi/service/third_party/openai_models.py +435 -0
  59. lionagi/service/token_calculator.py +1 -94
  60. lionagi/session/branch.py +190 -400
  61. lionagi/session/session.py +8 -99
  62. lionagi/tools/file/reader.py +2 -2
  63. lionagi/version.py +1 -1
  64. {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/METADATA +6 -6
  65. lionagi-0.18.1.dist-info/RECORD +164 -0
  66. lionagi/fields/__init__.py +0 -47
  67. lionagi/fields/action.py +0 -188
  68. lionagi/fields/base.py +0 -153
  69. lionagi/fields/code.py +0 -239
  70. lionagi/fields/file.py +0 -234
  71. lionagi/fields/instruct.py +0 -135
  72. lionagi/fields/reason.py +0 -55
  73. lionagi/fields/research.py +0 -52
  74. lionagi/operations/_act/act.py +0 -86
  75. lionagi/operations/brainstorm/__init__.py +0 -2
  76. lionagi/operations/brainstorm/brainstorm.py +0 -498
  77. lionagi/operations/brainstorm/prompt.py +0 -11
  78. lionagi/operations/instruct/__init__.py +0 -2
  79. lionagi/operations/instruct/instruct.py +0 -28
  80. lionagi/operations/plan/__init__.py +0 -6
  81. lionagi/operations/plan/plan.py +0 -386
  82. lionagi/operations/plan/prompt.py +0 -25
  83. lionagi/operations/utils.py +0 -45
  84. lionagi/protocols/forms/__init__.py +0 -2
  85. lionagi/protocols/forms/base.py +0 -85
  86. lionagi/protocols/forms/flow.py +0 -79
  87. lionagi/protocols/forms/form.py +0 -86
  88. lionagi/protocols/forms/report.py +0 -48
  89. lionagi/protocols/mail/__init__.py +0 -2
  90. lionagi/protocols/mail/exchange.py +0 -220
  91. lionagi/protocols/mail/mail.py +0 -51
  92. lionagi/protocols/mail/mailbox.py +0 -103
  93. lionagi/protocols/mail/manager.py +0 -218
  94. lionagi/protocols/mail/package.py +0 -101
  95. lionagi/protocols/messages/templates/README.md +0 -28
  96. lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
  97. lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
  98. lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
  99. lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
  100. lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
  101. lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
  102. lionagi/protocols/operatives/__init__.py +0 -2
  103. lionagi/service/connections/providers/types.py +0 -28
  104. lionagi/service/third_party/openai_model_names.py +0 -198
  105. lionagi/service/types.py +0 -58
  106. lionagi-0.17.11.dist-info/RECORD +0 -199
  107. /lionagi/operations/{_act → act}/__init__.py +0 -0
  108. {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/WHEEL +0 -0
  109. {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/licenses/LICENSE +0 -0
lionagi/protocols/messages/system.py

@@ -1,144 +1,73 @@
  # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
  # SPDX-License-Identifier: Apache-2.0

+ from dataclasses import dataclass
  from datetime import datetime
- from typing import Any, NoReturn
+ from typing import Any

- from pydantic import JsonValue
- from typing_extensions import Self, override
+ from pydantic import Field, field_validator

  from .base import SenderRecipient
- from .message import MessageRole, RoledMessage, Template, jinja_env
+ from .message import MessageContent, MessageRole, RoledMessage

- __all__ = ("System",)

+ @dataclass(slots=True)
+ class SystemContent(MessageContent):
+     """Content for system messages.

- def format_system_content(
-     system_datetime: bool | str | None,
-     system_message: str,
- ) -> dict:
+     Fields:
+         system_message: Main system instruction text
+         system_datetime: Optional datetime string
      """
-     Insert optional datetime string into the system message content.

-     Args:
-         system_datetime (bool|str|None):
-             If True, embed current time. If str, use as time. If None, omit.
-         system_message (str):
-             The main system message text.
-
-     Returns:
-         dict: The combined system content.
-     """
-     content: dict = {"system_message": system_message}
-     if system_datetime:
-         if isinstance(system_datetime, str):
-             content["system_datetime"] = system_datetime
-         else:
-             content["system_datetime"] = datetime.now().isoformat(
-                 timespec="minutes"
-             )
-     return content
-
-
- class System(RoledMessage):
-     """
-     A specialized message that sets a *system-level* context or policy.
-     Usually the first in a conversation, instructing the AI about general
-     constraints or identity.
-     """
-
-     template: str | Template | None = jinja_env.get_template(
-         "system_message.jinja2"
+     system_message: str = (
+         "You are a helpful AI assistant. Let's think step by step."
      )
+     system_datetime: str | None = None

-     @override
-     @classmethod
-     def create(
-         cls,
-         system_message="You are a helpful AI assistant. Let's think step by step.",
-         system_datetime=None,
-         sender: SenderRecipient = None,
-         recipient: SenderRecipient = None,
-         template=None,
-         system: Any = None,
-         **kwargs,
-     ) -> Self:
-         """
-         Construct a system message with optional datetime annotation.
-
-         Args:
-             system_message (str):
-                 The main text instructing the AI about behavior/identity.
-             system_datetime (bool|str, optional):
-                 If True or str, embed a time reference. If str, it is used directly.
-             sender (SenderRecipient, optional):
-                 Typically `MessageRole.SYSTEM`.
-             recipient (SenderRecipient, optional):
-                 Typically `MessageRole.ASSISTANT`.
-             template (Template|str|None):
-                 An optional custom template for rendering.
-             system (Any):
-                 Alias for `system_message` (deprecated).
-             **kwargs:
-                 Additional content merged into the final dict.
+     @property
+     def rendered(self) -> str:
+         """Render system message with optional datetime."""
+         parts = []
+         if self.system_datetime:
+             parts.append(f"System Time: {self.system_datetime}")
+         parts.append(self.system_message)
+         return "\n\n".join(parts)

-         Returns:
-             System: A newly created system-level message.
-         """
-         if system and system_message:
-             raise ValueError(
-                 "Cannot provide both system and system_message arguments."
-                 "as they are alias, and `system` is deprecated"
-             )
-         system_message = system_message or system
-
-         content = format_system_content(
-             system_datetime=system_datetime, system_message=system_message
+     @classmethod
+     def from_dict(cls, data: dict[str, Any]) -> "SystemContent":
+         """Construct SystemContent from dictionary."""
+         system_message = data.get(
+             "system_message",
+             cls.__dataclass_fields__["system_message"].default,
          )
-         content.update(kwargs)
-         params = {
-             "role": MessageRole.SYSTEM,
-             "content": content,
-             "sender": sender or MessageRole.SYSTEM,
-             "recipient": recipient or MessageRole.ASSISTANT,
-         }
-         if template:
-             params["template"] = template
-         return cls(**params)
+         system_datetime = data.get("system_datetime")

-     def update(
-         self,
-         system_message: JsonValue = None,
-         sender: SenderRecipient = None,
-         recipient: SenderRecipient = None,
-         system_datetime: bool | str = None,
-         template: Template | str | None = None,
-         **kwargs,
-     ) -> NoReturn:
-         """
-         Adjust fields of this system message.
+         # Handle datetime generation
+         if system_datetime is True:
+             system_datetime = datetime.now().isoformat(timespec="minutes")
+         elif system_datetime is False or system_datetime is None:
+             system_datetime = None

-         Args:
-             system_message (JsonValue):
-                 New system message text.
-             sender (SenderRecipient):
-                 Updated sender or role.
-             recipient (SenderRecipient):
-                 Updated recipient or role.
-             system_datetime (bool|str):
-                 If set, embed new datetime info.
-             template (Template|str|None):
-                 New template override.
-             **kwargs:
-                 Additional fields for self.content.
-         """
-         if any([system_message, system_message]):
-             self.content = format_system_content(
-                 system_datetime=system_datetime, system_message=system_message
-             )
-         super().update(
-             sender=sender, recipient=recipient, template=template, **kwargs
+         return cls(
+             system_message=system_message, system_datetime=system_datetime
          )


- # File: lionagi/protocols/messages/system.py
+ class System(RoledMessage):
+     """System-level message setting context or policy for the conversation."""
+
+     role: MessageRole = MessageRole.SYSTEM
+     content: SystemContent = Field(default_factory=SystemContent)
+     sender: SenderRecipient | None = MessageRole.SYSTEM
+     recipient: SenderRecipient | None = MessageRole.ASSISTANT
+
+     @field_validator("content", mode="before")
+     def _validate_content(cls, v):
+         if v is None:
+             return SystemContent()
+         if isinstance(v, dict):
+             return SystemContent.from_dict(v)
+         if isinstance(v, SystemContent):
+             return v
+         raise TypeError("content must be dict or SystemContent instance")
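For orientation, a minimal usage sketch of the new content model shown above. It assumes only what the hunk itself defines (dict content is normalized through `SystemContent.from_dict`, and `system_datetime=True` is stamped with the current time); anything else, such as the defaults `RoledMessage` supplies, is an assumption.

```python
# Sketch based on the classes added in this hunk (lionagi 0.18.1).
from lionagi.protocols.messages.system import System, SystemContent

# dict content passes through the "before" validator -> SystemContent.from_dict
msg = System(content={"system_message": "Answer concisely.", "system_datetime": True})

assert isinstance(msg.content, SystemContent)
print(msg.content.rendered)
# System Time: <current ISO timestamp>
#
# Answer concisely.
```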
lionagi/protocols/types.py

@@ -5,7 +5,7 @@ from ._concepts import Collective, Communicatable, Condition, Manager
  from ._concepts import Observable as LegacyObservable
  from ._concepts import Observer, Ordering, Relational, Sendable
  from .contracts import Observable, ObservableProto
- from .generic.element import ID, Element, IDError, IDType, validate_order
+ from .generic.element import ID, Element, validate_order
  from .generic.event import Event, EventStatus, Execution
  from .generic.log import (
      DataLogger,
@@ -20,12 +20,9 @@ from .generic.progression import Progression, prog
  from .graph.edge import EdgeCondition
  from .graph.graph import Edge, Graph, Node
  from .ids import canonical_id, to_uuid
- from .mail.exchange import Exchange, Mail, Mailbox, Package, PackageCategory
- from .mail.manager import MailManager
  from .messages.base import (
      MESSAGE_FIELDS,
      MessageField,
-     MessageFlag,
      MessageRole,
      validate_sender_recipient,
  )
@@ -56,8 +53,6 @@ __all__ = (
      "to_uuid",  # ID conversion utility
      "ID",
      "Element",
-     "IDError",
-     "IDType",
      "validate_order",
      "Event",
      "EventStatus",
@@ -75,14 +70,8 @@ __all__ = (
      "Edge",
      "Graph",
      "Node",
-     "Exchange",
-     "Mail",
-     "Mailbox",
-     "Package",
-     "PackageCategory",
      "MESSAGE_FIELDS",
      "MessageField",
-     "MessageFlag",
      "MessageRole",
      "validate_sender_recipient",
      "ActionRequest",
@@ -93,7 +82,6 @@ __all__ = (
      "RoledMessage",
      "SenderRecipient",
      "System",
-     "MailManager",
      "DataLogger",
      "DataLoggerConfig",
  )
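Because the mail protocols, `MessageFlag`, and the `IDError`/`IDType` re-exports are gone (the mail subpackage itself is deleted in this release, per the file list), downstream code should narrow its imports to the names that remain in `__all__`. A hedged migration sketch:

```python
# 0.17.x imports that no longer resolve from this module in 0.18.1:
# from lionagi.protocols.types import MailManager, MessageFlag, IDType

# 0.18.1: stick to names still re-exported in the __all__ shown above.
from lionagi.protocols.types import Element, MessageRole, System, validate_order
```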
lionagi/service/connections/__init__.py

@@ -1,6 +1,7 @@
  # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
  # SPDX-License-Identifier: Apache-2.0

+ from .api_calling import APICalling
  from .endpoint import Endpoint
  from .endpoint_config import EndpointConfig
  from .header_factory import HeaderFactory
@@ -11,4 +12,6 @@ __all__ = (
      "EndpointConfig",
      "HeaderFactory",
      "match_endpoint",
+     "APICalling",
+     "match_endpoint",
  )
lionagi/service/connections/endpoint.py

@@ -122,10 +122,7 @@ class Endpoint:
              # Validate the filtered payload
              payload = self.config.validate_payload(filtered_payload)
          else:
-             # If no request_options, we still need to remove obvious non-API params
-             # These are parameters that are never part of any API payload
              non_api_params = {
-                 "task",
                  "provider",
                  "base_url",
                  "endpoint",
@@ -133,13 +130,9 @@ class Endpoint:
                  "api_key",
                  "queue_capacity",
                  "capacity_refresh_time",
-                 "interval",
-                 "limit_requests",
-                 "limit_tokens",
                  "invoke_with_endpoint",
                  "extra_headers",
                  "headers",
-                 "cache_control",
                  "include_token_usage_to_model",
                  "chat_model",
                  "imodel",
@@ -148,7 +141,6 @@ class Endpoint:
                  "aggregation_count",
                  "action_strategy",
                  "parse_model",
-                 "reason",
                  "actions",
                  "return_operative",
                  "operative_model",
lionagi/service/connections/providers/claude_code_cli.py

@@ -84,8 +84,9 @@ class ClaudeCodeCLIEndpoint(Endpoint):
      async def stream(
          self, request: dict | BaseModel, **kwargs
      ) -> AsyncIterator[ClaudeChunk | dict | ClaudeSession]:
-         payload, _ = self.create_payload(request, **kwargs)["request"]
-         async for chunk in stream_claude_code_cli(payload):
+         payload, _ = self.create_payload(request, **kwargs)
+         request_obj = payload["request"]
+         async for chunk in stream_claude_code_cli(request_obj):
              yield chunk

      async def _call(
lionagi/service/connections/providers/oai_.py

@@ -18,45 +18,21 @@ from pydantic import BaseModel
  from lionagi.config import settings
  from lionagi.service.connections.endpoint import Endpoint
  from lionagi.service.connections.endpoint_config import EndpointConfig
- from lionagi.service.third_party.openai_model_names import (
-     REASONING_MODELS,
-     is_reasoning_model,
- )
-
- __all__ = (
-     "OpenaiChatEndpoint",
-     "OpenaiResponseEndpoint",
-     "OpenaiEmbedEndpoint",
-     "OpenrouterChatEndpoint",
-     "GroqChatEndpoint",
-     "OPENAI_CHAT_ENDPOINT_CONFIG",
-     "OPENAI_RESPONSE_ENDPOINT_CONFIG",
-     "OPENAI_EMBEDDING_ENDPOINT_CONFIG",
-     "OPENROUTER_CHAT_ENDPOINT_CONFIG",
-     "OPENROUTER_GEMINI_ENDPOINT_CONFIG",
-     "GROQ_CHAT_ENDPOINT_CONFIG",
-     "REASONING_MODELS",
-     "REASONING_NOT_SUPPORT_PARAMS",
+ from lionagi.service.third_party.openai_models import (
+     OpenAIChatCompletionsRequest,
  )


- def _get_openai_config(**kwargs):
-     """Create OpenAI endpoint configuration with defaults."""
-     config = dict(
-         name="openai_chat",
-         provider="openai",
-         base_url="https://api.openai.com/v1",
-         endpoint="chat/completions",
-         kwargs={"model": settings.OPENAI_DEFAULT_MODEL},
-         api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
-         auth_type="bearer",
-         content_type="application/json",
-         method="POST",
-         requires_tokens=True,
-         # NOTE: OpenAI models have incorrect role literals, only use for param validation
-         # request_options=CreateChatCompletionRequest,
-     )
-     config.update(kwargs)
+ def _get_oai_config(**kw):
+     config = {
+         "provider": "openai",
+         "base_url": "https://api.openai.com/v1",
+         "auth_type": "bearer",
+         "content_type": "application/json",
+         "method": "POST",
+         "api_key": settings.OPENAI_API_KEY or "dummy-key-for-testing",
+     }
+     config.update(kw)
      return EndpointConfig(**config)


@@ -72,8 +48,7 @@ def _get_openrouter_config(**kwargs):
          auth_type="bearer",
          content_type="application/json",
          method="POST",
-         # NOTE: OpenRouter uses OpenAI-compatible format
-         # request_options=CreateChatCompletionRequest,
+         request_options=OpenAIChatCompletionsRequest,
      )
      config.update(kwargs)
      return EndpointConfig(**config)
@@ -91,48 +66,21 @@ def _get_groq_config(**kwargs):
          auth_type="bearer",
          content_type="application/json",
          method="POST",
+         request_options=OpenAIChatCompletionsRequest,
      )
      config.update(kwargs)
      return EndpointConfig(**config)


- # OpenAI endpoints
- OPENAI_CHAT_ENDPOINT_CONFIG = _get_openai_config()
-
- OPENAI_RESPONSE_ENDPOINT_CONFIG = _get_openai_config(
-     name="openai_response",
-     endpoint="responses",
- )
-
- OPENAI_EMBEDDING_ENDPOINT_CONFIG = _get_openai_config(
-     name="openai_embed",
-     endpoint="embeddings",
-     kwargs={"model": "text-embedding-3-small"},
- )
-
- # OpenRouter endpoints
- OPENROUTER_CHAT_ENDPOINT_CONFIG = _get_openrouter_config()
-
- OPENROUTER_GEMINI_ENDPOINT_CONFIG = _get_openrouter_config(
-     name="openrouter_gemini",
-     kwargs={"model": "google/gemini-2.5-flash"},
- )
-
- # Groq endpoints
- GROQ_CHAT_ENDPOINT_CONFIG = _get_groq_config()
-
- REASONING_NOT_SUPPORT_PARAMS = (
-     "temperature",
-     "top_p",
-     "logit_bias",
-     "logprobs",
-     "top_logprobs",
- )
-
-
  class OpenaiChatEndpoint(Endpoint):
      def __init__(self, config=None, **kwargs):
-         config = config or _get_openai_config()
+         config = config or _get_oai_config(
+             name="oai_chat",
+             endpoint="chat/completions",
+             request_options=OpenAIChatCompletionsRequest,
+             kwargs={"model": settings.OPENAI_DEFAULT_MODEL},
+             requires_tokens=True,
+         )
          super().__init__(config, **kwargs)

      def create_payload(
@@ -145,34 +93,20 @@ class OpenaiChatEndpoint(Endpoint):
          payload, headers = super().create_payload(
              request, extra_headers, **kwargs
          )
-
-         # Handle reasoning models
-         model = payload.get("model")
-         if (
-             model
-             and is_reasoning_model(model)
-             and not model.startswith("gpt-5")
-         ):
-             # Remove unsupported parameters for reasoning models
-             for param in REASONING_NOT_SUPPORT_PARAMS:
-                 payload.pop(param, None)
-
-             # Convert system role to developer role for reasoning models
-             if "messages" in payload and payload["messages"]:
-                 if payload["messages"][0].get("role") == "system":
-                     payload["messages"][0]["role"] = "developer"
-         else:
-             # Remove reasoning_effort for non-reasoning models
-             payload.pop("reasoning_effort", None)
+         # Convert system role to developer role for reasoning models
+         if "messages" in payload and payload["messages"]:
+             if payload["messages"][0].get("role") == "system":
+                 payload["messages"][0]["role"] = "developer"

          return (payload, headers)


  class OpenaiResponseEndpoint(Endpoint):
      def __init__(self, config=None, **kwargs):
-         config = config or _get_openai_config(
+         config = config or _get_oai_config(
              name="openai_response",
              endpoint="responses",
+             requires_tokens=True,
          )
          super().__init__(config, **kwargs)

@@ -191,9 +125,10 @@ class GroqChatEndpoint(Endpoint):

  class OpenaiEmbedEndpoint(Endpoint):
      def __init__(self, config=None, **kwargs):
-         config = config or _get_openai_config(
+         config = config or _get_oai_config(
              name="openai_embed",
              endpoint="embeddings",
              kwargs={"model": "text-embedding-3-small"},
+             requires_tokens=True,
          )
          super().__init__(config, **kwargs)
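Two things appear to follow from this refactor: OpenAI-style payloads are now validated against `OpenAIChatCompletionsRequest` via `request_options`, and `OpenaiChatEndpoint.create_payload` rewrites a leading `system` message to the `developer` role unconditionally instead of gating on a reasoning-model check. A standalone sketch of that role rewrite; the payload values are made up for illustration.

```python
# Isolated sketch of the role conversion in OpenaiChatEndpoint.create_payload above.
payload = {
    "model": "example-model",  # illustrative
    "messages": [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Hi"},
    ],
}

# Convert system role to developer role (applied unconditionally in 0.18.1)
if "messages" in payload and payload["messages"]:
    if payload["messages"][0].get("role") == "system":
        payload["messages"][0]["role"] = "developer"

assert payload["messages"][0]["role"] == "developer"
```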
lionagi/service/connections/providers/ollama_.py

@@ -24,6 +24,8 @@ _HAS_OLLAMA = is_import_installed("ollama")

  def _get_ollama_config(**kwargs):
      """Create Ollama endpoint configuration with defaults."""
+     from ...third_party.openai_models import OpenAIChatCompletionsRequest
+
      config = dict(
          name="ollama_chat",
          provider="ollama",
@@ -36,8 +38,7 @@ def _get_ollama_config(**kwargs):
          content_type="application/json",
          auth_type="none",  # No authentication
          default_headers={},  # No auth headers needed
-         # NOTE: Not using request_options due to OpenAI model role literal issues
-         # request_options=CreateChatCompletionRequest,
+         request_options=OpenAIChatCompletionsRequest,
      )
      config.update(kwargs)
      return EndpointConfig(**config)
lionagi/service/hooks/_types.py

@@ -8,7 +8,7 @@ from typing import TypeVar

  from typing_extensions import TypedDict

- from lionagi.utils import Enum
+ from lionagi.ln.types import Enum

  SC = TypeVar("SC")  # streaming chunk type

lionagi/service/hooks/_utils.py

@@ -6,7 +6,7 @@ from __future__ import annotations
  from anyio import sleep

  from lionagi._errors import ValidationError
- from lionagi.utils import is_coro_func
+ from lionagi.ln import is_coro_func

  from ._types import ALLOWED_HOOKS_TYPES, HookEventTypes

lionagi/service/hooks/hook_event.py

@@ -3,7 +3,7 @@

  from __future__ import annotations

- from typing import TYPE_CHECKING, Any
+ from typing import Any

  import anyio
  from pydantic import Field, PrivateAttr, field_validator
@@ -12,19 +12,14 @@ from lionagi.ln.concurrency import fail_after, get_cancelled_exc_class
  from lionagi.protocols.types import Event, EventStatus

  from ._types import AssosiatedEventInfo, HookEventTypes
-
- if TYPE_CHECKING:
-     from .hook_registry import HookRegistry
- else:
-     # Import at runtime for Pydantic
-     from .hook_registry import HookRegistry
+ from .hook_registry import HookRegistry


  class HookEvent(Event):
      registry: HookRegistry = Field(..., exclude=True)
      hook_type: HookEventTypes
      exit: bool = Field(False, exclude=True)
-     timeout: int = Field(30, exclude=True)
+     timeout: int | float = Field(30, exclude=True)
      params: dict[str, Any] = Field(default_factory=dict, exclude=True)
      event_like: Event | type[Event] = Field(..., exclude=True)
      _should_exit: bool = PrivateAttr(False)
lionagi/service/hooks/hook_registry.py

@@ -6,8 +6,8 @@ from __future__ import annotations
  from typing import Any, TypeVar

  from lionagi.ln.concurrency import get_cancelled_exc_class
+ from lionagi.ln.types import Undefined
  from lionagi.protocols.types import Event, EventStatus
- from lionagi.utils import UNDEFINED

  from ._types import HookDict, HookEventTypes, StreamHandlers
  from ._utils import get_handler, validate_hooks, validate_stream_handlers
@@ -99,7 +99,7 @@ class HookRegistry:
              )
              return (res, False, EventStatus.COMPLETED)
          except get_cancelled_exc_class() as e:
-             return ((UNDEFINED, e), True, EventStatus.CANCELLED)
+             return ((Undefined, e), True, EventStatus.CANCELLED)
          except Exception as e:
              return (e, exit, EventStatus.CANCELLED)

@@ -125,7 +125,7 @@ class HookRegistry:
              )
              return (res, False, EventStatus.COMPLETED)
          except get_cancelled_exc_class() as e:
-             return ((UNDEFINED, e), True, EventStatus.CANCELLED)
+             return ((Undefined, e), True, EventStatus.CANCELLED)
          except Exception as e:
              return (e, exit, EventStatus.CANCELLED)

@@ -147,7 +147,7 @@ class HookRegistry:
              )
              return (res, False, EventStatus.COMPLETED)
          except get_cancelled_exc_class() as e:
-             return ((UNDEFINED, e), True, EventStatus.CANCELLED)
+             return ((Undefined, e), True, EventStatus.CANCELLED)
          except Exception as e:
              return (e, exit, EventStatus.ABORTED)

@@ -171,7 +171,7 @@ class HookRegistry:
              )
              return (res, False, None)
          except get_cancelled_exc_class() as e:
-             return ((UNDEFINED, e), True, EventStatus.CANCELLED)
+             return ((Undefined, e), True, EventStatus.CANCELLED)
          except Exception as e:
              return (e, exit, EventStatus.ABORTED)

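Callers that inspected the old `UNDEFINED` marker in a cancelled hook result should now compare against `Undefined` from `lionagi.ln.types`. A hedged sketch of consuming the `(res, should_exit, status)` triples returned above; the surrounding handling is illustrative, only the sentinel comparison mirrors the diff.

```python
# Illustrative consumer of the (res, should_exit, status) triples shown above.
from lionagi.ln.types import Undefined
from lionagi.protocols.types import EventStatus

def unwrap_hook_result(triple):
    res, should_exit, status = triple
    if status == EventStatus.CANCELLED and isinstance(res, tuple) and res[0] is Undefined:
        # Cancelled before producing a value; res[1] carries the exception.
        return None
    return res
```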