lionagi 0.17.11__py3-none-any.whl → 0.18.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/_errors.py +0 -5
- lionagi/fields.py +83 -0
- lionagi/libs/schema/minimal_yaml.py +98 -0
- lionagi/ln/__init__.py +3 -1
- lionagi/ln/concurrency/primitives.py +4 -4
- lionagi/ln/concurrency/task.py +1 -0
- lionagi/ln/types.py +32 -5
- lionagi/models/field_model.py +21 -4
- lionagi/models/hashable_model.py +2 -3
- lionagi/operations/ReAct/ReAct.py +475 -238
- lionagi/operations/ReAct/utils.py +3 -0
- lionagi/operations/act/act.py +206 -0
- lionagi/operations/builder.py +5 -7
- lionagi/operations/chat/chat.py +130 -114
- lionagi/operations/communicate/communicate.py +101 -42
- lionagi/operations/fields.py +380 -0
- lionagi/operations/flow.py +8 -10
- lionagi/operations/interpret/interpret.py +65 -20
- lionagi/operations/node.py +4 -4
- lionagi/operations/operate/operate.py +216 -108
- lionagi/{protocols/operatives → operations/operate}/operative.py +4 -5
- lionagi/{protocols/operatives → operations/operate}/step.py +34 -39
- lionagi/operations/parse/parse.py +170 -142
- lionagi/operations/select/select.py +79 -18
- lionagi/operations/select/utils.py +8 -2
- lionagi/operations/types.py +119 -23
- lionagi/protocols/action/manager.py +5 -6
- lionagi/protocols/contracts.py +2 -2
- lionagi/protocols/generic/__init__.py +22 -0
- lionagi/protocols/generic/element.py +36 -127
- lionagi/protocols/generic/log.py +3 -2
- lionagi/protocols/generic/pile.py +9 -10
- lionagi/protocols/generic/progression.py +23 -22
- lionagi/protocols/graph/edge.py +6 -5
- lionagi/protocols/ids.py +6 -49
- lionagi/protocols/messages/__init__.py +29 -0
- lionagi/protocols/messages/action_request.py +86 -184
- lionagi/protocols/messages/action_response.py +73 -131
- lionagi/protocols/messages/assistant_response.py +130 -159
- lionagi/protocols/messages/base.py +31 -22
- lionagi/protocols/messages/instruction.py +280 -625
- lionagi/protocols/messages/manager.py +112 -62
- lionagi/protocols/messages/message.py +87 -197
- lionagi/protocols/messages/system.py +52 -123
- lionagi/protocols/types.py +1 -13
- lionagi/service/connections/__init__.py +3 -0
- lionagi/service/connections/endpoint.py +0 -8
- lionagi/service/connections/providers/claude_code_cli.py +3 -2
- lionagi/service/connections/providers/oai_.py +29 -94
- lionagi/service/connections/providers/ollama_.py +3 -2
- lionagi/service/hooks/_types.py +1 -1
- lionagi/service/hooks/_utils.py +1 -1
- lionagi/service/hooks/hook_event.py +3 -8
- lionagi/service/hooks/hook_registry.py +5 -5
- lionagi/service/hooks/hooked_event.py +63 -3
- lionagi/service/imodel.py +24 -20
- lionagi/service/third_party/claude_code.py +3 -3
- lionagi/service/third_party/openai_models.py +435 -0
- lionagi/service/token_calculator.py +1 -94
- lionagi/session/branch.py +190 -400
- lionagi/session/session.py +8 -99
- lionagi/tools/file/reader.py +2 -2
- lionagi/version.py +1 -1
- {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/METADATA +6 -6
- lionagi-0.18.1.dist-info/RECORD +164 -0
- lionagi/fields/__init__.py +0 -47
- lionagi/fields/action.py +0 -188
- lionagi/fields/base.py +0 -153
- lionagi/fields/code.py +0 -239
- lionagi/fields/file.py +0 -234
- lionagi/fields/instruct.py +0 -135
- lionagi/fields/reason.py +0 -55
- lionagi/fields/research.py +0 -52
- lionagi/operations/_act/act.py +0 -86
- lionagi/operations/brainstorm/__init__.py +0 -2
- lionagi/operations/brainstorm/brainstorm.py +0 -498
- lionagi/operations/brainstorm/prompt.py +0 -11
- lionagi/operations/instruct/__init__.py +0 -2
- lionagi/operations/instruct/instruct.py +0 -28
- lionagi/operations/plan/__init__.py +0 -6
- lionagi/operations/plan/plan.py +0 -386
- lionagi/operations/plan/prompt.py +0 -25
- lionagi/operations/utils.py +0 -45
- lionagi/protocols/forms/__init__.py +0 -2
- lionagi/protocols/forms/base.py +0 -85
- lionagi/protocols/forms/flow.py +0 -79
- lionagi/protocols/forms/form.py +0 -86
- lionagi/protocols/forms/report.py +0 -48
- lionagi/protocols/mail/__init__.py +0 -2
- lionagi/protocols/mail/exchange.py +0 -220
- lionagi/protocols/mail/mail.py +0 -51
- lionagi/protocols/mail/mailbox.py +0 -103
- lionagi/protocols/mail/manager.py +0 -218
- lionagi/protocols/mail/package.py +0 -101
- lionagi/protocols/messages/templates/README.md +0 -28
- lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
- lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
- lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
- lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
- lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
- lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
- lionagi/protocols/operatives/__init__.py +0 -2
- lionagi/service/connections/providers/types.py +0 -28
- lionagi/service/third_party/openai_model_names.py +0 -198
- lionagi/service/types.py +0 -58
- lionagi-0.17.11.dist-info/RECORD +0 -199
- /lionagi/operations/{_act → act}/__init__.py +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/WHEEL +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/licenses/LICENSE +0 -0
lionagi/service/hooks/hooked_event.py CHANGED

@@ -5,7 +5,7 @@ import anyio
 from pydantic import PrivateAttr
 
 from lionagi.ln import get_cancelled_exc_class
-from lionagi.protocols.types import DataLogger, Event, EventStatus
+from lionagi.protocols.types import DataLogger, Event, EventStatus
 from lionagi.service.hooks import HookEvent, HookEventTypes
 
 global_hook_logger = DataLogger(
@@ -43,21 +43,44 @@ class HookedEvent(Event):
             self.execution.status = EventStatus.PROCESSING
             if h_ev := self._pre_invoke_hook_event:
                 await h_ev.invoke()
+
+                # Check if hook failed or was cancelled - propagate to main event
+                if h_ev.execution.status in (
+                    EventStatus.FAILED,
+                    EventStatus.CANCELLED,
+                ):
+                    self.execution.status = h_ev.execution.status
+                    self.execution.error = f"Pre-invoke hook {h_ev.execution.status.value}: {h_ev.execution.error}"
+                    return
+
                 if h_ev._should_exit:
                     raise h_ev._exit_cause or RuntimeError(
                         "Pre-invocation hook requested exit without a cause"
                     )
-                await global_hook_logger.alog(
+                await global_hook_logger.alog(h_ev)
 
             response = await self._invoke()
 
             if h_ev := self._post_invoke_hook_event:
                 await h_ev.invoke()
+
+                # Check if hook failed or was cancelled - propagate to main event
+                if h_ev.execution.status in (
+                    EventStatus.FAILED,
+                    EventStatus.CANCELLED,
+                ):
+                    self.execution.status = h_ev.execution.status
+                    self.execution.error = f"Post-invoke hook {h_ev.execution.status.value}: {h_ev.execution.error}"
+                    self.execution.response = (
+                        response  # Keep response even if hook failed
+                    )
+                    return
+
                 if h_ev._should_exit:
                     raise h_ev._exit_cause or RuntimeError(
                         "Post-invocation hook requested exit without a cause"
                     )
-                await global_hook_logger.alog(
+                await global_hook_logger.alog(h_ev)
 
             self.execution.response = response
             self.execution.status = EventStatus.COMPLETED
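The new pre/post-invoke blocks change failure semantics: a FAILED or CANCELLED hook no longer lets the event proceed; the event adopts the hook's status and a composed error string instead of raising. A minimal sketch of that propagation rule, using stand-in `Execution`/`EventStatus` types rather than lionagi's real classes:

```python
from dataclasses import dataclass, field
from enum import Enum


class EventStatus(str, Enum):
    PENDING = "pending"
    FAILED = "failed"
    CANCELLED = "cancelled"


@dataclass
class Execution:
    status: EventStatus = EventStatus.PENDING
    error: str | None = None


@dataclass
class MainEvent:
    execution: Execution = field(default_factory=Execution)

    def absorb_hook_failure(self, hook: Execution, phase: str) -> bool:
        # Same rule as the diff: adopt the hook's terminal status and
        # compose an error message; the caller then returns early.
        if hook.status in (EventStatus.FAILED, EventStatus.CANCELLED):
            self.execution.status = hook.status
            self.execution.error = f"{phase} hook {hook.status.value}: {hook.error}"
            return True
        return False


event = MainEvent()
hook = Execution(status=EventStatus.FAILED, error="permission denied")
assert event.absorb_hook_failure(hook, "Pre-invoke")
assert event.execution.error == "Pre-invoke hook failed: permission denied"
```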
@@ -87,10 +110,47 @@ class HookedEvent(Event):
         try:
             self.execution.status = EventStatus.PROCESSING
 
+            # Execute pre-invoke hook if present
+            if h_ev := self._pre_invoke_hook_event:
+                await h_ev.invoke()
+
+                # Check if hook failed or was cancelled - propagate to main event
+                if h_ev.execution.status in (
+                    EventStatus.FAILED,
+                    EventStatus.CANCELLED,
+                ):
+                    self.execution.status = h_ev.execution.status
+                    self.execution.error = f"Pre-invoke hook {h_ev.execution.status.value}: {h_ev.execution.error}"
+                    return
+
+                if h_ev._should_exit:
+                    raise h_ev._exit_cause or RuntimeError(
+                        "Pre-invocation hook requested exit without a cause"
+                    )
+                await global_hook_logger.alog(h_ev)
+
             async for chunk in self._stream():
                 response.append(chunk)
                 yield chunk
 
+            # Execute post-invoke hook if present
+            if h_ev := self._post_invoke_hook_event:
+                await h_ev.invoke()
+
+                # Check if hook failed or was cancelled - don't fail the stream since data was already sent
+                if h_ev.execution.status in (
+                    EventStatus.FAILED,
+                    EventStatus.CANCELLED,
+                ):
+                    # Log but don't fail the stream
+                    await global_hook_logger.alog(h_ev)
+                elif h_ev._should_exit:
+                    raise h_ev._exit_cause or RuntimeError(
+                        "Post-invocation hook requested exit without a cause"
+                    )
+                else:
+                    await global_hook_logger.alog(h_ev)
+
             self.execution.response = response
             self.execution.status = EventStatus.COMPLETED
 
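The streaming path treats post-invoke hook failures differently: chunks were already yielded to the caller, so a failed or cancelled hook is only logged and the stream still completes. A sketch of that decision, reusing the stand-in types from the previous example (`log` is a hypothetical stand-in for `global_hook_logger.alog`):

```python
import asyncio


async def log(hook: Execution) -> None:
    print(f"hook logged: {hook.status.value}")


async def after_stream(
    hook: Execution,
    should_exit: bool = False,
    exit_cause: BaseException | None = None,
) -> None:
    if hook.status in (EventStatus.FAILED, EventStatus.CANCELLED):
        # Log but don't fail the stream: data was already sent.
        await log(hook)
    elif should_exit:
        raise exit_cause or RuntimeError(
            "Post-invocation hook requested exit without a cause"
        )
    else:
        await log(hook)


asyncio.run(after_stream(Execution(status=EventStatus.FAILED, error="timeout")))
```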
lionagi/service/imodel.py CHANGED

@@ -3,19 +3,22 @@
 
 import asyncio
 from collections.abc import AsyncGenerator, Callable
+from typing import Any
+from uuid import UUID, uuid4
 
 from pydantic import BaseModel
 
 from lionagi.ln import is_coro_func, now_utc
-from lionagi.protocols.generic
-
-from
-from
-
-
-
-
-
+from lionagi.protocols.generic import ID, Event, EventStatus, Log
+
+from .connections import APICalling, Endpoint, match_endpoint
+from .hooks import (
+    HookedEvent,
+    HookEvent,
+    HookEventTypes,
+    HookRegistry,
+    global_hook_logger,
+)
 from .rate_limited_processor import RateLimitedAPIExecutor
 
 
@@ -52,7 +55,7 @@ class iModel:
         provider_metadata: dict | None = None,
         hook_registry: HookRegistry | dict | None = None,
         exit_hook: bool = False,
-        id:
+        id: UUID | str = None,
         created_at: float | None = None,
         **kwargs,
     ) -> None:
@@ -100,7 +103,7 @@ class iModel:
         if id is not None:
            self.id = ID.get_id(id)
         else:
-            self.id =
+            self.id = uuid4()
         if created_at is not None:
             if not isinstance(created_at, float):
                 raise ValueError("created_at must be a float timestamp.")
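The constructor's `id` parameter is now typed `UUID | str` with a `uuid4()` fallback when omitted. A hedged usage sketch; the `provider`/`model` keyword arguments are assumed from typical iModel usage, not shown in this diff:

```python
from uuid import uuid4

from lionagi.service.imodel import iModel

# Explicit id: accepted as a UUID or string, normalized via ID.get_id().
m1 = iModel(provider="openai", model="gpt-4o-mini", id=uuid4())

# Omitted id: falls back to uuid4().
m2 = iModel(provider="openai", model="gpt-4o-mini")
assert m1.id != m2.id
```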
@@ -270,7 +273,7 @@ class iModel:
             include_token_usage_to_model=include_token_usage_to_model,
         )
 
-    async def process_chunk(self, chunk) ->
+    async def process_chunk(self, chunk) -> Any:
         """Processes a chunk of streaming data.
 
         Override this method in subclasses if you need custom handling
@@ -284,6 +287,7 @@ class iModel:
             if is_coro_func(self.streaming_process_func):
                 return await self.streaming_process_func(chunk)
             return self.streaming_process_func(chunk)
+        return None
 
     async def stream(self, api_call=None, **kw) -> AsyncGenerator:
         """Performs a streaming API call with the given arguments.
@@ -313,8 +317,8 @@
         try:
             async for i in api_call.stream():
                 result = await self.process_chunk(i)
-                if
-
+                # Yield processed result if available, otherwise yield raw chunk
+                yield result if result is not None else i
         except Exception as e:
             raise ValueError(f"Failed to stream API call: {e}")
         finally:
@@ -323,8 +327,8 @@
         try:
             async for i in api_call.stream():
                 result = await self.process_chunk(i)
-                if
-
+                # Yield processed result if available, otherwise yield raw chunk
+                yield result if result is not None else i
         except Exception as e:
             raise ValueError(f"Failed to stream API call: {e}")
         finally:
@@ -360,10 +364,10 @@
         await self.executor.append(api_call)
         await self.executor.forward()
         ctr = 0
-        while api_call.status
-            EventStatus.
-            EventStatus.
-
+        while api_call.status in [
+            EventStatus.PROCESSING,
+            EventStatus.PENDING,
+        ]:
             if ctr > 100:
                 break
             await self.executor.forward()
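`process_chunk` now returns `Any` (with an explicit `return None` when no processor applies), and `stream()` yields the processed result when there is one, falling back to the raw chunk otherwise; the send path likewise polls with an explicit `status in [PROCESSING, PENDING]` check. A self-contained sketch of the new yield rule:

```python
import asyncio
from collections.abc import AsyncGenerator, Callable


async def stream_with_processing(
    chunks: AsyncGenerator, process: Callable | None = None
) -> AsyncGenerator:
    # Mirrors the rule in iModel.stream(): yield the processed result
    # when available, otherwise fall back to the raw chunk.
    async for chunk in chunks:
        result = process(chunk) if process is not None else None
        yield result if result is not None else chunk


async def demo() -> None:
    async def source():
        for c in ("hello", "world"):
            yield c

    processed = [c async for c in stream_with_processing(source(), str.upper)]
    assert processed == ["HELLO", "WORLD"]

    raw = [c async for c in stream_with_processing(source())]  # no processor
    assert raw == ["hello", "world"]


asyncio.run(demo())
```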
lionagi/service/third_party/claude_code.py CHANGED

@@ -18,12 +18,10 @@ from pathlib import Path
 from textwrap import shorten
 from typing import Any, Literal
 
-from json_repair import repair_json
 from pydantic import BaseModel, Field, field_validator, model_validator
 
 from lionagi import ln
 from lionagi.libs.schema.as_readable import as_readable
-from lionagi.utils import is_coro_func, is_import_installed
 
 HAS_CLAUDE_CODE_CLI = False
 CLAUDE_CLI = None
@@ -415,6 +413,8 @@ async def _ndjson_from_cli(request: ClaudeCodeRequest):
     • Robust against braces inside strings (uses json.JSONDecoder.raw_decode)
     • Falls back to `json_repair.repair_json` when necessary.
     """
+    from json_repair import repair_json
+
     workspace = request.cwd()
     workspace.mkdir(parents=True, exist_ok=True)
 
@@ -555,7 +555,7 @@ def _pp_final(sess: ClaudeSession, theme) -> None:
 async def _maybe_await(func, *args, **kw):
     """Call func which may be sync or async."""
     res = func(*args, **kw) if func else None
-    if is_coro_func(res):
+    if ln.is_coro_func(res):
         await res
 
 
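Moving `from json_repair import repair_json` from module scope into `_ndjson_from_cli` defers the dependency to first use, so importing the module stays cheap and does not hard-require the package. A generic sketch of the pattern; the `parse_lenient` helper is illustrative, not from the diff:

```python
import json


def parse_lenient(text: str) -> object:
    """Parse JSON, repairing it only when strict parsing fails."""
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        # Deferred import: only pay for (and require) json_repair
        # when a repair is actually needed.
        from json_repair import repair_json

        return json.loads(repair_json(text))


print(parse_lenient('{"ok": true'))  # missing brace -> repaired to {'ok': True}
```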
lionagi/service/third_party/openai_models.py ADDED

@@ -0,0 +1,435 @@
+"""
+OpenAI Model Names extracted from generated models.
+
+This module provides lists of allowed model names for different OpenAI services,
+extracted from the auto-generated openai_models.py file.
+"""
+
+from __future__ import annotations
+
+import warnings
+from enum import Enum
+from typing import Any, Dict, List, Literal, Optional, Union
+
+from pydantic import BaseModel, Field, model_validator
+
+warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")
+
+
+# Manually define the chat models from the ChatModel class in openai_models.py
+# These are extracted from the Literal type definition
+CHAT_MODELS = (
+    "gpt-5",
+    "gpt-5-mini",
+    "gpt-5-nano",
+    "gpt-5-2025-08-07",
+    "gpt-5-mini-2025-08-07",
+    "gpt-5-nano-2025-08-07",
+    "gpt-5-chat-latest",
+    "gpt-4.1",
+    "gpt-4.1-mini",
+    "gpt-4.1-nano",
+    "gpt-4.1-2025-04-14",
+    "gpt-4.1-mini-2025-04-14",
+    "gpt-4.1-nano-2025-04-14",
+    "o4-mini",
+    "o4-mini-2025-04-16",
+    "o3",
+    "o3-2025-04-16",
+    "o3-mini",
+    "o3-mini-2025-01-31",
+    "o1",
+    "o1-2024-12-17",
+    "o1-preview",
+    "o1-preview-2024-09-12",
+    "o1-mini",
+    "o1-mini-2024-09-12",
+    "gpt-4o",
+    "gpt-4o-2024-11-20",
+    "gpt-4o-2024-08-06",
+    "gpt-4o-2024-05-13",
+    "gpt-4o-audio-preview",
+    "gpt-4o-audio-preview-2024-10-01",
+    "gpt-4o-audio-preview-2024-12-17",
+    "gpt-4o-audio-preview-2025-06-03",
+    "gpt-4o-mini-audio-preview",
+    "gpt-4o-mini-audio-preview-2024-12-17",
+    "gpt-4o-search-preview",
+    "gpt-4o-mini-search-preview",
+    "gpt-4o-search-preview-2025-03-11",
+    "gpt-4o-mini-search-preview-2025-03-11",
+    "chatgpt-4o-latest",
+    "codex-mini-latest",
+    "gpt-4o-mini",
+    "gpt-4o-mini-2024-07-18",
+    "gpt-4-turbo",
+    "gpt-4-turbo-2024-04-09",
+    "gpt-4-0125-preview",
+    "gpt-4-turbo-preview",
+    "gpt-4-1106-preview",
+    "gpt-4-vision-preview",
+    "gpt-4",
+    "gpt-4-0314",
+    "gpt-4-0613",
+    "gpt-4-32k",
+    "gpt-4-32k-0314",
+    "gpt-4-32k-0613",
+    "gpt-3.5-turbo",
+    "gpt-3.5-turbo-16k",
+    "gpt-3.5-turbo-0301",
+    "gpt-3.5-turbo-0613",
+    "gpt-3.5-turbo-1106",
+    "gpt-3.5-turbo-0125",
+    "gpt-3.5-turbo-16k-0613",
+    "o1-pro",
+    "o1-pro-2025-03-19",
+    "o3-pro",
+    "o3-pro-2025-06-10",
+)
+
+REASONING_MODELS = (
+    model
+    for model in CHAT_MODELS
+    if model.startswith(("o1", "o1-", "o3", "o3-", "o4", "o4-", "gpt-5"))
+)
+
+# Embedding models
+EMBEDDING_MODELS = (
+    "text-embedding-ada-002",
+    "text-embedding-3-small",
+    "text-embedding-3-large",
+)
+
+IMAGE_MODELS = ("dall-e-2", "dall-e-3", "gpt-image-1")
+
+MODERATION_MODELS = ("text-moderation-latest", "text-moderation-stable")
+
+
+ChatModels = Literal[CHAT_MODELS]
+ReasoningModels = Literal[REASONING_MODELS]
+EmbeddingModels = Literal[EMBEDDING_MODELS]
+ImageModels = Literal[IMAGE_MODELS]
+ModerationModels = Literal[MODERATION_MODELS]
+
+
+# Audio models
+AUDIO_MODELS = {
+    "tts": ["tts-1", "tts-1-hd", "gpt-4o-mini-tts"],
+    "transcription": [
+        "whisper-1",
+        "gpt-4o-transcribe",
+        "gpt-4o-mini-transcribe",
+    ],
+}
+
+
+# ---------- Roles & content parts ----------
+
+
+class ChatRole(str, Enum):
+    system = "system"
+    developer = "developer"  # modern system-like role
+    user = "user"
+    assistant = "assistant"
+    tool = "tool"  # for tool results sent back to the model
+
+
+class TextPart(BaseModel):
+    """Text content part for multimodal messages."""
+
+    type: Literal["text"] = "text"
+    text: str
+
+
+class ImageURLObject(BaseModel):
+    """Image URL object; 'detail' is optional and model-dependent."""
+
+    url: str
+    detail: Literal["auto", "low", "high"] | None = Field(
+        default=None,
+        description="Optional detail control for vision models (auto/low/high).",
+    )
+
+
+class ImageURLPart(BaseModel):
+    """Image content part for multimodal messages."""
+
+    type: Literal["image_url"] = "image_url"
+    image_url: ImageURLObject
+
+
+ContentPart = TextPart | ImageURLPart
+
+
+# ---------- Tool-calling structures ----------
+
+
+class FunctionDef(BaseModel):
+    """JSON Schema function definition for tool-calling."""
+
+    name: str
+    description: str | None = None
+    parameters: dict[str, Any] = Field(
+        default_factory=dict,
+        description="JSON Schema describing function parameters.",
+    )
+
+
+class FunctionTool(BaseModel):
+    type: Literal["function"] = "function"
+    function: FunctionDef
+
+
+class FunctionCall(BaseModel):
+    """Legacy function_call field on assistant messages."""
+
+    name: str
+    arguments: str
+
+
+class ToolCallFunction(BaseModel):
+    name: str
+    arguments: str
+
+
+class ToolCall(BaseModel):
+    """Assistant's tool call (modern)."""
+
+    id: str
+    type: Literal["function"] = "function"
+    function: ToolCallFunction
+
+
+class ToolChoiceFunction(BaseModel):
+    """Explicit tool selection."""
+
+    type: Literal["function"] = "function"
+    function: dict[str, str]  # {"name": "<function_name>"}
+
+
+ToolChoice = Union[Literal["auto", "none"], ToolChoiceFunction]
+
+
+# ---------- Response format (structured outputs) ----------
+
+
+class ResponseFormatText(BaseModel):
+    type: Literal["text"] = "text"
+
+
+class ResponseFormatJSONObject(BaseModel):
+    type: Literal["json_object"] = "json_object"
+
+
+class JSONSchemaFormat(BaseModel):
+    name: str
+    schema_: dict[str, Any] = Field(
+        alias="schema", description="JSON Schema definition"
+    )
+    strict: bool | None = Field(
+        default=None,
+        description="If true, disallow unspecified properties (strict schema).",
+    )
+
+    model_config = {"populate_by_name": True}
+
+
+class ResponseFormatJSONSchema(BaseModel):
+    type: Literal["json_schema"] = "json_schema"
+    json_schema: JSONSchemaFormat
+
+
+ResponseFormat = Union[
+    ResponseFormatText,
+    ResponseFormatJSONObject,
+    ResponseFormatJSONSchema,
+]
+
+
+# ---------- Messages (discriminated by role) ----------
+
+
+class SystemMessage(BaseModel):
+    role: Literal[ChatRole.system] = ChatRole.system
+    content: str | list[ContentPart]
+    name: str | None = None  # optional per API
+
+
+class DeveloperMessage(BaseModel):
+    role: Literal[ChatRole.developer] = ChatRole.developer
+    content: str | list[ContentPart]
+    name: str | None = None
+
+
+class UserMessage(BaseModel):
+    role: Literal[ChatRole.user] = ChatRole.user
+    content: str | list[ContentPart]
+    name: str | None = None
+
+
+class AssistantMessage(BaseModel):
+    role: Literal[ChatRole.assistant] = ChatRole.assistant
+    # Either textual content, or only tool_calls (when asking you to call tools)
+    content: str | list[ContentPart] | None = None
+    name: str | None = None
+    tool_calls: list[ToolCall] | None = None  # modern tool-calling result
+    function_call: FunctionCall | None = None  # legacy function-calling result
+
+
+class ToolMessage(BaseModel):
+    role: Literal[ChatRole.tool] = ChatRole.tool
+    content: str  # tool output returned to the model
+    tool_call_id: str  # must reference the assistant's tool_calls[i].id
+
+
+ChatMessage = (
+    SystemMessage
+    | DeveloperMessage
+    | UserMessage
+    | AssistantMessage
+    | ToolMessage
+)
+
+# ---------- Stream options ----------
+
+
+class StreamOptions(BaseModel):
+    include_usage: bool | None = Field(
+        default=None,
+        description="If true, a final streamed chunk includes token usage.",
+    )
+
+
+# ---------- Main request model ----------
+
+
+class OpenAIChatCompletionsRequest(BaseModel):
+    """
+    Request body for OpenAI Chat Completions.
+    Endpoint: POST https://api.openai.com/v1/chat/completions
+    """
+
+    # Required
+    model: str = Field(..., description="Model name, e.g., 'gpt-4o', 'gpt-4o-mini'.")  # type: ignore
+    messages: list[ChatMessage] = Field(
+        ...,
+        description="Conversation so far, including system/developer context.",
+    )
+
+    # Sampling & penalties
+    temperature: float | None = Field(
+        default=None, ge=0.0, le=2.0, description="Higher is more random."
+    )
+    top_p: float | None = Field(
+        default=None, ge=0.0, le=1.0, description="Nucleus sampling."
+    )
+    presence_penalty: float | None = Field(
+        default=None,
+        ge=-2.0,
+        le=2.0,
+        description="Encourages new topics; -2..2.",
+    )
+    frequency_penalty: float | None = Field(
+        default=None,
+        ge=-2.0,
+        le=2.0,
+        description="Penalizes repetition; -2..2.",
+    )
+
+    # Token limits
+    max_completion_tokens: int | None = Field(
+        default=None,
+        description="Preferred cap on generated tokens (newer models).",
+    )
+    max_tokens: int | None = Field(
+        default=None,
+        description="Legacy completion cap (still accepted by many models).",
+    )
+
+    # Count, stop, logits
+    n: int | None = Field(
+        default=None, ge=1, description="# of choices to generate."
+    )
+    stop: str | list[str] | None = Field(
+        default=None, description="Stop sequence(s)."
+    )
+    logit_bias: dict[str, float] | None = Field(
+        default=None,
+        description="Map of token-id -> bias (-100..100).",
+    )
+    seed: int | None = Field(
+        default=None,
+        description="Optional reproducibility seed (model-dependent).",
+    )
+    logprobs: bool | None = None
+    top_logprobs: int | None = Field(
+        default=None,
+        ge=0,
+        description="When logprobs is true, how many top tokens to include.",
+    )
+
+    # Tool calling (modern)
+    tools: list[FunctionTool] | None = None
+    tool_choice: ToolChoice | None = Field(
+        default=None,
+        description="'auto' (default), 'none', or a function selection.",
+    )
+    parallel_tool_calls: bool | None = Field(
+        default=None,
+        description="Allow multiple tool calls in a single assistant turn.",
+    )
+
+    # Legacy function-calling (still supported)
+    functions: list[FunctionDef] | None = None
+    function_call: Literal["none", "auto"] | FunctionCall | None = None
+
+    # Structured outputs
+    response_format: ResponseFormat | None = None
+
+    # Streaming
+    stream: bool | None = None
+    stream_options: StreamOptions | None = None
+
+    # Routing / tiering
+    service_tier: (
+        Literal["auto", "default", "flex", "scale", "priority"] | None
+    ) = Field(
+        default=None,
+        description="Processing tier; requires account eligibility.",
+    )
+
+    # Misc
+    user: str | None = Field(
+        default=None,
+        description="End-user identifier for abuse monitoring & analytics.",
+    )
+    store: bool | None = Field(
+        default=None,
+        description="Whether to store the response server-side (model-dependent).",
+    )
+    metadata: dict[str, Any] | None = None
+    reasoning_effort: Literal["low", "medium", "high"] | None = Field(
+        default=None,
+        description="For reasoning models: trade-off between speed and accuracy.",
+    )
+
+    @model_validator(mode="after")
+    def _validate_reasoning_model_params(self):
+        if self.is_openai_model:
+            if self.is_reasoning_model:
+                self.temperature = None
+                self.top_p = None
+                self.logprobs = None
+                self.top_logprobs = None
+                self.logit_bias = None
+            else:
+                self.reasoning_effort = None
+        return self
+
+    @property
+    def is_reasoning_model(self) -> bool:
+        return self.model in REASONING_MODELS
+
+    @property
+    def is_openai_model(self) -> bool:
+        return self.model in CHAT_MODELS
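A minimal construction sketch for the new request model, using only names defined in the added file; `model_dump(exclude_none=True)` yields a wire-ready payload, and the `model_validator` strips sampling parameters for reasoning models (and `reasoning_effort` for everything else):

```python
from lionagi.service.third_party.openai_models import (
    OpenAIChatCompletionsRequest,
    UserMessage,
)

req = OpenAIChatCompletionsRequest(
    model="gpt-4o-mini",
    messages=[UserMessage(content="Summarize this diff.")],
    temperature=0.7,
)

# Only fields that were actually set end up in the request body.
payload = req.model_dump(exclude_none=True)
print(payload["model"], payload["temperature"])
```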