lionagi 0.17.11__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
Files changed (52)
  1. lionagi/libs/schema/minimal_yaml.py +98 -0
  2. lionagi/ln/types.py +32 -5
  3. lionagi/models/field_model.py +9 -0
  4. lionagi/operations/ReAct/ReAct.py +474 -237
  5. lionagi/operations/ReAct/utils.py +3 -0
  6. lionagi/operations/act/act.py +206 -0
  7. lionagi/operations/chat/chat.py +130 -114
  8. lionagi/operations/communicate/communicate.py +101 -42
  9. lionagi/operations/flow.py +4 -4
  10. lionagi/operations/interpret/interpret.py +65 -20
  11. lionagi/operations/operate/operate.py +212 -106
  12. lionagi/operations/parse/parse.py +170 -142
  13. lionagi/operations/select/select.py +78 -17
  14. lionagi/operations/select/utils.py +1 -1
  15. lionagi/operations/types.py +119 -23
  16. lionagi/protocols/generic/log.py +3 -2
  17. lionagi/protocols/messages/__init__.py +27 -0
  18. lionagi/protocols/messages/action_request.py +86 -184
  19. lionagi/protocols/messages/action_response.py +73 -131
  20. lionagi/protocols/messages/assistant_response.py +130 -159
  21. lionagi/protocols/messages/base.py +26 -18
  22. lionagi/protocols/messages/instruction.py +281 -625
  23. lionagi/protocols/messages/manager.py +112 -62
  24. lionagi/protocols/messages/message.py +87 -197
  25. lionagi/protocols/messages/system.py +52 -123
  26. lionagi/protocols/types.py +0 -2
  27. lionagi/service/connections/endpoint.py +0 -8
  28. lionagi/service/connections/providers/oai_.py +29 -94
  29. lionagi/service/connections/providers/ollama_.py +3 -2
  30. lionagi/service/hooks/hooked_event.py +2 -2
  31. lionagi/service/third_party/claude_code.py +3 -2
  32. lionagi/service/third_party/openai_models.py +433 -0
  33. lionagi/session/branch.py +170 -178
  34. lionagi/session/session.py +3 -9
  35. lionagi/tools/file/reader.py +2 -2
  36. lionagi/version.py +1 -1
  37. {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/METADATA +1 -2
  38. {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/RECORD +41 -49
  39. lionagi/operations/_act/act.py +0 -86
  40. lionagi/protocols/messages/templates/README.md +0 -28
  41. lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
  42. lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
  43. lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
  44. lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
  45. lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
  46. lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
  47. lionagi/service/connections/providers/types.py +0 -28
  48. lionagi/service/third_party/openai_model_names.py +0 -198
  49. lionagi/service/types.py +0 -58
  50. /lionagi/operations/{_act → act}/__init__.py +0 -0
  51. {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/WHEEL +0 -0
  52. {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/licenses/LICENSE +0 -0
lionagi/service/third_party/openai_models.py (new file, entry 32 above)
@@ -0,0 +1,433 @@
+ """
+ OpenAI Model Names extracted from generated models.
+
+ This module provides lists of allowed model names for different OpenAI services,
+ extracted from the auto-generated openai_models.py file.
+ """
+
+ from __future__ import annotations
+
+ import warnings
+ from enum import Enum
+ from typing import Any, Dict, List, Literal, Optional, Union
+
+ from pydantic import BaseModel, Field, model_validator
+
+ warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")
+
+
+ # Manually define the chat models from the ChatModel class in openai_models.py
+ # These are extracted from the Literal type definition
+ CHAT_MODELS = (
+     "gpt-5",
+     "gpt-5-mini",
+     "gpt-5-nano",
+     "gpt-5-2025-08-07",
+     "gpt-5-mini-2025-08-07",
+     "gpt-5-nano-2025-08-07",
+     "gpt-5-chat-latest",
+     "gpt-4.1",
+     "gpt-4.1-mini",
+     "gpt-4.1-nano",
+     "gpt-4.1-2025-04-14",
+     "gpt-4.1-mini-2025-04-14",
+     "gpt-4.1-nano-2025-04-14",
+     "o4-mini",
+     "o4-mini-2025-04-16",
+     "o3",
+     "o3-2025-04-16",
+     "o3-mini",
+     "o3-mini-2025-01-31",
+     "o1",
+     "o1-2024-12-17",
+     "o1-preview",
+     "o1-preview-2024-09-12",
+     "o1-mini",
+     "o1-mini-2024-09-12",
+     "gpt-4o",
+     "gpt-4o-2024-11-20",
+     "gpt-4o-2024-08-06",
+     "gpt-4o-2024-05-13",
+     "gpt-4o-audio-preview",
+     "gpt-4o-audio-preview-2024-10-01",
+     "gpt-4o-audio-preview-2024-12-17",
+     "gpt-4o-audio-preview-2025-06-03",
+     "gpt-4o-mini-audio-preview",
+     "gpt-4o-mini-audio-preview-2024-12-17",
+     "gpt-4o-search-preview",
+     "gpt-4o-mini-search-preview",
+     "gpt-4o-search-preview-2025-03-11",
+     "gpt-4o-mini-search-preview-2025-03-11",
+     "chatgpt-4o-latest",
+     "codex-mini-latest",
+     "gpt-4o-mini",
+     "gpt-4o-mini-2024-07-18",
+     "gpt-4-turbo",
+     "gpt-4-turbo-2024-04-09",
+     "gpt-4-0125-preview",
+     "gpt-4-turbo-preview",
+     "gpt-4-1106-preview",
+     "gpt-4-vision-preview",
+     "gpt-4",
+     "gpt-4-0314",
+     "gpt-4-0613",
+     "gpt-4-32k",
+     "gpt-4-32k-0314",
+     "gpt-4-32k-0613",
+     "gpt-3.5-turbo",
+     "gpt-3.5-turbo-16k",
+     "gpt-3.5-turbo-0301",
+     "gpt-3.5-turbo-0613",
+     "gpt-3.5-turbo-1106",
+     "gpt-3.5-turbo-0125",
+     "gpt-3.5-turbo-16k-0613",
+     "o1-pro",
+     "o1-pro-2025-03-19",
+     "o3-pro",
+     "o3-pro-2025-06-10",
+ )
+
+ # Materialized as a tuple: a bare generator expression would be exhausted
+ # after its first membership test and is not a valid argument to Literal below.
+ REASONING_MODELS = tuple(
+     model
+     for model in CHAT_MODELS
+     if model.startswith(("o1", "o1-", "o3", "o3-", "o4", "o4-", "gpt-5"))
+ )
+
+ # Embedding models
+ EMBEDDING_MODELS = (
+     "text-embedding-ada-002",
+     "text-embedding-3-small",
+     "text-embedding-3-large",
+ )
+
+ IMAGE_MODELS = ("dall-e-2", "dall-e-3", "gpt-image-1")
+
+ MODERATION_MODELS = ("text-moderation-latest", "text-moderation-stable")
+
+
+ ChatModels = Literal[CHAT_MODELS]
+ ReasoningModels = Literal[REASONING_MODELS]
+ EmbeddingModels = Literal[EMBEDDING_MODELS]
+ ImageModels = Literal[IMAGE_MODELS]
+ ModerationModels = Literal[MODERATION_MODELS]
+
+
+ # Audio models
+ AUDIO_MODELS = {
+     "tts": ["tts-1", "tts-1-hd", "gpt-4o-mini-tts"],
+     "transcription": [
+         "whisper-1",
+         "gpt-4o-transcribe",
+         "gpt-4o-mini-transcribe",
+     ],
+ }
+
+
+ # ---------- Roles & content parts ----------
+
+
+ class ChatRole(str, Enum):
+     system = "system"
+     developer = "developer"  # modern system-like role
+     user = "user"
+     assistant = "assistant"
+     tool = "tool"  # for tool results sent back to the model
+
+
+ class TextPart(BaseModel):
+     """Text content part for multimodal messages."""
+
+     type: Literal["text"] = "text"
+     text: str
+
+
+ class ImageURLObject(BaseModel):
+     """Image URL object; 'detail' is optional and model-dependent."""
+
+     url: str
+     detail: Optional[Literal["auto", "low", "high"]] = Field(
+         default=None,
+         description="Optional detail control for vision models (auto/low/high).",
+     )
+
+
+ class ImageURLPart(BaseModel):
+     """Image content part for multimodal messages."""
+
+     type: Literal["image_url"] = "image_url"
+     image_url: ImageURLObject
+
+
+ ContentPart = TextPart | ImageURLPart
+
+
+ # ---------- Tool-calling structures ----------
+
+
+ class FunctionDef(BaseModel):
+     """JSON Schema function definition for tool-calling."""
+
+     name: str
+     description: Optional[str] = None
+     parameters: Dict[str, Any] = Field(
+         default_factory=dict,
+         description="JSON Schema describing function parameters.",
+     )
+
+
+ class FunctionTool(BaseModel):
+     type: Literal["function"] = "function"
+     function: FunctionDef
+
+
+ class FunctionCall(BaseModel):
+     """Legacy function_call field on assistant messages."""
+
+     name: str
+     arguments: str
+
+
+ class ToolCallFunction(BaseModel):
+     name: str
+     arguments: str
+
+
+ class ToolCall(BaseModel):
+     """Assistant's tool call (modern)."""
+
+     id: str
+     type: Literal["function"] = "function"
+     function: ToolCallFunction
+
+
+ class ToolChoiceFunction(BaseModel):
+     """Explicit tool selection."""
+
+     type: Literal["function"] = "function"
+     function: Dict[str, str]  # {"name": "<function_name>"}
+
+
+ ToolChoice = Union[Literal["auto", "none"], ToolChoiceFunction]
+
+
+ # ---------- Response format (structured outputs) ----------
+
+
+ class ResponseFormatText(BaseModel):
+     type: Literal["text"] = "text"
+
+
+ class ResponseFormatJSONObject(BaseModel):
+     type: Literal["json_object"] = "json_object"
+
+
+ class JSONSchemaFormat(BaseModel):
+     name: str
+     schema: Dict[str, Any]
+     strict: Optional[bool] = Field(
+         default=None,
+         description="If true, disallow unspecified properties (strict schema).",
+     )
+
+
+ class ResponseFormatJSONSchema(BaseModel):
+     type: Literal["json_schema"] = "json_schema"
+     json_schema: JSONSchemaFormat
+
+
+ ResponseFormat = Union[
+     ResponseFormatText,
+     ResponseFormatJSONObject,
+     ResponseFormatJSONSchema,
+ ]
+
+
+ # ---------- Messages (discriminated by role) ----------
+
+
+ class SystemMessage(BaseModel):
+     role: Literal[ChatRole.system] = ChatRole.system
+     content: Union[str, List[ContentPart]]
+     name: Optional[str] = None  # optional per API
+
+
+ class DeveloperMessage(BaseModel):
+     role: Literal[ChatRole.developer] = ChatRole.developer
+     content: Union[str, List[ContentPart]]
+     name: Optional[str] = None
+
+
+ class UserMessage(BaseModel):
+     role: Literal[ChatRole.user] = ChatRole.user
+     content: Union[str, List[ContentPart]]
+     name: Optional[str] = None
+
+
+ class AssistantMessage(BaseModel):
+     role: Literal[ChatRole.assistant] = ChatRole.assistant
+     # Either textual content, or only tool_calls (when asking you to call tools)
+     content: Optional[Union[str, List[ContentPart]]] = None
+     name: Optional[str] = None
+     tool_calls: Optional[List[ToolCall]] = None  # modern tool-calling result
+     function_call: Optional[FunctionCall] = (
+         None  # legacy function-calling result
+     )
+
+
+ class ToolMessage(BaseModel):
+     role: Literal[ChatRole.tool] = ChatRole.tool
+     content: str  # tool output returned to the model
+     tool_call_id: str  # must reference the assistant's tool_calls[i].id
+
+
+ ChatMessage = (
+     SystemMessage
+     | DeveloperMessage
+     | UserMessage
+     | AssistantMessage
+     | ToolMessage
+ )
+
+ # ---------- Stream options ----------
+
+
+ class StreamOptions(BaseModel):
+     include_usage: Optional[bool] = Field(
+         default=None,
+         description="If true, a final streamed chunk includes token usage.",
+     )
+
+
+ # ---------- Main request model ----------
+
+
+ class OpenAIChatCompletionsRequest(BaseModel):
+     """
+     Request body for OpenAI Chat Completions.
+     Endpoint: POST https://api.openai.com/v1/chat/completions
+     """
+
+     # Required
+     model: str = Field(..., description="Model name, e.g., 'gpt-4o', 'gpt-4o-mini'.")  # type: ignore
+     messages: List[ChatMessage] = Field(
+         ...,
+         description="Conversation so far, including system/developer context.",
+     )
+
+     # Sampling & penalties
+     temperature: float | None = Field(
+         default=None, ge=0.0, le=2.0, description="Higher is more random."
+     )
+     top_p: float | None = Field(
+         default=None, ge=0.0, le=1.0, description="Nucleus sampling."
+     )
+     presence_penalty: float | None = Field(
+         default=None,
+         ge=-2.0,
+         le=2.0,
+         description="Encourages new topics; -2..2.",
+     )
+     frequency_penalty: float | None = Field(
+         default=None,
+         ge=-2.0,
+         le=2.0,
+         description="Penalizes repetition; -2..2.",
+     )
+
+     # Token limits
+     max_completion_tokens: int | None = Field(
+         default=None,
+         description="Preferred cap on generated tokens (newer models).",
+     )
+     max_tokens: int | None = Field(
+         default=None,
+         description="Legacy completion cap (still accepted by many models).",
+     )
+
+     # Count, stop, logits
+     n: int | None = Field(
+         default=None, ge=1, description="# of choices to generate."
+     )
+     stop: str | List[str] | None = Field(
+         default=None, description="Stop sequence(s)."
+     )
+     logit_bias: dict[str, float] | None = Field(
+         default=None,
+         description="Map of token-id -> bias (-100..100).",
+     )
+     seed: int | None = Field(
+         default=None,
+         description="Optional reproducibility seed (model-dependent).",
+     )
+     logprobs: bool | None = None
+     top_logprobs: int | None = Field(
+         default=None,
+         ge=0,
+         description="When logprobs is true, how many top tokens to include.",
+     )
+
+     # Tool calling (modern)
+     tools: list[FunctionTool] | None = None
+     tool_choice: ToolChoice | None = Field(
+         default=None,
+         description="'auto' (default), 'none', or a function selection.",
+     )
+     parallel_tool_calls: bool | None = Field(
+         default=None,
+         description="Allow multiple tool calls in a single assistant turn.",
+     )
+
+     # Legacy function-calling (still supported)
+     functions: list[FunctionDef] | None = None
+     function_call: Literal["none", "auto"] | FunctionCall | None = None
+
+     # Structured outputs
+     response_format: ResponseFormat | None = None
+
+     # Streaming
+     stream: bool | None = None
+     stream_options: StreamOptions | None = None
+
+     # Routing / tiering
+     service_tier: (
+         Literal["auto", "default", "flex", "scale", "priority"] | None
+     ) = Field(
+         default=None,
+         description="Processing tier; requires account eligibility.",
+     )
+
+     # Misc
+     user: str | None = Field(
+         default=None,
+         description="End-user identifier for abuse monitoring & analytics.",
+     )
+     store: bool | None = Field(
+         default=None,
+         description="Whether to store the response server-side (model-dependent).",
+     )
+     metadata: dict[str, Any] | None = None
+     reasoning_effort: Optional[Literal["low", "medium", "high"]] = Field(
+         default=None,
+         description="For reasoning models: trade-off between speed and accuracy.",
+     )
+
+     @model_validator(mode="after")
+     def _validate_reasoning_model_params(self):
+         if self.is_openai_model:
+             if self.is_reasoning_model:
+                 self.temperature = None
+                 self.top_p = None
+                 self.logprobs = None
+                 self.top_logprobs = None
+                 self.logit_bias = None
+             else:
+                 self.reasoning_effort = None
+         return self
+
+     @property
+     def is_reasoning_model(self) -> bool:
+         return self.model in REASONING_MODELS
+
+     @property
+     def is_openai_model(self) -> bool:
+         return self.model in CHAT_MODELS
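
Usage note (illustrative, not part of the diff): a minimal sketch of how the new request model behaves, assuming lionagi 0.18.0 is installed and the import path matches entry 32 above.

# Minimal sketch, assuming the module path from entry 32 of the file list.
from lionagi.service.third_party.openai_models import (
    OpenAIChatCompletionsRequest,
)

# Reasoning model: the after-validator clears sampling parameters
# (temperature, top_p, logprobs, ...) and keeps reasoning_effort.
req = OpenAIChatCompletionsRequest(
    model="o3-mini",
    messages=[{"role": "user", "content": "Summarize this diff."}],
    temperature=0.7,
    reasoning_effort="low",
)
assert req.is_openai_model and req.is_reasoning_model
assert req.temperature is None        # cleared by _validate_reasoning_model_params
assert req.reasoning_effort == "low"  # kept for reasoning models

# Non-reasoning model: the inverse applies; sampling parameters survive
# and reasoning_effort is dropped.
req = OpenAIChatCompletionsRequest(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello"}],
    temperature=0.7,
    reasoning_effort="low",
)
assert req.temperature == 0.7
assert req.reasoning_effort is None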