synth-ai 0.2.3__py3-none-any.whl → 0.2.4.dev2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. synth_ai/compound/cais.py +0 -0
  2. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_traces_sft_turso.py +115 -1
  3. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/test_crafter_react_agent_lm_synth.py +3 -3
  4. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/test_crafter_react_agent_lm_synth_v2_backup.py +3 -3
  5. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/run_rollouts_for_models_and_compare_v3.py +4 -4
  6. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/test_crafter_react_agent_openai_v2_backup.py +3 -3
  7. synth_ai/environments/examples/crafter_classic/agent_demos/example_v3_usage.py +1 -1
  8. synth_ai/environments/examples/crafter_classic/environment.py +1 -1
  9. synth_ai/environments/examples/crafter_custom/environment.py +1 -1
  10. synth_ai/environments/service/core_routes.py +1 -1
  11. synth_ai/learning/prompts/mipro.py +8 -0
  12. synth_ai/lm/core/main_v3.py +219 -158
  13. synth_ai/tracing_v3/__init__.py +2 -2
  14. synth_ai/tracing_v3/abstractions.py +62 -17
  15. synth_ai/tracing_v3/hooks.py +1 -1
  16. synth_ai/tracing_v3/llm_call_record_helpers.py +350 -0
  17. synth_ai/tracing_v3/lm_call_record_abstractions.py +257 -0
  18. synth_ai/tracing_v3/session_tracer.py +5 -5
  19. synth_ai/tracing_v3/tests/test_concurrent_operations.py +1 -1
  20. synth_ai/tracing_v3/tests/test_llm_call_records.py +672 -0
  21. synth_ai/tracing_v3/tests/test_session_tracer.py +43 -9
  22. synth_ai/tracing_v3/tests/test_turso_manager.py +1 -1
  23. synth_ai/tracing_v3/turso/manager.py +10 -3
  24. synth_ai/tracing_v3/turso/models.py +1 -0
  25. {synth_ai-0.2.3.dist-info → synth_ai-0.2.4.dev2.dist-info}/METADATA +3 -2
  26. {synth_ai-0.2.3.dist-info → synth_ai-0.2.4.dev2.dist-info}/RECORD +30 -26
  27. {synth_ai-0.2.3.dist-info → synth_ai-0.2.4.dev2.dist-info}/WHEEL +0 -0
  28. {synth_ai-0.2.3.dist-info → synth_ai-0.2.4.dev2.dist-info}/entry_points.txt +0 -0
  29. {synth_ai-0.2.3.dist-info → synth_ai-0.2.4.dev2.dist-info}/licenses/LICENSE +0 -0
  30. {synth_ai-0.2.3.dist-info → synth_ai-0.2.4.dev2.dist-info}/top_level.txt +0 -0
synth_ai/lm/core/main_v3.py

@@ -5,37 +5,39 @@ This module provides the LM class with async v3 tracing support,
 replacing the v2 DuckDB-based implementation.
 """
 
-from typing import Any, Dict, List, Literal, Optional, Union
-import os
-import functools
+from typing import Any, Literal
 import asyncio
 import time
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
 
-from synth_ai.lm.core.exceptions import StructuredOutputCoercionFailureException
+from synth_ai.lm.config import reasoning_models
 from synth_ai.lm.core.vendor_clients import (
     anthropic_naming_regexes,
     get_client,
     openai_naming_regexes,
 )
 from synth_ai.lm.structured_outputs.handler import StructuredOutputHandler
-from synth_ai.lm.vendors.base import VendorBase, BaseLMResponse
 from synth_ai.lm.tools.base import BaseTool
-from synth_ai.lm.config import reasoning_models
+from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
 
 # V3 tracing imports
-from synth_ai.tracing_v3.session_tracer import SessionTracer
-from synth_ai.tracing_v3.decorators import set_session_id, set_turn_number, set_session_tracer
 from synth_ai.tracing_v3.abstractions import LMCAISEvent, TimeRecord
+from synth_ai.tracing_v3.decorators import set_turn_number
+from synth_ai.tracing_v3.llm_call_record_helpers import (
+    compute_aggregates_from_call_records,
+    create_llm_call_record_from_response,
+)
+from synth_ai.tracing_v3.session_tracer import SessionTracer
 
 
 def build_messages(
     sys_msg: str,
     user_msg: str,
-    images_bytes: List = [],
-    model_name: Optional[str] = None,
-) -> List[Dict]:
+    images_bytes: list | None = None,
+    model_name: str | None = None,
+) -> list[dict]:
+    images_bytes = images_bytes or []
     if len(images_bytes) > 0 and any(regex.match(model_name) for regex in openai_naming_regexes):
         return [
             {"role": "system", "content": sys_msg},
@@ -51,9 +53,7 @@ def build_messages(
                 ],
             },
         ]
-    elif len(images_bytes) > 0 and any(
-        regex.match(model_name) for regex in anthropic_naming_regexes
-    ):
+    elif len(images_bytes) > 0 and any(regex.match(model_name) for regex in anthropic_naming_regexes):
        return [
            {"role": "system", "content": sys_msg},
            {
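Two fixes land in `build_messages` above: the shared mutable default (`images_bytes: List = []`) becomes `None` normalized to a fresh list on each call, and the multi-line `elif` condition is collapsed onto one line. A minimal usage sketch of the new signature (the model name is illustrative, not from the diff):

```python
from synth_ai.lm.core.main_v3 import build_messages

# Text-only call: images_bytes defaults to None and is normalized to []
# inside the function, so the image-specific vendor branches are skipped.
messages = build_messages(
    sys_msg="You are a helpful assistant.",
    user_msg="Summarize the last session.",
    model_name="gpt-4o-mini",  # illustrative OpenAI-style name
)
# Expected shape per the visible branches: a system message followed by
# a user message.
```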
@@ -84,27 +84,27 @@ class LM:
 
     def __init__(
         self,
-        vendor: Optional[str] = None,
-        model: Optional[str] = None,
+        vendor: str | None = None,
+        model: str | None = None,
         # v2 compatibility parameters
-        model_name: Optional[str] = None,  # Alias for model
-        formatting_model_name: Optional[str] = None,  # For structured outputs
-        provider: Optional[str] = None,  # Alias for vendor
+        model_name: str | None = None,  # Alias for model
+        formatting_model_name: str | None = None,  # For structured outputs
+        provider: str | None = None,  # Alias for vendor
         synth_logging: bool = True,  # v2 compatibility
         max_retries: Literal["None", "Few", "Many"] = "Few",  # v2 compatibility
         # v3 parameters
-        is_structured: Optional[bool] = None,
-        structured_outputs_vendor: Optional[str] = None,
-        response_format: Union[BaseModel, Dict[str, Any], None] = None,
+        is_structured: bool | None = None,
+        structured_outputs_vendor: str | None = None,
+        response_format: type[BaseModel] | dict[str, Any] | None = None,
         json_mode: bool = False,
         temperature: float = 0.8,
-        session_tracer: Optional[SessionTracer] = None,
-        system_id: Optional[str] = None,
+        session_tracer: SessionTracer | None = None,
+        system_id: str | None = None,
         enable_v3_tracing: bool = True,
-        enable_v2_tracing: Optional[bool] = None,  # v2 compatibility
+        enable_v2_tracing: bool | None = None,  # v2 compatibility
         # Responses API parameters
         auto_store_responses: bool = True,
-        use_responses_api: Optional[bool] = None,
+        use_responses_api: bool | None = None,
         **additional_params,
     ):
         # Handle v2 compatibility parameters
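The constructor hunks modernize the annotations (`Optional[X]` and `Union[...]` become PEP 604 unions, and `response_format` is now correctly typed as `type[BaseModel]` rather than an instance) and alphabetize the regex imports; runtime behavior is unchanged. A hedged construction sketch against this signature (model name illustrative):

```python
from synth_ai.lm.core.main_v3 import LM

# When vendor/provider is omitted, the vendor is auto-detected from the
# model name via the naming regexes imported below.
lm = LM(
    model="gpt-4o-mini",     # illustrative model name
    temperature=0.2,
    max_retries="Few",       # v2-compatible literal
    enable_v3_tracing=True,  # records LMCAISEvents when a session tracer is attached
)
```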
@@ -119,14 +119,14 @@ class LM:
         if vendor is None and model is not None:
             # Import vendor detection logic
             from synth_ai.lm.core.vendor_clients import (
-                openai_naming_regexes,
                 anthropic_naming_regexes,
-                gemini_naming_regexes,
+                custom_endpoint_naming_regexes,
                 deepseek_naming_regexes,
-                groq_naming_regexes,
+                gemini_naming_regexes,
                 grok_naming_regexes,
+                groq_naming_regexes,
+                openai_naming_regexes,
                 openrouter_naming_regexes,
-                custom_endpoint_naming_regexes,
                 together_naming_regexes,
             )
 
@@ -164,22 +164,51 @@ class LM:
         self.enable_v3_tracing = enable_v3_tracing
         self.additional_params = additional_params
 
+        # Initialize vendor wrapper early, before any potential usage
+        # (e.g., within StructuredOutputHandler initialization below)
+        self._vendor_wrapper = None
+
         # Responses API thread management
         self.auto_store_responses = auto_store_responses
         self.use_responses_api = use_responses_api
-        self._last_response_id: Optional[str] = None
+        self._last_response_id: str | None = None
 
         # Set structured output handler if needed
         if self.response_format:
             self.is_structured = True
+            # Choose mode automatically: prefer forced_json for OpenAI/reasoning models
+            forced_json_preferred = (self.vendor == "openai") or (
+                self.model in reasoning_models if self.model else False
+            )
+            structured_output_mode = "forced_json" if forced_json_preferred else "stringified_json"
+
+            # Build core and formatting clients
+            core_client = get_client(
+                self.model,
+                with_formatting=(structured_output_mode == "forced_json"),
+                provider=self.vendor,
+            )
+            formatting_model = formatting_model_name or self.model
+            formatting_client = get_client(
+                formatting_model,
+                with_formatting=True,
+                provider=self.vendor if self.vendor != "custom_endpoint" else None,
+            )
+
+            # Map retries
+            max_retries_dict = {"None": 0, "Few": 2, "Many": 5}
+            handler_params = {"max_retries": max_retries_dict.get(max_retries, 2)}
+
             self.structured_output_handler = StructuredOutputHandler(
-                response_format=self.response_format, vendor_wrapper=self.get_vendor_wrapper()
+                core_client,
+                formatting_client,
+                structured_output_mode,
+                handler_params,
             )
         else:
             self.structured_output_handler = None
 
-        # Initialize vendor wrapper
-        self._vendor_wrapper = None
+        # Vendor wrapper lazy-instantiated via get_vendor_wrapper()
 
     def get_vendor_wrapper(self) -> VendorBase:
         """Get or create the vendor wrapper."""
@@ -194,45 +223,62 @@ class LM:
             return self.use_responses_api
 
         # Auto-detect based on model
-        RESPONSES_MODELS = {
+        responses_models = {
             "o4-mini", "o3", "o3-mini",  # Supported Synth-hosted models
             "gpt-oss-120b", "gpt-oss-20b"  # OSS models via Synth
         }
-        return self.model in RESPONSES_MODELS or (self.model and self.model in reasoning_models)
+        return self.model in responses_models or (self.model and self.model in reasoning_models)
 
     def _should_use_harmony(self) -> bool:
         """Determine if Harmony encoding should be used for OSS models."""
         # Only use Harmony for OSS models when NOT using OpenAI vendor
         # OpenAI hosts these models directly via Responses API
-        HARMONY_MODELS = {"gpt-oss-120b", "gpt-oss-20b"}
-        return self.model in HARMONY_MODELS and self.vendor != "openai"
+        harmony_models = {"gpt-oss-120b", "gpt-oss-20b"}
+        return self.model in harmony_models and self.vendor != "openai"
 
     async def respond_async(
         self,
-        system_message: Optional[str] = None,
-        user_message: Optional[str] = None,
-        messages: Optional[List[Dict]] = None,  # v2 compatibility
-        images_bytes: List[bytes] = [],
-        images_as_bytes: Optional[List[bytes]] = None,  # v2 compatibility
-        response_model: Optional[BaseModel] = None,  # v2 compatibility
-        tools: Optional[List[BaseTool]] = None,
-        turn_number: Optional[int] = None,
-        previous_response_id: Optional[str] = None,  # Responses API thread management
+        system_message: str | None = None,
+        user_message: str | None = None,
+        messages: list[dict] | None = None,  # v2 compatibility
+        images_bytes: list[bytes] | None = None,
+        images_as_bytes: list[bytes] | None = None,  # v2 compatibility
+        response_model: type[BaseModel] | None = None,  # v2 compatibility
+        tools: list[BaseTool] | None = None,
+        turn_number: int | None = None,
+        previous_response_id: str | None = None,  # Responses API thread management
         **kwargs,
     ) -> BaseLMResponse:
         """Async method to get LM response with v3 tracing."""
         start_time = time.time()
 
         # Handle v2 compatibility
-        if images_as_bytes is not None:
-            images_bytes = images_as_bytes
+        images_bytes = images_as_bytes if images_as_bytes is not None else (images_bytes or [])
 
-        # Handle response_model for structured outputs
+        # Handle response_model for structured outputs (runtime-provided)
         if response_model and not self.response_format:
             self.response_format = response_model
             self.is_structured = True
+            # Mirror initialization logic from __init__
+            forced_json_preferred = (self.vendor == "openai") or (
+                self.model in reasoning_models if self.model else False
+            )
+            structured_output_mode = "forced_json" if forced_json_preferred else "stringified_json"
+            core_client = get_client(
+                self.model,
+                with_formatting=(structured_output_mode == "forced_json"),
+                provider=self.vendor,
+            )
+            formatting_client = get_client(
+                self.model,
+                with_formatting=True,
+                provider=self.vendor if self.vendor != "custom_endpoint" else None,
+            )
             self.structured_output_handler = StructuredOutputHandler(
-                response_format=self.response_format, vendor_wrapper=self.get_vendor_wrapper()
+                core_client,
+                formatting_client,
+                structured_output_mode,
+                {"max_retries": 2},
             )
 
         # Set turn number if provided
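The runtime `response_model` path (v2 compatibility) mirrors the `__init__` wiring but hard-codes `{"max_retries": 2}` and reuses `self.model` for the formatting client instead of consulting `formatting_model_name`. A hedged usage sketch:

```python
import asyncio

from pydantic import BaseModel

from synth_ai.lm.core.main_v3 import LM

class Step(BaseModel):  # illustrative schema
    thought: str
    action: str

async def main() -> None:
    lm = LM(model="gpt-4o-mini")  # illustrative model name
    # Supplying response_model per call rebuilds the StructuredOutputHandler
    # on the fly, as in the hunk above.
    step = await lm.respond_async(
        system_message="You are an agent.",
        user_message="Choose the next action.",
        response_model=Step,
        turn_number=0,
    )
    print(step)

# asyncio.run(main())  # requires configured vendor credentials
```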
@@ -255,83 +301,94 @@ class LM:
         )
         messages_to_use = build_messages(system_message, user_message, images_bytes, self.model)
 
-        # Get vendor wrapper
-        vendor_wrapper = self.get_vendor_wrapper()
-
-        # Determine API type to use
-        use_responses = self._should_use_responses_api()
-        use_harmony = self._should_use_harmony()
-
-        # Decide response ID to use for thread management
-        response_id_to_use = None
-        if previous_response_id:
-            response_id_to_use = previous_response_id  # Manual override
-        elif self.auto_store_responses and self._last_response_id:
-            response_id_to_use = self._last_response_id  # Auto-chain
-
-        # Prepare parameters based on vendor type
-        if hasattr(vendor_wrapper, "_hit_api_async"):
-            # OpenAIStandard expects lm_config
-            lm_config = {"temperature": self.temperature, **self.additional_params, **kwargs}
-            if self.json_mode:
-                lm_config["response_format"] = {"type": "json_object"}
-
-            params = {"model": self.model, "messages": messages_to_use, "lm_config": lm_config}
+        # If using structured outputs, route through the handler
+        if self.structured_output_handler and self.response_format:
             if tools:
-                params["tools"] = tools
+                raise ValueError("Tools are not supported with structured output mode")
+            response = await self.structured_output_handler.call_async(
+                messages=messages_to_use,
+                model=self.model,
+                response_model=self.response_format,
+                use_ephemeral_cache_only=False,
+                lm_config={"temperature": self.temperature, **self.additional_params, **kwargs},
+                reasoning_effort="high",
+            )
         else:
-            # Other vendors use flat params
-            params = {
-                "model": self.model,
-                "messages": messages_to_use,
-                "temperature": self.temperature,
-                **self.additional_params,
-                **kwargs,
-            }
+            # Get vendor wrapper
+            vendor_wrapper = self.get_vendor_wrapper()
+
+            # Determine API type to use
+            use_responses = self._should_use_responses_api()
+            use_harmony = self._should_use_harmony()
+
+            # Decide response ID to use for thread management
+            response_id_to_use = None
+            if previous_response_id:
+                response_id_to_use = previous_response_id  # Manual override
+            elif self.auto_store_responses and self._last_response_id:
+                response_id_to_use = self._last_response_id  # Auto-chain
+
+            # Prepare parameters based on vendor type
+            if hasattr(vendor_wrapper, "_hit_api_async"):
+                # OpenAIStandard expects lm_config
+                lm_config = {"temperature": self.temperature, **self.additional_params, **kwargs}
+                if self.json_mode:
+                    lm_config["response_format"] = {"type": "json_object"}
+
+                params = {"model": self.model, "messages": messages_to_use, "lm_config": lm_config}
+                if tools:
+                    params["tools"] = tools
+            else:
+                # Other vendors use flat params
+                params = {
+                    "model": self.model,
+                    "messages": messages_to_use,
+                    "temperature": self.temperature,
+                    **self.additional_params,
+                    **kwargs,
+                }
 
-            if tools:
-                params["tools"] = [tool.to_dict() for tool in tools]
+                if tools:
+                    params["tools"] = [tool.to_dict() for tool in tools]
 
-            if self.json_mode:
-                params["response_format"] = {"type": "json_object"}
+                if self.json_mode:
+                    params["response_format"] = {"type": "json_object"}
 
-        # Call vendor with appropriate API type
-        try:
-            # Route to appropriate API
-            if use_harmony and hasattr(vendor_wrapper, "_hit_api_async_harmony"):
-                params["previous_response_id"] = response_id_to_use
-                response = await vendor_wrapper._hit_api_async_harmony(**params)
-            elif use_responses and hasattr(vendor_wrapper, "_hit_api_async_responses"):
-                params["previous_response_id"] = response_id_to_use
-                response = await vendor_wrapper._hit_api_async_responses(**params)
-            else:
-                # Standard chat completions API
-                if hasattr(vendor_wrapper, "_hit_api_async"):
-                    response = await vendor_wrapper._hit_api_async(**params)
-                elif hasattr(vendor_wrapper, "respond_async"):
-                    response = await vendor_wrapper.respond_async(**params)
-                elif hasattr(vendor_wrapper, "respond"):
-                    # Fallback to sync in executor
-                    loop = asyncio.get_event_loop()
-                    response = await loop.run_in_executor(None, vendor_wrapper.respond, params)
+            # Call vendor with appropriate API type
+            try:
+                # Route to appropriate API
+                if use_harmony and hasattr(vendor_wrapper, "_hit_api_async_harmony"):
+                    params["previous_response_id"] = response_id_to_use
+                    response = await vendor_wrapper._hit_api_async_harmony(**params)
+                elif use_responses and hasattr(vendor_wrapper, "_hit_api_async_responses"):
+                    params["previous_response_id"] = response_id_to_use
+                    response = await vendor_wrapper._hit_api_async_responses(**params)
                 else:
-                    raise AttributeError(
-                        f"Vendor wrapper {type(vendor_wrapper).__name__} has no suitable response method"
-                    )
-            if not hasattr(response, 'api_type'):
-                response.api_type = "chat"
-
-            # Update stored response ID if auto-storing
-            if self.auto_store_responses and hasattr(response, 'response_id') and response.response_id:
-                self._last_response_id = response.response_id
-
-        except Exception as e:
-            print(f"Error calling vendor: {e}")
-            raise
-
-        # Handle structured output
-        if self.structured_output_handler:
-            response = self.structured_output_handler.process_response(response)
+                    # Standard chat completions API
+                    if hasattr(vendor_wrapper, "_hit_api_async"):
+                        response = await vendor_wrapper._hit_api_async(**params)
+                    elif hasattr(vendor_wrapper, "respond_async"):
+                        response = await vendor_wrapper.respond_async(**params)
+                    elif hasattr(vendor_wrapper, "respond"):
+                        # Fallback to sync in executor
+                        loop = asyncio.get_event_loop()
+                        response = await loop.run_in_executor(None, vendor_wrapper.respond, params)
+                    else:
+                        raise AttributeError(
+                            f"Vendor wrapper {type(vendor_wrapper).__name__} has no suitable response method"
+                        )
+                if not hasattr(response, 'api_type'):
+                    response.api_type = "chat"
+
+                # Update stored response ID if auto-storing
+                if self.auto_store_responses and hasattr(response, 'response_id') and response.response_id:
+                    self._last_response_id = response.response_id
+
+            except Exception as e:
+                print(f"Error calling vendor: {e}")
+                raise
+
+        # No additional post-processing needed for structured outputs here
 
         # Record tracing event if enabled
         if (
@@ -340,36 +397,40 @@ class LM:
             and hasattr(self.session_tracer, "current_session")
         ):
             latency_ms = int((time.time() - start_time) * 1000)
+
+            # Create LLMCallRecord from the response
+            from datetime import datetime
+            started_at = datetime.utcnow()
+            completed_at = datetime.utcnow()
+
+            call_record = create_llm_call_record_from_response(
+                response=response,
+                model_name=self.model or self.vendor,
+                provider=self.vendor,
+                messages=messages_to_use,
+                temperature=self.temperature,
+                request_params={**self.additional_params, **kwargs},
+                tools=tools,
+                started_at=started_at,
+                completed_at=completed_at,
+                latency_ms=latency_ms,
+            )
+
+            # Compute aggregates from the call record
+            aggregates = compute_aggregates_from_call_records([call_record])
 
-            # Extract usage info if available
-            usage_info = {}
-            if hasattr(response, "usage") and response.usage:
-                usage_info = {
-                    "input_tokens": response.usage.get("input_tokens", 0),
-                    "output_tokens": response.usage.get("output_tokens", 0),
-                    "total_tokens": response.usage.get("total_tokens", 0),
-                    "cost_usd": response.usage.get("cost_usd", 0.0),
-                }
-            else:
-                # Default values when usage is not available
-                usage_info = {
-                    "input_tokens": 0,
-                    "output_tokens": 0,
-                    "total_tokens": 0,
-                    "cost_usd": 0.0,
-                }
-
-            # Create LM event
+            # Create LM event with call_records
             lm_event = LMCAISEvent(
                 system_instance_id=self.system_id,
                 time_record=TimeRecord(event_time=time.time(), message_time=turn_number),
-                model_name=self.model or self.vendor,
-                provider=self.vendor,
-                input_tokens=usage_info["input_tokens"],
-                output_tokens=usage_info["output_tokens"],
-                total_tokens=usage_info["total_tokens"],
-                cost_usd=usage_info["cost_usd"],
-                latency_ms=latency_ms,
+                # Aggregates at event level
+                input_tokens=aggregates["input_tokens"],
+                output_tokens=aggregates["output_tokens"],
+                total_tokens=aggregates["total_tokens"],
+                cost_usd=aggregates["cost_usd"],
+                latency_ms=aggregates["latency_ms"],
+                # Store the call record
+                call_records=[call_record],
                 metadata={
                     "temperature": self.temperature,
                     "json_mode": self.json_mode,
@@ -417,15 +478,15 @@ class LM:
 
     def respond(
         self,
-        system_message: Optional[str] = None,
-        user_message: Optional[str] = None,
-        messages: Optional[List[Dict]] = None,  # v2 compatibility
-        images_bytes: List[bytes] = [],
-        images_as_bytes: Optional[List[bytes]] = None,  # v2 compatibility
-        response_model: Optional[BaseModel] = None,  # v2 compatibility
-        tools: Optional[List[BaseTool]] = None,
-        previous_response_id: Optional[str] = None,  # Responses API thread management
-        turn_number: Optional[int] = None,
+        system_message: str | None = None,
+        user_message: str | None = None,
+        messages: list[dict] | None = None,  # v2 compatibility
+        images_bytes: list[bytes] | None = None,
+        images_as_bytes: list[bytes] | None = None,  # v2 compatibility
+        response_model: type[BaseModel] | None = None,  # v2 compatibility
+        tools: list[BaseTool] | None = None,
+        previous_response_id: str | None = None,  # Responses API thread management
+        turn_number: int | None = None,
         **kwargs,
     ) -> BaseLMResponse:
         """Synchronous wrapper for respond_async."""
synth_ai/tracing_v3/__init__.py

@@ -77,7 +77,7 @@ from .abstractions import (
     BaseEvent,
     RuntimeEvent,
     EnvironmentEvent,
-    SessionEventMessage,
+    SessionEventMarkovBlanketMessage,
     TimeRecord,
 )
 from .config import TursoConfig
@@ -89,7 +89,7 @@ __all__ = [
     "BaseEvent",
     "RuntimeEvent",
     "EnvironmentEvent",
-    "SessionEventMessage",
+    "SessionEventMarkovBlanketMessage",
     "TimeRecord",
     "TursoConfig",
 ]
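`SessionEventMessage` is renamed to `SessionEventMarkovBlanketMessage` in both the re-export and `__all__`, so downstream imports must be updated. A one-line migration sketch:

```python
# Before (synth-ai 0.2.3):
#   from synth_ai.tracing_v3 import SessionEventMessage
# After (synth-ai 0.2.4.dev2):
from synth_ai.tracing_v3 import SessionEventMarkovBlanketMessage
```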