symbolicai 1.5.0__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. symai/__init__.py +21 -71
  2. symai/backend/base.py +0 -26
  3. symai/backend/engines/drawing/engine_gemini_image.py +101 -0
  4. symai/backend/engines/embedding/engine_openai.py +11 -8
  5. symai/backend/engines/neurosymbolic/__init__.py +8 -0
  6. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +1 -0
  7. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +48 -1
  8. symai/backend/engines/neurosymbolic/engine_cerebras.py +1 -0
  9. symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +14 -1
  10. symai/backend/engines/neurosymbolic/engine_openrouter.py +294 -0
  11. symai/backend/mixin/__init__.py +4 -0
  12. symai/backend/mixin/anthropic.py +37 -16
  13. symai/backend/mixin/openrouter.py +2 -0
  14. symai/components.py +203 -13
  15. symai/extended/interfaces/nanobanana.py +23 -0
  16. symai/interfaces.py +2 -0
  17. symai/ops/primitives.py +0 -18
  18. symai/shellsv.py +2 -7
  19. symai/strategy.py +44 -4
  20. {symbolicai-1.5.0.dist-info → symbolicai-1.7.0.dist-info}/METADATA +3 -10
  21. {symbolicai-1.5.0.dist-info → symbolicai-1.7.0.dist-info}/RECORD +25 -48
  22. {symbolicai-1.5.0.dist-info → symbolicai-1.7.0.dist-info}/WHEEL +1 -1
  23. symai/backend/driver/webclient.py +0 -217
  24. symai/backend/engines/crawler/engine_selenium.py +0 -94
  25. symai/backend/engines/drawing/engine_dall_e.py +0 -131
  26. symai/backend/engines/embedding/engine_plugin_embeddings.py +0 -12
  27. symai/backend/engines/experiments/engine_bard_wrapper.py +0 -131
  28. symai/backend/engines/experiments/engine_gptfinetuner.py +0 -32
  29. symai/backend/engines/experiments/engine_llamacpp_completion.py +0 -142
  30. symai/backend/engines/neurosymbolic/engine_openai_gptX_completion.py +0 -277
  31. symai/collect/__init__.py +0 -8
  32. symai/collect/dynamic.py +0 -117
  33. symai/collect/pipeline.py +0 -156
  34. symai/collect/stats.py +0 -434
  35. symai/extended/crawler.py +0 -21
  36. symai/extended/interfaces/selenium.py +0 -18
  37. symai/extended/interfaces/vectordb.py +0 -21
  38. symai/extended/personas/__init__.py +0 -3
  39. symai/extended/personas/builder.py +0 -105
  40. symai/extended/personas/dialogue.py +0 -126
  41. symai/extended/personas/persona.py +0 -154
  42. symai/extended/personas/research/__init__.py +0 -1
  43. symai/extended/personas/research/yann_lecun.py +0 -62
  44. symai/extended/personas/sales/__init__.py +0 -1
  45. symai/extended/personas/sales/erik_james.py +0 -62
  46. symai/extended/personas/student/__init__.py +0 -1
  47. symai/extended/personas/student/max_tenner.py +0 -51
  48. symai/extended/strategies/__init__.py +0 -1
  49. symai/extended/strategies/cot.py +0 -40
  50. {symbolicai-1.5.0.dist-info → symbolicai-1.7.0.dist-info}/entry_points.txt +0 -0
  51. {symbolicai-1.5.0.dist-info → symbolicai-1.7.0.dist-info}/licenses/LICENSE +0 -0
  52. {symbolicai-1.5.0.dist-info → symbolicai-1.7.0.dist-info}/top_level.txt +0 -0
symai/__init__.py CHANGED
@@ -11,8 +11,6 @@ from rich.table import Table
11
11
  from rich.tree import Tree
12
12
 
13
13
  from .backend import settings
14
- from .menu.screen import show_intro_menu
15
- from .misc.console import ConsoleStyle
16
14
  from .utils import UserMessage
17
15
 
18
16
  # do not remove - hides the libraries' debug messages
@@ -33,7 +31,7 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false"
33
31
  # Create singleton instance
34
32
  config_manager = settings.SymAIConfig()
35
33
 
36
- SYMAI_VERSION = "1.5.0"
34
+ SYMAI_VERSION = "1.7.0"
37
35
  __version__ = SYMAI_VERSION
38
36
  __root_dir__ = config_manager.config_dir
39
37
 
@@ -87,20 +85,6 @@ def _start_symai():
87
85
  # Load and manage configurations
88
86
  symai_config = config_manager.load_config("symai.config.json")
89
87
 
90
- # MIGRATE THE ENVIRONMENT VARIABLES
91
- # *==========================================================================================================*
92
- if "COLLECTION_URI" not in symai_config:
93
- updates = {
94
- "COLLECTION_URI": "mongodb+srv://User:vt3epocXitd6WlQ6@extensityai.c1ajxxy.mongodb.net/?retryWrites=true&w=majority",
95
- "COLLECTION_DB": "ExtensityAI",
96
- "COLLECTION_STORAGE": "SymbolicAI",
97
- "SUPPORT_COMMUNITY": False,
98
- }
99
- config_manager.migrate_config("symai.config.json", updates)
100
- with ConsoleStyle("info") as console:
101
- msg = "Currently you are sharing your user experience with us by uploading the data to our research server, and thereby helping us improve future models and the overall SymbolicAI experience. We thank you very much for supporting the research community! If you wish to disable the data collection option go to your .symai config situated in your home directory or set the environment variable `SUPPORT_COMMUNITY` to `False`."
102
- console.print(msg)
103
-
104
88
  # POST-MIGRATION CHECKS
105
89
  # *==============================================================================================================*
106
90
  if "TEXT_TO_SPEECH_ENGINE_API_KEY" not in symai_config:
@@ -114,11 +98,6 @@ def _start_symai():
114
98
  symsh_config = config_manager.load_config("symsh.config.json")
115
99
  symserver_config = config_manager.load_config("symserver.config.json")
116
100
 
117
- # MIGRATE THE SHELL SPLASH SCREEN CONFIGURATION
118
- # *==============================================================================================================*
119
- if "show-splash-screen" not in symsh_config:
120
- config_manager.migrate_config("symsh.config.json", {"show-splash-screen": True})
121
-
122
101
  # CHECK IF THE USER HAS A NEUROSYMBOLIC API KEY
123
102
  # *==============================================================================================================*
124
103
  if not (
@@ -130,7 +109,6 @@ def _start_symai():
130
109
  ):
131
110
  # Try to fallback to the global (home) config if environment is not home
132
111
  if config_manager.config_dir != config_manager._home_config_dir:
133
- show_intro_menu()
134
112
  UserMessage(
135
113
  f"You didn't configure your environment ({config_manager.config_dir})! Falling back to the global ({config_manager._home_config_dir}) configuration if it exists."
136
114
  )
@@ -345,57 +323,29 @@ def display_config():
345
323
 
346
324
 
347
325
  def setup_wizard(_symai_config_path_):
348
- show_intro_menu()
349
-
350
- _nesy_engine_api_key = ""
351
- _nesy_engine_model = ""
352
- _symbolic_engine_api_key = ""
353
- _symbolic_engine_model = ""
354
- _embedding_engine_api_key = ""
355
- _embedding_model = ""
356
- _drawing_engine_api_key = ""
357
- _drawing_engine_model = ""
358
- _vision_engine_model = ""
359
- _search_engine_api_key = ""
360
- _search_engine_model = ""
361
- _ocr_engine_api_key = ""
362
- _speech_to_text_engine_model = ""
363
- _speech_to_text_api_key = ""
364
- _text_to_speech_engine_api_key = ""
365
- _text_to_speech_engine_model = ""
366
- _text_to_speech_engine_voice = ""
367
- _indexing_engine_api_key = ""
368
- _indexing_engine_environment = ""
369
- _caption_engine_environment = ""
370
- _support_comminity = False
371
-
372
326
  config_manager.save_config(
373
327
  _symai_config_path_,
374
328
  {
375
- "NEUROSYMBOLIC_ENGINE_API_KEY": _nesy_engine_api_key,
376
- "NEUROSYMBOLIC_ENGINE_MODEL": _nesy_engine_model,
377
- "SYMBOLIC_ENGINE_API_KEY": _symbolic_engine_api_key,
378
- "SYMBOLIC_ENGINE": _symbolic_engine_model,
379
- "EMBEDDING_ENGINE_API_KEY": _embedding_engine_api_key,
380
- "EMBEDDING_ENGINE_MODEL": _embedding_model,
381
- "DRAWING_ENGINE_API_KEY": _drawing_engine_api_key,
382
- "DRAWING_ENGINE_MODEL": _drawing_engine_model,
383
- "VISION_ENGINE_MODEL": _vision_engine_model,
384
- "SEARCH_ENGINE_API_KEY": _search_engine_api_key,
385
- "SEARCH_ENGINE_MODEL": _search_engine_model,
386
- "OCR_ENGINE_API_KEY": _ocr_engine_api_key,
387
- "SPEECH_TO_TEXT_ENGINE_MODEL": _speech_to_text_engine_model,
388
- "SPEECH_TO_TEXT_API_KEY": _speech_to_text_api_key,
389
- "TEXT_TO_SPEECH_ENGINE_API_KEY": _text_to_speech_engine_api_key,
390
- "TEXT_TO_SPEECH_ENGINE_MODEL": _text_to_speech_engine_model,
391
- "TEXT_TO_SPEECH_ENGINE_VOICE": _text_to_speech_engine_voice,
392
- "INDEXING_ENGINE_API_KEY": _indexing_engine_api_key,
393
- "INDEXING_ENGINE_ENVIRONMENT": _indexing_engine_environment,
394
- "CAPTION_ENGINE_MODEL": _caption_engine_environment,
395
- "COLLECTION_URI": "mongodb+srv://User:vt3epocXitd6WlQ6@extensityai.c1ajxxy.mongodb.net/?retryWrites=true&w=majority",
396
- "COLLECTION_DB": "ExtensityAI",
397
- "COLLECTION_STORAGE": "SymbolicAI",
398
- "SUPPORT_COMMUNITY": _support_comminity,
329
+ "NEUROSYMBOLIC_ENGINE_API_KEY": "",
330
+ "NEUROSYMBOLIC_ENGINE_MODEL": "",
331
+ "SYMBOLIC_ENGINE_API_KEY": "",
332
+ "SYMBOLIC_ENGINE": "",
333
+ "EMBEDDING_ENGINE_API_KEY": "",
334
+ "EMBEDDING_ENGINE_MODEL": "",
335
+ "DRAWING_ENGINE_API_KEY": "",
336
+ "DRAWING_ENGINE_MODEL": "",
337
+ "VISION_ENGINE_MODEL": "",
338
+ "SEARCH_ENGINE_API_KEY": "",
339
+ "SEARCH_ENGINE_MODEL": "",
340
+ "OCR_ENGINE_API_KEY": "",
341
+ "SPEECH_TO_TEXT_ENGINE_MODEL": "",
342
+ "SPEECH_TO_TEXT_API_KEY": "",
343
+ "TEXT_TO_SPEECH_ENGINE_API_KEY": "",
344
+ "TEXT_TO_SPEECH_ENGINE_MODEL": "",
345
+ "TEXT_TO_SPEECH_ENGINE_VOICE": "",
346
+ "INDEXING_ENGINE_API_KEY": "",
347
+ "INDEXING_ENGINE_ENVIRONMENT": "",
348
+ "CAPTION_ENGINE_MODEL": "",
399
349
  },
400
350
  )
401
351
 
symai/backend/base.py CHANGED
@@ -3,21 +3,11 @@ import time
3
3
  from abc import ABC, abstractmethod
4
4
  from typing import Any
5
5
 
6
- from ..collect import CollectionRepository, rec_serialize
7
6
  from ..utils import UserMessage
8
7
  from .settings import HOME_PATH
9
8
 
10
9
  ENGINE_UNREGISTERED = "<UNREGISTERED/>"
11
10
 
12
- COLLECTION_LOGGING_ENGINES = {
13
- "GPTXChatEngine",
14
- "GPTXCompletionEngine",
15
- "SerpApiEngine",
16
- "WolframAlphaEngine",
17
- "SeleniumEngine",
18
- "OCREngine",
19
- }
20
-
21
11
 
22
12
  class Engine(ABC):
23
13
  def __init__(self) -> None:
@@ -26,8 +16,6 @@ class Engine(ABC):
26
16
  self.logging = False
27
17
  self.log_level = logging.DEBUG
28
18
  self.time_clock = False
29
- self.collection = CollectionRepository()
30
- self.collection.connect()
31
19
  # create formatter
32
20
  __root_dir__ = HOME_PATH
33
21
  __root_dir__.mkdir(parents=True, exist_ok=True)
@@ -66,9 +54,6 @@ class Engine(ABC):
66
54
  if self.logging:
67
55
  self.logger.log(self.log_level, log)
68
56
 
69
- if str(self) in COLLECTION_LOGGING_ENGINES:
70
- self._record_collection_entry(argument, metadata, req_time)
71
-
72
57
  self._trigger_output_handlers(argument, res, metadata)
73
58
  return res, metadata
74
59
 
@@ -92,17 +77,6 @@ class Engine(ABC):
92
77
  if argument_handler:
93
78
  argument_handler((result, metadata))
94
79
 
95
- def _record_collection_entry(self, argument: Any, metadata: dict, req_time: float) -> None:
96
- self.collection.add(
97
- forward={"args": rec_serialize(argument.args), "kwds": rec_serialize(argument.kwargs)},
98
- engine=str(self),
99
- metadata={
100
- "time": req_time,
101
- "data": rec_serialize(metadata),
102
- "argument": rec_serialize(argument),
103
- },
104
- )
105
-
106
80
  def id(self) -> str:
107
81
  return ENGINE_UNREGISTERED
108
82
 
@@ -0,0 +1,101 @@
1
+ import base64
2
+ import logging
3
+ import mimetypes
4
+ import tempfile
5
+ from pathlib import Path
6
+
7
+ from google import genai
8
+ from google.genai import types
9
+
10
+ from ....symbol import Result
11
+ from ....utils import UserMessage
12
+ from ...base import Engine
13
+ from ...settings import SYMAI_CONFIG
14
+
15
+ logging.getLogger("google.genai").setLevel(logging.ERROR)
16
+ logging.getLogger("google_genai").propagate = False
17
+
18
+
19
+ class GeminiImageResult(Result):
20
+ def __init__(self, value, **kwargs):
21
+ super().__init__(value, **kwargs)
22
+ paths = []
23
+ for candidate in getattr(value, "candidates", []) or []:
24
+ content = getattr(candidate, "content", None)
25
+ parts = getattr(content, "parts", []) if content else []
26
+ for part in parts:
27
+ inline_data = getattr(part, "inline_data", None)
28
+ if inline_data is None:
29
+ continue
30
+ mime_type = getattr(inline_data, "mime_type", None) or "image/png"
31
+ data = getattr(inline_data, "data", None)
32
+ if data is None:
33
+ continue
34
+ if isinstance(data, str):
35
+ data = base64.b64decode(data)
36
+ suffix = mimetypes.guess_extension(mime_type) or ".png"
37
+ with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tmp_file:
38
+ path = tmp_file.name
39
+ with Path(path).open("wb") as f:
40
+ f.write(data)
41
+ paths.append(path)
42
+ if not paths:
43
+ UserMessage("Gemini image generation returned no images.", raise_with=ValueError)
44
+ self._value = paths
45
+
46
+
47
+ class GeminiImageEngine(Engine):
48
+ def __init__(self, api_key: str | None = None, model: str | None = None):
49
+ super().__init__()
50
+ self.config = SYMAI_CONFIG
51
+ self.api_key = self.config.get("DRAWING_ENGINE_API_KEY") if api_key is None else api_key
52
+ self.model = self.config.get("DRAWING_ENGINE_MODEL") if model is None else model
53
+ self.name = self.__class__.__name__
54
+ self.client = genai.Client(api_key=self.api_key)
55
+
56
+ def id(self) -> str:
57
+ cfg_model = self.config.get("DRAWING_ENGINE_MODEL")
58
+ if cfg_model and cfg_model.startswith(("gemini-2.5-flash-image", "gemini-3-pro-image-preview")):
59
+ return "drawing"
60
+ return super().id()
61
+
62
+ def command(self, *args, **kwargs):
63
+ super().command(*args, **kwargs)
64
+ if "DRAWING_ENGINE_API_KEY" in kwargs:
65
+ self.api_key = kwargs["DRAWING_ENGINE_API_KEY"]
66
+ self.client = genai.Client(api_key=self.api_key)
67
+ if "DRAWING_ENGINE_MODEL" in kwargs:
68
+ self.model = kwargs["DRAWING_ENGINE_MODEL"]
69
+
70
+ def prepare(self, argument):
71
+ argument.prop.prepared_input = str(argument.prop.processed_input)
72
+
73
+ def forward(self, argument):
74
+ prompt = argument.prop.prepared_input
75
+ kwargs = argument.kwargs
76
+ model = kwargs.get("model", self.model)
77
+ operation = kwargs.get("operation")
78
+
79
+ if operation != "create":
80
+ UserMessage(f"Unknown operation: {operation}", raise_with=ValueError)
81
+
82
+ response_modalities = kwargs.get("response_modalities", ["IMAGE"])
83
+ config = kwargs.get("config")
84
+ if config is None:
85
+ config = types.GenerateContentConfig(response_modalities=response_modalities)
86
+
87
+ except_remedy = kwargs.get("except_remedy", None)
88
+ try:
89
+ res = self.client.models.generate_content(
90
+ model=model,
91
+ contents=prompt,
92
+ config=config,
93
+ )
94
+ except Exception as e:
95
+ if except_remedy is None:
96
+ raise
97
+ res = except_remedy(self, e, None, argument)
98
+
99
+ metadata = {}
100
+ result = GeminiImageResult(res)
101
+ return [result], metadata
@@ -20,25 +20,28 @@ class EmbeddingEngine(Engine, OpenAIMixin):
20
20
  logger = logging.getLogger("openai")
21
21
  logger.setLevel(logging.WARNING)
22
22
  self.config = SYMAI_CONFIG
23
+ self._api_key = api_key or self.config.get("EMBEDDING_ENGINE_API_KEY")
24
+ self._model = model or self.config.get("EMBEDDING_ENGINE_MODEL")
23
25
  if self.id() != "embedding":
24
26
  return # do not initialize if not embedding; avoids conflict with llama.cpp check in EngineRepository.register_from_package
25
- openai.api_key = self.config["EMBEDDING_ENGINE_API_KEY"] if api_key is None else api_key
26
- self.model = self.config["EMBEDDING_ENGINE_MODEL"] if model is None else model
27
+ # Use openai client instance (required for openai 1.0+)
28
+ self._client = openai.OpenAI(api_key=self._api_key)
29
+ self.model = self._model
27
30
  self.max_tokens = self.api_max_context_tokens()
28
31
  self.embedding_dim = self.api_embedding_dims()
29
32
  self.name = self.__class__.__name__
30
33
 
31
34
  def id(self) -> str:
32
- if self.config.get("EMBEDDING_ENGINE_API_KEY") and self.config[
33
- "EMBEDDING_ENGINE_MODEL"
34
- ].startswith("text-embedding"):
35
+ # Check stored params (from constructor or config)
36
+ if self._api_key and self._model and self._model.startswith("text-embedding"):
35
37
  return "embedding"
36
38
  return super().id() # default to unregistered
37
39
 
38
40
  def command(self, *args, **kwargs):
39
41
  super().command(*args, **kwargs)
40
42
  if "EMBEDDING_ENGINE_API_KEY" in kwargs:
41
- openai.api_key = kwargs["EMBEDDING_ENGINE_API_KEY"]
43
+ self._api_key = kwargs["EMBEDDING_ENGINE_API_KEY"]
44
+ self._client = openai.OpenAI(api_key=self._api_key)
42
45
  if "EMBEDDING_ENGINE_MODEL" in kwargs:
43
46
  self.model = kwargs["EMBEDDING_ENGINE_MODEL"]
44
47
 
@@ -52,11 +55,11 @@ class EmbeddingEngine(Engine, OpenAIMixin):
52
55
  new_dim = kwargs.get("new_dim")
53
56
 
54
57
  try:
55
- res = openai.embeddings.create(model=self.model, input=inp)
58
+ res = self._client.embeddings.create(model=self.model, input=inp)
56
59
  except Exception as e:
57
60
  if except_remedy is None:
58
61
  raise e
59
- callback = openai.embeddings.create
62
+ callback = self._client.embeddings.create
60
63
  res = except_remedy(e, inp, callback, self, *args, **kwargs)
61
64
 
62
65
  if new_dim:
@@ -12,6 +12,8 @@ from ...mixin import (
12
12
  OPENAI_CHAT_MODELS,
13
13
  OPENAI_REASONING_MODELS,
14
14
  OPENAI_RESPONSES_MODELS,
15
+ OPENROUTER_CHAT_MODELS,
16
+ OPENROUTER_REASONING_MODELS,
15
17
  )
16
18
  from .engine_anthropic_claudeX_chat import ClaudeXChatEngine
17
19
  from .engine_anthropic_claudeX_reasoning import ClaudeXReasoningEngine
@@ -19,6 +21,7 @@ from .engine_cerebras import CerebrasEngine
19
21
  from .engine_deepseekX_reasoning import DeepSeekXReasoningEngine
20
22
  from .engine_google_geminiX_reasoning import GeminiXReasoningEngine
21
23
  from .engine_groq import GroqEngine
24
+ from .engine_openrouter import OpenRouterEngine
22
25
  from .engine_openai_gptX_chat import GPTXChatEngine
23
26
  from .engine_openai_gptX_reasoning import GPTXReasoningEngine
24
27
  from .engine_openai_responses import OpenAIResponsesEngine
@@ -36,6 +39,8 @@ ENGINE_MAPPING = {
36
39
  **dict.fromkeys(OPENAI_RESPONSES_MODELS, OpenAIResponsesEngine),
37
40
  **dict.fromkeys(GROQ_CHAT_MODELS, GroqEngine),
38
41
  **dict.fromkeys(GROQ_REASONING_MODELS, GroqEngine),
42
+ **dict.fromkeys(OPENROUTER_CHAT_MODELS, OpenRouterEngine),
43
+ **dict.fromkeys(OPENROUTER_REASONING_MODELS, OpenRouterEngine),
39
44
  }
40
45
 
41
46
  __all__ = [
@@ -53,6 +58,8 @@ __all__ = [
53
58
  "OPENAI_CHAT_MODELS",
54
59
  "OPENAI_REASONING_MODELS",
55
60
  "OPENAI_RESPONSES_MODELS",
61
+ "OPENROUTER_CHAT_MODELS",
62
+ "OPENROUTER_REASONING_MODELS",
56
63
  "ClaudeXChatEngine",
57
64
  "ClaudeXReasoningEngine",
58
65
  "DeepSeekXReasoningEngine",
@@ -60,5 +67,6 @@ __all__ = [
60
67
  "GPTXReasoningEngine",
61
68
  "GeminiXReasoningEngine",
62
69
  "GroqEngine",
70
+ "OpenRouterEngine",
63
71
  "OpenAIResponsesEngine",
64
72
  ]
@@ -64,6 +64,7 @@ class ClaudeXChatEngine(Engine, AnthropicMixin):
64
64
  and "4-0" not in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
65
65
  and "4-1" not in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
66
66
  and "4-5" not in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
67
+ and "4-6" not in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
67
68
  )
68
69
  ):
69
70
  return "neurosymbolic"
@@ -66,6 +66,7 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
66
66
  or "4-0" in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
67
67
  or "4-1" in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
68
68
  or "4-5" in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
69
+ or "4-6" in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
69
70
  )
70
71
  ):
71
72
  return "neurosymbolic"
@@ -348,9 +349,48 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
348
349
 
349
350
  return {"role": "user", "content": user_text}
350
351
 
352
+ def _build_output_config(self, response_format):
353
+ if response_format is None:
354
+ return NOT_GIVEN
355
+
356
+ if response_format["type"] == "json_schema":
357
+ schema = response_format.get("schema")
358
+ if schema is None and response_format.get("json_schema") is not None:
359
+ schema = response_format["json_schema"].get("schema", response_format["json_schema"])
360
+ if schema is None:
361
+ return NOT_GIVEN
362
+ return {"format": {"type": "json_schema", "schema": schema}}
363
+
364
+ if response_format["type"] == "json_object":
365
+ schema = response_format.get("schema")
366
+ if schema is None:
367
+ return NOT_GIVEN
368
+ return {"format": {"type": "json_schema", "schema": schema}}
369
+
370
+ return NOT_GIVEN
371
+
351
372
  def _prepare_request_payload(self, argument):
352
373
  kwargs = argument.kwargs
353
374
  model = kwargs.get("model", self.model)
375
+ long_context_1m = bool(kwargs.get("long_context_1m", False))
376
+ use_long_context_1m = long_context_1m and self.supports_long_context_1m(model)
377
+ effective_context_tokens = self.api_max_context_tokens(model=model)
378
+ if effective_context_tokens is None:
379
+ effective_context_tokens = 200_000
380
+ if use_long_context_1m:
381
+ effective_context_tokens = self.api_max_context_tokens(
382
+ long_context_1m=True, model=model
383
+ )
384
+ if long_context_1m and not use_long_context_1m:
385
+ UserMessage(
386
+ "long_context_1m is only supported for claude-opus-4-6 and claude-sonnet-4-5; "
387
+ f"falling back to {effective_context_tokens} token context."
388
+ )
389
+
390
+ extra_headers = None
391
+ if use_long_context_1m:
392
+ extra_headers = {"anthropic-beta": self.long_context_beta_header()}
393
+
354
394
  stop = kwargs.get("stop", NOT_GIVEN)
355
395
  temperature = kwargs.get("temperature", 1)
356
396
  thinking_arg = kwargs.get("thinking", NOT_GIVEN)
@@ -368,6 +408,8 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
368
408
  tool_choice = kwargs.get("tool_choice", NOT_GIVEN)
369
409
  metadata_anthropic = kwargs.get("metadata", NOT_GIVEN)
370
410
  max_tokens = kwargs.get("max_tokens", self.max_response_tokens)
411
+ response_format = kwargs.get("response_format", argument.prop.response_format)
412
+ output_config = self._build_output_config(response_format)
371
413
 
372
414
  if stop != NOT_GIVEN and not isinstance(stop, list):
373
415
  stop = [stop]
@@ -377,7 +419,7 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
377
419
  if stop != NOT_GIVEN:
378
420
  stop = [r"{s}" for s in stop]
379
421
 
380
- return {
422
+ payload = {
381
423
  "model": model,
382
424
  "max_tokens": max_tokens,
383
425
  "stop_sequences": stop,
@@ -389,7 +431,12 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
389
431
  "metadata": metadata_anthropic,
390
432
  "tools": tools,
391
433
  "tool_choice": tool_choice,
434
+ "output_config": output_config,
392
435
  }
436
+ if extra_headers is not None:
437
+ payload["extra_headers"] = extra_headers
438
+
439
+ return payload
393
440
 
394
441
  def _collect_response(self, res):
395
442
  if isinstance(res, list):
@@ -16,6 +16,7 @@ logging.getLogger("requests").setLevel(logging.ERROR)
16
16
  logging.getLogger("urllib").setLevel(logging.ERROR)
17
17
  logging.getLogger("httpx").setLevel(logging.ERROR)
18
18
  logging.getLogger("httpcore").setLevel(logging.ERROR)
19
+ logging.getLogger("hpack").setLevel(logging.ERROR)
19
20
 
20
21
 
21
22
  _NON_VERBOSE_OUTPUT = (
@@ -580,12 +580,25 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
580
580
  def _prepare_request_payload(self, argument):
581
581
  kwargs = argument.kwargs
582
582
 
583
+ stop_sequences = kwargs.get("stop")
584
+ if stop_sequences:
585
+ if isinstance(stop_sequences, str):
586
+ stop_sequences = [stop_sequences]
587
+ elif not isinstance(stop_sequences, list):
588
+ stop_sequences = None
589
+ else:
590
+ stop_sequences = None
591
+ if isinstance(stop_sequences, list):
592
+ stop_sequences = [seq for seq in stop_sequences if seq]
593
+ if not stop_sequences:
594
+ stop_sequences = None
595
+
583
596
  payload = {
584
597
  "max_output_tokens": kwargs.get("max_tokens", self.max_response_tokens),
585
598
  "temperature": kwargs.get("temperature", 1.0),
586
599
  "top_p": kwargs.get("top_p", 0.95),
587
600
  "top_k": kwargs.get("top_k", 40),
588
- "stop_sequences": kwargs.get("stop", None),
601
+ "stop_sequences": stop_sequences,
589
602
  "stream": kwargs.get("stream", False),
590
603
  }
591
604