symbolicai 1.0.0__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. symai/__init__.py +198 -134
  2. symai/backend/base.py +51 -51
  3. symai/backend/engines/drawing/engine_bfl.py +33 -33
  4. symai/backend/engines/drawing/engine_gpt_image.py +4 -10
  5. symai/backend/engines/embedding/engine_llama_cpp.py +50 -35
  6. symai/backend/engines/embedding/engine_openai.py +22 -16
  7. symai/backend/engines/execute/engine_python.py +16 -16
  8. symai/backend/engines/files/engine_io.py +51 -49
  9. symai/backend/engines/imagecaptioning/engine_blip2.py +27 -23
  10. symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +53 -46
  11. symai/backend/engines/index/engine_pinecone.py +116 -88
  12. symai/backend/engines/index/engine_qdrant.py +1011 -0
  13. symai/backend/engines/index/engine_vectordb.py +78 -52
  14. symai/backend/engines/lean/engine_lean4.py +65 -25
  15. symai/backend/engines/neurosymbolic/__init__.py +35 -28
  16. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +137 -135
  17. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +145 -152
  18. symai/backend/engines/neurosymbolic/engine_cerebras.py +328 -0
  19. symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +75 -49
  20. symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +199 -155
  21. symai/backend/engines/neurosymbolic/engine_groq.py +106 -72
  22. symai/backend/engines/neurosymbolic/engine_huggingface.py +100 -67
  23. symai/backend/engines/neurosymbolic/engine_llama_cpp.py +121 -93
  24. symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +213 -132
  25. symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +180 -137
  26. symai/backend/engines/ocr/engine_apilayer.py +18 -20
  27. symai/backend/engines/output/engine_stdout.py +9 -9
  28. symai/backend/engines/{webscraping → scrape}/engine_requests.py +25 -11
  29. symai/backend/engines/search/engine_openai.py +95 -83
  30. symai/backend/engines/search/engine_parallel.py +665 -0
  31. symai/backend/engines/search/engine_perplexity.py +40 -41
  32. symai/backend/engines/search/engine_serpapi.py +33 -28
  33. symai/backend/engines/speech_to_text/engine_local_whisper.py +37 -27
  34. symai/backend/engines/symbolic/engine_wolframalpha.py +14 -8
  35. symai/backend/engines/text_to_speech/engine_openai.py +15 -19
  36. symai/backend/engines/text_vision/engine_clip.py +34 -28
  37. symai/backend/engines/userinput/engine_console.py +3 -4
  38. symai/backend/mixin/__init__.py +4 -0
  39. symai/backend/mixin/anthropic.py +48 -40
  40. symai/backend/mixin/cerebras.py +9 -0
  41. symai/backend/mixin/deepseek.py +4 -5
  42. symai/backend/mixin/google.py +5 -4
  43. symai/backend/mixin/groq.py +2 -4
  44. symai/backend/mixin/openai.py +132 -110
  45. symai/backend/settings.py +14 -14
  46. symai/chat.py +164 -94
  47. symai/collect/dynamic.py +13 -11
  48. symai/collect/pipeline.py +39 -31
  49. symai/collect/stats.py +109 -69
  50. symai/components.py +578 -238
  51. symai/constraints.py +14 -5
  52. symai/core.py +1495 -1210
  53. symai/core_ext.py +55 -50
  54. symai/endpoints/api.py +113 -58
  55. symai/extended/api_builder.py +22 -17
  56. symai/extended/arxiv_pdf_parser.py +13 -5
  57. symai/extended/bibtex_parser.py +8 -4
  58. symai/extended/conversation.py +88 -69
  59. symai/extended/document.py +40 -27
  60. symai/extended/file_merger.py +45 -7
  61. symai/extended/graph.py +38 -24
  62. symai/extended/html_style_template.py +17 -11
  63. symai/extended/interfaces/blip_2.py +1 -1
  64. symai/extended/interfaces/clip.py +4 -2
  65. symai/extended/interfaces/console.py +5 -3
  66. symai/extended/interfaces/dall_e.py +3 -1
  67. symai/extended/interfaces/file.py +2 -0
  68. symai/extended/interfaces/flux.py +3 -1
  69. symai/extended/interfaces/gpt_image.py +15 -6
  70. symai/extended/interfaces/input.py +2 -1
  71. symai/extended/interfaces/llava.py +1 -1
  72. symai/extended/interfaces/{naive_webscraping.py → naive_scrape.py} +3 -2
  73. symai/extended/interfaces/naive_vectordb.py +2 -2
  74. symai/extended/interfaces/ocr.py +4 -2
  75. symai/extended/interfaces/openai_search.py +2 -0
  76. symai/extended/interfaces/parallel.py +30 -0
  77. symai/extended/interfaces/perplexity.py +2 -0
  78. symai/extended/interfaces/pinecone.py +6 -4
  79. symai/extended/interfaces/python.py +2 -0
  80. symai/extended/interfaces/serpapi.py +2 -0
  81. symai/extended/interfaces/terminal.py +0 -1
  82. symai/extended/interfaces/tts.py +2 -1
  83. symai/extended/interfaces/whisper.py +2 -1
  84. symai/extended/interfaces/wolframalpha.py +1 -0
  85. symai/extended/metrics/__init__.py +1 -1
  86. symai/extended/metrics/similarity.py +5 -2
  87. symai/extended/os_command.py +31 -22
  88. symai/extended/packages/symdev.py +39 -34
  89. symai/extended/packages/sympkg.py +30 -27
  90. symai/extended/packages/symrun.py +46 -35
  91. symai/extended/repo_cloner.py +10 -9
  92. symai/extended/seo_query_optimizer.py +15 -12
  93. symai/extended/solver.py +104 -76
  94. symai/extended/summarizer.py +8 -7
  95. symai/extended/taypan_interpreter.py +10 -9
  96. symai/extended/vectordb.py +28 -15
  97. symai/formatter/formatter.py +39 -31
  98. symai/formatter/regex.py +46 -44
  99. symai/functional.py +184 -86
  100. symai/imports.py +85 -51
  101. symai/interfaces.py +1 -1
  102. symai/memory.py +33 -24
  103. symai/menu/screen.py +28 -19
  104. symai/misc/console.py +27 -27
  105. symai/misc/loader.py +4 -3
  106. symai/models/base.py +147 -76
  107. symai/models/errors.py +1 -1
  108. symai/ops/__init__.py +1 -1
  109. symai/ops/measures.py +17 -14
  110. symai/ops/primitives.py +933 -635
  111. symai/post_processors.py +28 -24
  112. symai/pre_processors.py +58 -52
  113. symai/processor.py +15 -9
  114. symai/prompts.py +714 -649
  115. symai/server/huggingface_server.py +115 -32
  116. symai/server/llama_cpp_server.py +14 -6
  117. symai/server/qdrant_server.py +206 -0
  118. symai/shell.py +98 -39
  119. symai/shellsv.py +307 -223
  120. symai/strategy.py +135 -81
  121. symai/symbol.py +276 -225
  122. symai/utils.py +62 -46
  123. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/METADATA +19 -9
  124. symbolicai-1.1.1.dist-info/RECORD +169 -0
  125. symbolicai-1.0.0.dist-info/RECORD +0 -163
  126. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/WHEEL +0 -0
  127. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/entry_points.txt +0 -0
  128. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/licenses/LICENSE +0 -0
  129. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/top_level.txt +0 -0
@@ -30,19 +30,20 @@ class TokenizerWrapper:
     def encode(self, text: str) -> int:
         return self.compute_tokens_func([{"role": "user", "content": text}])
 
+
 class GeminiXReasoningEngine(Engine, GoogleMixin):
     def __init__(self, api_key: str | None = None, model: str | None = None):
         super().__init__()
         self.config = deepcopy(SYMAI_CONFIG)
         # In case we use EngineRepository.register to inject the api_key and model => dynamically change the engine at runtime
         if api_key is not None and model is not None:
-            self.config['NEUROSYMBOLIC_ENGINE_API_KEY'] = api_key
-            self.config['NEUROSYMBOLIC_ENGINE_MODEL'] = model
-        if self.id() != 'neurosymbolic':
-            return # do not initialize if not neurosymbolic; avoids conflict with llama.cpp check in EngineRepository.register_from_package
+            self.config["NEUROSYMBOLIC_ENGINE_API_KEY"] = api_key
+            self.config["NEUROSYMBOLIC_ENGINE_MODEL"] = model
+        if self.id() != "neurosymbolic":
+            return  # do not initialize if not neurosymbolic; avoids conflict with llama.cpp check in EngineRepository.register_from_package
 
-        self.api_key = self.config['NEUROSYMBOLIC_ENGINE_API_KEY']
-        self.model = self.config['NEUROSYMBOLIC_ENGINE_MODEL']
+        self.api_key = self.config["NEUROSYMBOLIC_ENGINE_API_KEY"]
+        self.model = self.config["NEUROSYMBOLIC_ENGINE_MODEL"]
         self.name = self.__class__.__name__
         self.tokenizer = TokenizerWrapper(self.compute_required_tokens)
         self.max_context_tokens = self.api_max_context_tokens()
@@ -50,18 +51,18 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         self.client = genai.Client(api_key=self.api_key)
 
     def id(self) -> str:
-        model = self.config.get('NEUROSYMBOLIC_ENGINE_MODEL')
-        if model and model.startswith('gemini'):
-            return 'neurosymbolic'
-        return super().id() # default to unregistered
+        model = self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
+        if model and model.startswith("gemini"):
+            return "neurosymbolic"
+        return super().id()  # default to unregistered
 
     def command(self, *args, **kwargs):
         super().command(*args, **kwargs)
-        if 'NEUROSYMBOLIC_ENGINE_API_KEY' in kwargs:
-            self.api_key = kwargs['NEUROSYMBOLIC_ENGINE_API_KEY']
+        if "NEUROSYMBOLIC_ENGINE_API_KEY" in kwargs:
+            self.api_key = kwargs["NEUROSYMBOLIC_ENGINE_API_KEY"]
             genai.configure(api_key=self.api_key)
-        if 'NEUROSYMBOLIC_ENGINE_MODEL' in kwargs:
-            self.model = kwargs['NEUROSYMBOLIC_ENGINE_MODEL']
+        if "NEUROSYMBOLIC_ENGINE_MODEL" in kwargs:
+            self.model = kwargs["NEUROSYMBOLIC_ENGINE_MODEL"]
             self.client = genai.GenerativeModel(model_name=self.model)
 
     def compute_required_tokens(self, messages) -> int:
@@ -71,11 +72,11 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
             msg_parts = msg if isinstance(msg, list) else [msg]
             for part in msg_parts:
                 if isinstance(part, str):
-                    role = 'user'
+                    role = "user"
                     content_str = part
                 elif isinstance(part, dict):
-                    role = part.get('role')
-                    content_str = str(part.get('content', ''))
+                    role = part.get("role")
+                    content_str = str(part.get("content", ""))
                 current_message_api_parts: list[types.Part] = []
                 image_api_parts = self._handle_image_content(content_str)
                 current_message_api_parts.extend(image_api_parts)
@@ -91,7 +92,9 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
             return 0
 
         try:
-            count_response = self.client.models.count_tokens(model=self.model, contents=api_contents)
+            count_response = self.client.models.count_tokens(
+                model=self.model, contents=api_contents
+            )
             return count_response.total_tokens
         except Exception as e:
             UserMessage(f"Gemini count_tokens failed: {e}")
@@ -103,13 +106,13 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
     def _handle_document_content(self, content: str):
         """Handle document content by uploading to Gemini"""
         try:
-            pattern = r'<<document:(.*?):>>'
+            pattern = r"<<document:(.*?):>>"
             matches = re.findall(pattern, content)
             if not matches:
                 return None
 
             doc_path = matches[0].strip()
-            if doc_path.startswith('http'):
+            if doc_path.startswith("http"):
                 UserMessage("URL documents not yet supported for Gemini")
                 return None
             return genai.upload_file(doc_path)
@@ -124,25 +127,28 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
             try:
                 image_parts.extend(self._create_parts_from_image_source(img_src))
             except Exception as e:
-                UserMessage(f"Failed to process image source '{img_src}'. Error: {e!s}", raise_with=ValueError)
+                UserMessage(
+                    f"Failed to process image source '{img_src}'. Error: {e!s}",
+                    raise_with=ValueError,
+                )
         return image_parts
 
     def _extract_image_sources(self, content: str) -> list[str]:
-        pattern = r'<<vision:(.*?):>>'
+        pattern = r"<<vision:(.*?):>>"
         return [match.strip() for match in re.findall(pattern, content)]
 
     def _create_parts_from_image_source(self, img_src: str) -> list[types.Part]:
-        if img_src.startswith('data:image'):
+        if img_src.startswith("data:image"):
             return self._create_parts_from_data_uri(img_src)
-        if img_src.startswith(('http://', 'https://')):
+        if img_src.startswith(("http://", "https://")):
             return self._create_parts_from_url(img_src)
-        if img_src.startswith('frames:'):
+        if img_src.startswith("frames:"):
             return self._create_parts_from_frames(img_src)
         return self._create_parts_from_local_path(img_src)
 
     def _create_parts_from_data_uri(self, img_src: str) -> list[types.Part]:
-        header, encoded = img_src.split(',', 1)
-        mime_type = header.split(';')[0].split(':')[1]
+        header, encoded = img_src.split(",", 1)
+        mime_type = header.split(";")[0].split(":")[1]
         image_bytes = base64.b64decode(encoded)
         part = genai.types.Part(inline_data=genai.types.Blob(mime_type=mime_type, data=image_bytes))
         return [part]
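The `<<vision:...>>` marker handling and the data-URI split in this hunk can be exercised standalone with only the standard library. A minimal sketch, assuming the same tag regex and split logic as the diff above (the helper names and the sample URI are illustrative, not part of the package):

import base64
import re

VISION_TAG = r"<<vision:(.*?):>>"

def extract_image_sources(content: str) -> list[str]:
    # Pull every <<vision:...:>> marker out of the prompt text.
    return [match.strip() for match in re.findall(VISION_TAG, content)]

def split_data_uri(img_src: str) -> tuple[str, bytes]:
    # "data:image/png;base64,<payload>" -> ("image/png", decoded bytes)
    header, encoded = img_src.split(",", 1)
    mime_type = header.split(";")[0].split(":")[1]
    return mime_type, base64.b64decode(encoded)

sources = extract_image_sources("Describe <<vision:data:image/png;base64,iVBORw0KGgo=:>> please.")
mime, raw = split_data_uri(sources[0])  # ("image/png", b"\x89PNG\r\n\x1a\n")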
@@ -151,15 +157,17 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         response = requests.get(img_src, timeout=10)
         response.raise_for_status()
         image_bytes = response.content
-        mime_type = response.headers.get('Content-Type', 'application/octet-stream')
-        if not mime_type.startswith('image/'):
-            UserMessage(f"URL content type '{mime_type}' does not appear to be an image for: {img_src}. Attempting to use anyway.")
+        mime_type = response.headers.get("Content-Type", "application/octet-stream")
+        if not mime_type.startswith("image/"):
+            UserMessage(
+                f"URL content type '{mime_type}' does not appear to be an image for: {img_src}. Attempting to use anyway."
+            )
         part = genai.types.Part(inline_data=genai.types.Blob(mime_type=mime_type, data=image_bytes))
         return [part]
 
     def _create_parts_from_frames(self, img_src: str) -> list[types.Part]:
-        temp_path = img_src.replace('frames:', '')
-        parts = temp_path.split(':', 1)
+        temp_path = img_src.replace("frames:", "")
+        parts = temp_path.split(":", 1)
         if len(parts) != 2:
             UserMessage(f"Invalid 'frames:' format: {img_src}")
             return []
@@ -170,9 +178,9 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
             UserMessage(f"Invalid max_frames number in 'frames:' format: {img_src}")
             return []
         frame_buffers, ext = encode_media_frames(actual_path)
-        mime_type = f'image/{ext.lower()}' if ext else 'application/octet-stream'
-        if ext and ext.lower() == 'jpg':
-            mime_type = 'image/jpeg'
+        mime_type = f"image/{ext.lower()}" if ext else "application/octet-stream"
+        if ext and ext.lower() == "jpg":
+            mime_type = "image/jpeg"
         if not frame_buffers:
             UserMessage(f"encode_media_frames returned no frames for: {actual_path}")
             return []
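The `frames:` branch above expects markers of the form `frames:<path>:<max_frames>`. A minimal parsing sketch under that assumption (`encode_media_frames` is part of symbolicai and not reproduced here; the path below is hypothetical):

def parse_frames_marker(img_src: str) -> tuple[str, int] | None:
    # "frames:/tmp/clip.mp4:8" -> ("/tmp/clip.mp4", 8)
    temp_path = img_src.replace("frames:", "")
    parts = temp_path.split(":", 1)
    if len(parts) != 2:
        return None  # malformed marker
    actual_path, max_frames_str = parts
    try:
        return actual_path, int(max_frames_str)
    except ValueError:
        return None  # non-numeric frame cap

assert parse_frames_marker("frames:/tmp/clip.mp4:8") == ("/tmp/clip.mp4", 8)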
@@ -182,7 +190,11 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         for frame_idx in indices:
             if frame_idx < len(frame_buffers):
                 image_bytes = frame_buffers[frame_idx]
-                parts_list.append(genai.types.Part(inline_data=genai.types.Blob(mime_type=mime_type, data=image_bytes)))
+                parts_list.append(
+                    genai.types.Part(
+                        inline_data=genai.types.Blob(mime_type=mime_type, data=image_bytes)
+                    )
+                )
         return parts_list
 
     def _create_parts_from_local_path(self, img_src: str) -> list[types.Part]:
@@ -193,30 +205,30 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         image_bytes = local_file_path.read_bytes()
         mime_type, _ = mimetypes.guess_type(local_file_path)
         if mime_type is None:
-            file_ext = local_file_path.suffix.lower().lstrip('.')
-            if file_ext == 'jpg':
-                mime_type = 'image/jpeg'
-            elif file_ext == 'png':
-                mime_type = 'image/png'
-            elif file_ext == 'gif':
-                mime_type = 'image/gif'
-            elif file_ext == 'webp':
-                mime_type = 'image/webp'
+            file_ext = local_file_path.suffix.lower().lstrip(".")
+            if file_ext == "jpg":
+                mime_type = "image/jpeg"
+            elif file_ext == "png":
+                mime_type = "image/png"
+            elif file_ext == "gif":
+                mime_type = "image/gif"
+            elif file_ext == "webp":
+                mime_type = "image/webp"
             else:
-                mime_type = 'application/octet-stream'
+                mime_type = "application/octet-stream"
         part = genai.types.Part(inline_data=genai.types.Blob(mime_type=mime_type, data=image_bytes))
         return [part]
 
     def _handle_video_content(self, content: str):
         """Handle video content by uploading to Gemini"""
         try:
-            pattern = r'<<video:(.*?):>>'
+            pattern = r"<<video:(.*?):>>"
             matches = re.findall(pattern, content)
             if not matches:
                 return None
 
             video_path = matches[0].strip()
-            if video_path.startswith('http'):
+            if video_path.startswith("http"):
                 UserMessage("URL videos not yet supported for Gemini")
                 return None
             # Upload local video
@@ -228,13 +240,13 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
     def _handle_audio_content(self, content: str):
         """Handle audio content by uploading to Gemini"""
         try:
-            pattern = r'<<audio:(.*?):>>'
+            pattern = r"<<audio:(.*?):>>"
             matches = re.findall(pattern, content)
             if not matches:
                 return None
 
             audio_path = matches[0].strip()
-            if audio_path.startswith('http'):
+            if audio_path.startswith("http"):
                 UserMessage("URL audio not yet supported for Gemini")
                 return None
             # Upload local audio
@@ -246,14 +258,14 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
     def _remove_media_patterns(self, text: str) -> str:
         """Remove media pattern markers from text"""
         patterns = [
-            r'<<vision:(.*?):>>',
-            r'<<video:(.*?):>>',
-            r'<<audio:(.*?):>>',
-            r'<<document:(.*?):>>'
+            r"<<vision:(.*?):>>",
+            r"<<video:(.*?):>>",
+            r"<<audio:(.*?):>>",
+            r"<<document:(.*?):>>",
         ]
 
         for pattern in patterns:
-            text = re.sub(pattern, '', text)
+            text = re.sub(pattern, "", text)
 
         return text
 
@@ -262,24 +274,24 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         media_content = []
 
         # Process document content
-        if '<<document:' in processed_input_str:
+        if "<<document:" in processed_input_str:
             doc_file = self._handle_document_content(processed_input_str)
             if doc_file:
                 media_content.append(doc_file)
 
         # Process image content
-        if '<<vision:' in processed_input_str:
+        if "<<vision:" in processed_input_str:
             image_files = self._handle_image_content(processed_input_str)
             media_content.extend(image_files)
 
         # Process video content
-        if '<<video:' in processed_input_str:
+        if "<<video:" in processed_input_str:
             video_file = self._handle_video_content(processed_input_str)
             if video_file:
                 media_content.append(video_file)
 
         # Process audio content
-        if '<<audio:' in processed_input_str:
+        if "<<audio:" in processed_input_str:
             audio_file = self._handle_audio_content(processed_input_str)
             if audio_file:
                 media_content.append(audio_file)
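Taken together, these branches let a caller inline several media kinds in one prompt string. A hypothetical input (paths are illustrative) that would route through all four handlers:

prompt = (
    "Summarize the attached material. "
    "<<document:/tmp/report.pdf:>> "
    "<<vision:/tmp/figure.png:>> "
    "<<video:/tmp/demo.mp4:>> "
    "<<audio:/tmp/interview.wav:>>"
)
# _process_multimodal_content(prompt) collects the corresponding uploads/parts,
# and _remove_media_patterns(prompt) strips the markers from the text that is sent.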
@@ -291,26 +303,23 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         thinking_content = ""
         text_content = ""
 
-        if hasattr(res, 'candidates') and res.candidates:
+        if hasattr(res, "candidates") and res.candidates:
             candidate = res.candidates[0]
-            if hasattr(candidate, 'content') and candidate.content:
+            if hasattr(candidate, "content") and candidate.content:
                 for part in candidate.content.parts:
-                    if hasattr(part, 'text') and part.text:
-                        if hasattr(part, 'thought') and part.thought:
+                    if hasattr(part, "text") and part.text:
+                        if hasattr(part, "thought") and part.thought:
                             thinking_content += part.text
                         else:
                             text_content += part.text
 
-        return {
-            "thinking": thinking_content,
-            "text": text_content
-        }
+        return {"thinking": thinking_content, "text": text_content}
 
     def forward(self, argument):
         kwargs = argument.kwargs
         _system, prompt = argument.prop.prepared_input
         payload = self._prepare_request_payload(argument)
-        except_remedy = kwargs.get('except_remedy')
+        except_remedy = kwargs.get("except_remedy")
 
         contents = self._build_contents_from_prompt(prompt)
 
@@ -320,136 +329,166 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         except Exception as e:
             res = self._handle_generation_error(e, except_remedy, argument)
 
-        metadata = {'raw_output': res}
-        if payload.get('tools'):
+        metadata = {"raw_output": res}
+        if payload.get("tools"):
             metadata = self._process_function_calls(res, metadata)
 
-        if kwargs.get('raw_output', False):
+        if kwargs.get("raw_output", False):
             return [res], metadata
 
         output = self._collect_response(res)
 
-        if output['thinking']:
-            metadata['thinking'] = output['thinking']
+        if output["thinking"]:
+            metadata["thinking"] = output["thinking"]
 
-        processed_text = output['text']
+        processed_text = output["text"]
         if argument.prop.response_format:
-            processed_text = processed_text.replace('```json', '').replace('```', '')
+            processed_text = processed_text.replace("```json", "").replace("```", "")
 
         return [processed_text], metadata
 
     def _build_contents_from_prompt(self, prompt) -> list[types.Content]:
         contents: list[types.Content] = []
         for msg in prompt:
-            role = msg['role']
-            parts_list = msg['content']
+            role = msg["role"]
+            parts_list = msg["content"]
             contents.append(types.Content(role=role, parts=parts_list))
         return contents
 
     def _build_generation_config(self, payload: dict) -> types.GenerateContentConfig:
         generation_config = types.GenerateContentConfig(
-            max_output_tokens=payload.get('max_output_tokens'),
-            temperature=payload.get('temperature', 1.0),
-            top_p=payload.get('top_p', 0.95),
-            top_k=payload.get('top_k', 40),
-            stop_sequences=payload.get('stop_sequences'),
-            response_mime_type=payload.get('response_mime_type', 'text/plain'),
+            max_output_tokens=payload.get("max_output_tokens"),
+            temperature=payload.get("temperature", 1.0),
+            top_p=payload.get("top_p", 0.95),
+            top_k=payload.get("top_k", 40),
+            stop_sequences=payload.get("stop_sequences"),
+            response_mime_type=payload.get("response_mime_type", "text/plain"),
         )
         self._apply_optional_config_fields(generation_config, payload)
         return generation_config
 
-    def _apply_optional_config_fields(self, generation_config: types.GenerateContentConfig, payload: dict) -> None:
-        if payload.get('system_instruction'):
-            generation_config.system_instruction = payload['system_instruction']
-        if payload.get('thinking_config'):
-            generation_config.thinking_config = payload['thinking_config']
-        if payload.get('tools'):
-            generation_config.tools = payload['tools']
-            generation_config.automatic_function_calling = payload['automatic_function_calling']
-
-    def _generate_model_response(self, kwargs: dict, contents: list[types.Content], generation_config: types.GenerateContentConfig):
+    def _apply_optional_config_fields(
+        self, generation_config: types.GenerateContentConfig, payload: dict
+    ) -> None:
+        if payload.get("system_instruction"):
+            generation_config.system_instruction = payload["system_instruction"]
+        if payload.get("thinking_config"):
+            generation_config.thinking_config = payload["thinking_config"]
+        if payload.get("tools"):
+            generation_config.tools = payload["tools"]
+            generation_config.automatic_function_calling = payload["automatic_function_calling"]
+
+    def _generate_model_response(
+        self,
+        kwargs: dict,
+        contents: list[types.Content],
+        generation_config: types.GenerateContentConfig,
+    ):
         return self.client.models.generate_content(
-            model=kwargs.get('model', self.model),
-            contents=contents,
-            config=generation_config
+            model=kwargs.get("model", self.model), contents=contents, config=generation_config
         )
 
     def _handle_generation_error(self, exception: Exception, except_remedy, argument):
-        if self.api_key is None or self.api_key == '':
-            msg = 'Google API key is not set. Please set it in the config file or pass it as an argument to the command method.'
+        if self.api_key is None or self.api_key == "":
+            msg = "Google API key is not set. Please set it in the config file or pass it as an argument to the command method."
             UserMessage(msg)
-            if self.config['NEUROSYMBOLIC_ENGINE_API_KEY'] is None or self.config['NEUROSYMBOLIC_ENGINE_API_KEY'] == '':
+            if (
+                self.config["NEUROSYMBOLIC_ENGINE_API_KEY"] is None
+                or self.config["NEUROSYMBOLIC_ENGINE_API_KEY"] == ""
+            ):
                 UserMessage(msg, raise_with=ValueError)
-            self.api_key = self.config['NEUROSYMBOLIC_ENGINE_API_KEY']
+            self.api_key = self.config["NEUROSYMBOLIC_ENGINE_API_KEY"]
             genai.configure(api_key=self.api_key)
         if except_remedy is not None:
             return except_remedy(self, exception, self.client.generate_content, argument)
-        UserMessage(f'Error during generation. Caused by: {exception}', raise_with=ValueError)
+        UserMessage(f"Error during generation. Caused by: {exception}", raise_with=ValueError)
         return None
 
     def _process_function_calls(self, res, metadata):
         hit = False
-        if hasattr(res, 'candidates') and res.candidates:
+        if hasattr(res, "candidates") and res.candidates:
             candidate = res.candidates[0]
-            if hasattr(candidate, 'content') and candidate.content:
+            if hasattr(candidate, "content") and candidate.content:
                 for part in candidate.content.parts:
-                    if hasattr(part, 'function_call') and part.function_call:
+                    if hasattr(part, "function_call") and part.function_call:
                         if hit:
-                            UserMessage("Multiple function calls detected in the response but only the first one will be processed.")
+                            UserMessage(
+                                "Multiple function calls detected in the response but only the first one will be processed."
+                            )
                             break
                         func_call = part.function_call
-                        metadata['function_call'] = {
-                            'name': func_call.name,
-                            'arguments': func_call.args
+                        metadata["function_call"] = {
+                            "name": func_call.name,
+                            "arguments": func_call.args,
                         }
                         hit = True
         return metadata
 
     def _prepare_raw_input(self, argument):
         if not argument.prop.processed_input:
-            UserMessage('Need to provide a prompt instruction to the engine if `raw_input` is enabled!', raise_with=ValueError)
+            UserMessage(
+                "Need to provide a prompt instruction to the engine if `raw_input` is enabled!",
+                raise_with=ValueError,
+            )
 
         raw_prompt_data = argument.prop.processed_input
         normalized_prompts = self._normalize_raw_prompt_data(raw_prompt_data)
-        system_instruction, non_system_messages = self._separate_system_instruction(normalized_prompts)
+        system_instruction, non_system_messages = self._separate_system_instruction(
+            normalized_prompts
+        )
         messages_for_api = self._build_raw_input_messages(non_system_messages)
         return system_instruction, messages_for_api
 
     def _normalize_raw_prompt_data(self, raw_prompt_data):
         if isinstance(raw_prompt_data, str):
-            return [{'role': 'user', 'content': raw_prompt_data}]
+            return [{"role": "user", "content": raw_prompt_data}]
         if isinstance(raw_prompt_data, dict):
             return [raw_prompt_data]
         if isinstance(raw_prompt_data, list):
             for item in raw_prompt_data:
                 if not isinstance(item, dict):
-                    UserMessage(f"Invalid item in raw_input list: {item}. Expected dict.", raise_with=ValueError)
+                    UserMessage(
+                        f"Invalid item in raw_input list: {item}. Expected dict.",
+                        raise_with=ValueError,
+                    )
             return raw_prompt_data
-        UserMessage(f"Unsupported type for raw_input: {type(raw_prompt_data)}. Expected str, dict, or list of dicts.", raise_with=ValueError)
+        UserMessage(
+            f"Unsupported type for raw_input: {type(raw_prompt_data)}. Expected str, dict, or list of dicts.",
+            raise_with=ValueError,
+        )
         return []
 
     def _separate_system_instruction(self, normalized_prompts):
         system_instruction = None
         non_system_messages = []
         for msg in normalized_prompts:
-            role = msg.get('role')
-            content = msg.get('content')
+            role = msg.get("role")
+            content = msg.get("content")
             if role is None or content is None:
-                UserMessage(f"Message in raw_input is missing 'role' or 'content': {msg}", raise_with=ValueError)
+                UserMessage(
+                    f"Message in raw_input is missing 'role' or 'content': {msg}",
+                    raise_with=ValueError,
+                )
             if not isinstance(content, str):
-                UserMessage(f"Message content for role '{role}' in raw_input must be a string. Found type: {type(content)} for content: {content}", raise_with=ValueError)
-            if role == 'system':
+                UserMessage(
+                    f"Message content for role '{role}' in raw_input must be a string. Found type: {type(content)} for content: {content}",
+                    raise_with=ValueError,
+                )
+            if role == "system":
                 if system_instruction is not None:
-                    UserMessage('Only one system instruction is allowed in raw_input mode!', raise_with=ValueError)
+                    UserMessage(
+                        "Only one system instruction is allowed in raw_input mode!",
+                        raise_with=ValueError,
+                    )
                 system_instruction = content
             else:
-                non_system_messages.append({'role': role, 'content': content})
+                non_system_messages.append({"role": role, "content": content})
         return system_instruction, non_system_messages
 
     def _build_raw_input_messages(self, messages):
         messages_for_api = []
         for msg in messages:
-            content_str = str(msg.get('content', ''))
+            content_str = str(msg.get("content", ""))
             current_message_api_parts: list[types.Part] = []
             image_api_parts = self._handle_image_content(content_str)
             if image_api_parts:
@@ -458,14 +497,11 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
             if text_only_content:
                 current_message_api_parts.append(types.Part(text=text_only_content))
             if current_message_api_parts:
-                messages_for_api.append({
-                    'role': msg['role'],
-                    'content': current_message_api_parts
-                })
+                messages_for_api.append({"role": msg["role"], "content": current_message_api_parts})
         return messages_for_api
 
     def prepare(self, argument):
-        #@NOTE: OpenAI compatibility at high level
+        # @NOTE: OpenAI compatibility at high level
         if argument.prop.raw_input:
             argument.prop.prepared_input = self._prepare_raw_input(argument)
             return
@@ -474,7 +510,9 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         media_content = self._process_multimodal_content(processed_input_str)
         system_content = self._compose_system_content(argument)
         user_content = self._compose_user_content(argument)
-        system_content, user_content = self._apply_self_prompt_if_needed(argument, system_content, user_content)
+        system_content, user_content = self._apply_self_prompt_if_needed(
+            argument, system_content, user_content
+        )
 
         user_prompt = self._build_user_prompt(media_content, user_content)
         argument.prop.prepared_input = (system_content, [user_prompt])
@@ -484,12 +522,14 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         _non_verbose_output = """<META_INSTRUCTION/>\nYou do not output anything else, like verbose preambles or post explanation, such as "Sure, let me...", "Hope that was helpful...", "Yes, I can help you with that...", etc. Consider well formatted output, e.g. for sentences use punctuation, spaces etc. or for code use indentation, etc. Never add meta instructions information to your output!\n\n"""
         if argument.prop.suppress_verbose_output:
             system_content += _non_verbose_output
-        system_content = f'{system_content}\n' if system_content and len(system_content) > 0 else ''
+        system_content = f"{system_content}\n" if system_content and len(system_content) > 0 else ""
         if argument.prop.response_format:
             response_format = argument.prop.response_format
-            assert response_format.get('type') is not None, 'Response format type is required!'
+            assert response_format.get("type") is not None, "Response format type is required!"
             if response_format["type"] == "json_object":
-                system_content += '<RESPONSE_FORMAT/>\nYou are a helpful assistant designed to output JSON.\n\n'
+                system_content += (
+                    "<RESPONSE_FORMAT/>\nYou are a helpful assistant designed to output JSON.\n\n"
+                )
         ref = argument.prop.instance
         static_ctxt, dyn_ctxt = ref.global_context
         if len(static_ctxt) > 0:
@@ -507,7 +547,7 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
             val = self._remove_media_patterns(val)
             system_content += f"<INSTRUCTION/>\n{val}\n\n"
         if argument.prop.template_suffix:
-            system_content += f' You will only generate content for the placeholder `{argument.prop.template_suffix!s}` following the instructions and the provided context information.\n\n'
+            system_content += f" You will only generate content for the placeholder `{argument.prop.template_suffix!s}` following the instructions and the provided context information.\n\n"
         return system_content
 
     def _compose_user_content(self, argument) -> str:
@@ -516,17 +556,17 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
         return f"{suffix}"
 
     def _apply_self_prompt_if_needed(self, argument, system_content: str, user_content: str):
-        if argument.prop.instance._kwargs.get('self_prompt', False) or argument.prop.self_prompt:
+        if argument.prop.instance._kwargs.get("self_prompt", False) or argument.prop.self_prompt:
             self_prompter = SelfPrompt()
             res = self_prompter(
-                {'user': user_content, 'system': system_content},
-                max_tokens=argument.kwargs.get('max_tokens', self.max_response_tokens),
-                thinking=argument.kwargs.get('thinking', None),
+                {"user": user_content, "system": system_content},
+                max_tokens=argument.kwargs.get("max_tokens", self.max_response_tokens),
+                thinking=argument.kwargs.get("thinking", None),
             )
             if res is None:
                 UserMessage("Self-prompting failed!", raise_with=ValueError)
-            user_content = res['user']
-            system_content = res['system']
+            user_content = res["user"]
+            system_content = res["system"]
         return system_content, user_content
 
     def _build_user_prompt(self, media_content, user_content: str) -> dict:
@@ -535,38 +575,40 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
             all_user_content.append(genai.types.Part(text=user_content.strip()))
         if not all_user_content:
             all_user_content = [genai.types.Part(text="N/A")]
-        return {'role': 'user', 'content': all_user_content}
+        return {"role": "user", "content": all_user_content}
 
     def _prepare_request_payload(self, argument):
         kwargs = argument.kwargs
 
         payload = {
-            "max_output_tokens": kwargs.get('max_tokens', self.max_response_tokens),
-            "temperature": kwargs.get('temperature', 1.0),
-            "top_p": kwargs.get('top_p', 0.95),
-            "top_k": kwargs.get('top_k', 40),
-            "stop_sequences": kwargs.get('stop', None),
-            "stream": kwargs.get('stream', False),
+            "max_output_tokens": kwargs.get("max_tokens", self.max_response_tokens),
+            "temperature": kwargs.get("temperature", 1.0),
+            "top_p": kwargs.get("top_p", 0.95),
+            "top_k": kwargs.get("top_k", 40),
+            "stop_sequences": kwargs.get("stop", None),
+            "stream": kwargs.get("stream", False),
         }
 
         system, _ = argument.prop.prepared_input
         if system and system.strip():
-            payload['system_instruction'] = system.strip()
+            payload["system_instruction"] = system.strip()
 
-        thinking_arg = kwargs.get('thinking', None)
+        thinking_arg = kwargs.get("thinking", None)
         if thinking_arg and isinstance(thinking_arg, dict):
             thinking_budget = thinking_arg.get("thinking_budget", 1024)
-            payload['thinking_config'] = types.ThinkingConfig(include_thoughts=True, thinking_budget=thinking_budget)
+            payload["thinking_config"] = types.ThinkingConfig(
+                include_thoughts=True, thinking_budget=thinking_budget
+            )
 
-        response_format = kwargs.get('response_format', None)
-        if response_format and response_format.get('type') == 'json_object':
-            payload['response_mime_type'] = 'application/json'
+        response_format = kwargs.get("response_format", None)
+        if response_format and response_format.get("type") == "json_object":
+            payload["response_mime_type"] = "application/json"
 
-        tools = kwargs.get('tools')
+        tools = kwargs.get("tools")
         if tools:
-            payload['tools'] = self._convert_tools_format(tools)
-            payload['automatic_function_calling'] = types.AutomaticFunctionCallingConfig(
-                disable=kwargs.get('automatic_function_calling', True)
+            payload["tools"] = self._convert_tools_format(tools)
+            payload["automatic_function_calling"] = types.AutomaticFunctionCallingConfig(
+                disable=kwargs.get("automatic_function_calling", True)
             )
 
         return payload
@@ -585,7 +627,9 @@ class GeminiXReasoningEngine(Engine, GoogleMixin):
             elif isinstance(tool_item, types.FunctionDeclaration):
                 processed_tools.append(types.Tool(function_declarations=[tool_item]))
             else:
-                UserMessage(f"Ignoring invalid tool format. Expected a callable, google.genai.types.Tool, or google.genai.types.FunctionDeclaration: {tool_item}")
+                UserMessage(
+                    f"Ignoring invalid tool format. Expected a callable, google.genai.types.Tool, or google.genai.types.FunctionDeclaration: {tool_item}"
+                )
 
         if not processed_tools:
             return None
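One detail worth noting from `_prepare_request_payload` above: the `thinking` kwarg is a plain dict that the engine maps onto google-genai's ThinkingConfig. A minimal sketch of just that mapping, assuming the same defaults as the diff (the wrapper function and budget value are illustrative, not part of the package):

from google.genai import types

def build_thinking_config(thinking_arg):
    # A dict with an optional "thinking_budget" key becomes a ThinkingConfig
    # that asks the model to include its thought parts in the response.
    if not (thinking_arg and isinstance(thinking_arg, dict)):
        return None
    budget = thinking_arg.get("thinking_budget", 1024)
    return types.ThinkingConfig(include_thoughts=True, thinking_budget=budget)

config = build_thinking_config({"thinking_budget": 2048})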