symbolicai 1.0.0__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. symai/__init__.py +198 -134
  2. symai/backend/base.py +51 -51
  3. symai/backend/engines/drawing/engine_bfl.py +33 -33
  4. symai/backend/engines/drawing/engine_gpt_image.py +4 -10
  5. symai/backend/engines/embedding/engine_llama_cpp.py +50 -35
  6. symai/backend/engines/embedding/engine_openai.py +22 -16
  7. symai/backend/engines/execute/engine_python.py +16 -16
  8. symai/backend/engines/files/engine_io.py +51 -49
  9. symai/backend/engines/imagecaptioning/engine_blip2.py +27 -23
  10. symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +53 -46
  11. symai/backend/engines/index/engine_pinecone.py +116 -88
  12. symai/backend/engines/index/engine_qdrant.py +1011 -0
  13. symai/backend/engines/index/engine_vectordb.py +78 -52
  14. symai/backend/engines/lean/engine_lean4.py +65 -25
  15. symai/backend/engines/neurosymbolic/__init__.py +35 -28
  16. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +137 -135
  17. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +145 -152
  18. symai/backend/engines/neurosymbolic/engine_cerebras.py +328 -0
  19. symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +75 -49
  20. symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +199 -155
  21. symai/backend/engines/neurosymbolic/engine_groq.py +106 -72
  22. symai/backend/engines/neurosymbolic/engine_huggingface.py +100 -67
  23. symai/backend/engines/neurosymbolic/engine_llama_cpp.py +121 -93
  24. symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +213 -132
  25. symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +180 -137
  26. symai/backend/engines/ocr/engine_apilayer.py +18 -20
  27. symai/backend/engines/output/engine_stdout.py +9 -9
  28. symai/backend/engines/{webscraping → scrape}/engine_requests.py +25 -11
  29. symai/backend/engines/search/engine_openai.py +95 -83
  30. symai/backend/engines/search/engine_parallel.py +665 -0
  31. symai/backend/engines/search/engine_perplexity.py +40 -41
  32. symai/backend/engines/search/engine_serpapi.py +33 -28
  33. symai/backend/engines/speech_to_text/engine_local_whisper.py +37 -27
  34. symai/backend/engines/symbolic/engine_wolframalpha.py +14 -8
  35. symai/backend/engines/text_to_speech/engine_openai.py +15 -19
  36. symai/backend/engines/text_vision/engine_clip.py +34 -28
  37. symai/backend/engines/userinput/engine_console.py +3 -4
  38. symai/backend/mixin/__init__.py +4 -0
  39. symai/backend/mixin/anthropic.py +48 -40
  40. symai/backend/mixin/cerebras.py +9 -0
  41. symai/backend/mixin/deepseek.py +4 -5
  42. symai/backend/mixin/google.py +5 -4
  43. symai/backend/mixin/groq.py +2 -4
  44. symai/backend/mixin/openai.py +132 -110
  45. symai/backend/settings.py +14 -14
  46. symai/chat.py +164 -94
  47. symai/collect/dynamic.py +13 -11
  48. symai/collect/pipeline.py +39 -31
  49. symai/collect/stats.py +109 -69
  50. symai/components.py +578 -238
  51. symai/constraints.py +14 -5
  52. symai/core.py +1495 -1210
  53. symai/core_ext.py +55 -50
  54. symai/endpoints/api.py +113 -58
  55. symai/extended/api_builder.py +22 -17
  56. symai/extended/arxiv_pdf_parser.py +13 -5
  57. symai/extended/bibtex_parser.py +8 -4
  58. symai/extended/conversation.py +88 -69
  59. symai/extended/document.py +40 -27
  60. symai/extended/file_merger.py +45 -7
  61. symai/extended/graph.py +38 -24
  62. symai/extended/html_style_template.py +17 -11
  63. symai/extended/interfaces/blip_2.py +1 -1
  64. symai/extended/interfaces/clip.py +4 -2
  65. symai/extended/interfaces/console.py +5 -3
  66. symai/extended/interfaces/dall_e.py +3 -1
  67. symai/extended/interfaces/file.py +2 -0
  68. symai/extended/interfaces/flux.py +3 -1
  69. symai/extended/interfaces/gpt_image.py +15 -6
  70. symai/extended/interfaces/input.py +2 -1
  71. symai/extended/interfaces/llava.py +1 -1
  72. symai/extended/interfaces/{naive_webscraping.py → naive_scrape.py} +3 -2
  73. symai/extended/interfaces/naive_vectordb.py +2 -2
  74. symai/extended/interfaces/ocr.py +4 -2
  75. symai/extended/interfaces/openai_search.py +2 -0
  76. symai/extended/interfaces/parallel.py +30 -0
  77. symai/extended/interfaces/perplexity.py +2 -0
  78. symai/extended/interfaces/pinecone.py +6 -4
  79. symai/extended/interfaces/python.py +2 -0
  80. symai/extended/interfaces/serpapi.py +2 -0
  81. symai/extended/interfaces/terminal.py +0 -1
  82. symai/extended/interfaces/tts.py +2 -1
  83. symai/extended/interfaces/whisper.py +2 -1
  84. symai/extended/interfaces/wolframalpha.py +1 -0
  85. symai/extended/metrics/__init__.py +1 -1
  86. symai/extended/metrics/similarity.py +5 -2
  87. symai/extended/os_command.py +31 -22
  88. symai/extended/packages/symdev.py +39 -34
  89. symai/extended/packages/sympkg.py +30 -27
  90. symai/extended/packages/symrun.py +46 -35
  91. symai/extended/repo_cloner.py +10 -9
  92. symai/extended/seo_query_optimizer.py +15 -12
  93. symai/extended/solver.py +104 -76
  94. symai/extended/summarizer.py +8 -7
  95. symai/extended/taypan_interpreter.py +10 -9
  96. symai/extended/vectordb.py +28 -15
  97. symai/formatter/formatter.py +39 -31
  98. symai/formatter/regex.py +46 -44
  99. symai/functional.py +184 -86
  100. symai/imports.py +85 -51
  101. symai/interfaces.py +1 -1
  102. symai/memory.py +33 -24
  103. symai/menu/screen.py +28 -19
  104. symai/misc/console.py +27 -27
  105. symai/misc/loader.py +4 -3
  106. symai/models/base.py +147 -76
  107. symai/models/errors.py +1 -1
  108. symai/ops/__init__.py +1 -1
  109. symai/ops/measures.py +17 -14
  110. symai/ops/primitives.py +933 -635
  111. symai/post_processors.py +28 -24
  112. symai/pre_processors.py +58 -52
  113. symai/processor.py +15 -9
  114. symai/prompts.py +714 -649
  115. symai/server/huggingface_server.py +115 -32
  116. symai/server/llama_cpp_server.py +14 -6
  117. symai/server/qdrant_server.py +206 -0
  118. symai/shell.py +98 -39
  119. symai/shellsv.py +307 -223
  120. symai/strategy.py +135 -81
  121. symai/symbol.py +276 -225
  122. symai/utils.py +62 -46
  123. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/METADATA +19 -9
  124. symbolicai-1.1.1.dist-info/RECORD +169 -0
  125. symbolicai-1.0.0.dist-info/RECORD +0 -163
  126. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/WHEEL +0 -0
  127. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/entry_points.txt +0 -0
  128. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/licenses/LICENSE +0 -0
  129. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/top_level.txt +0 -0
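Migration note for the scraping rename (items 28 and 72 above): the engine package `webscraping` is now `scrape`, and the interface module `naive_webscraping.py` is now `naive_scrape.py`, so callers that load the interface by name must update the string. A minimal sketch, assuming the `Interface` loader in `symai/interfaces.py` resolves interfaces by module file name (the loader import path and URL are assumptions, not taken from this diff):

    from symai.interfaces import Interface

    # symbolicai 1.0.0 — old module name:
    # scraper = Interface("naive_webscraping")

    # symbolicai 1.1.1 — renamed module:
    scraper = Interface("naive_scrape")
    result = scraper("https://example.com")  # placeholder URL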
symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py
@@ -38,18 +38,19 @@ class TokenizerWrapper:
     def encode(self, text: str) -> int:
         return self.compute_tokens_func([{"role": "user", "content": text}])
 
+
 class ClaudeXReasoningEngine(Engine, AnthropicMixin):
     def __init__(self, api_key: str | None = None, model: str | None = None):
         super().__init__()
         self.config = deepcopy(SYMAI_CONFIG)
         # In case we use EngineRepository.register to inject the api_key and model => dynamically change the engine at runtime
         if api_key is not None and model is not None:
-            self.config['NEUROSYMBOLIC_ENGINE_API_KEY'] = api_key
-            self.config['NEUROSYMBOLIC_ENGINE_MODEL'] = model
-        if self.id() != 'neurosymbolic':
-            return # do not initialize if not neurosymbolic; avoids conflict with llama.cpp check in EngineRepository.register_from_package
-        anthropic.api_key = self.config['NEUROSYMBOLIC_ENGINE_API_KEY']
-        self.model = self.config['NEUROSYMBOLIC_ENGINE_MODEL']
+            self.config["NEUROSYMBOLIC_ENGINE_API_KEY"] = api_key
+            self.config["NEUROSYMBOLIC_ENGINE_MODEL"] = model
+        if self.id() != "neurosymbolic":
+            return  # do not initialize if not neurosymbolic; avoids conflict with llama.cpp check in EngineRepository.register_from_package
+        anthropic.api_key = self.config["NEUROSYMBOLIC_ENGINE_API_KEY"]
+        self.model = self.config["NEUROSYMBOLIC_ENGINE_MODEL"]
         self.name = self.__class__.__name__
         self.tokenizer = TokenizerWrapper(self.compute_required_tokens)
         self.max_context_tokens = self.api_max_context_tokens()
@@ -57,21 +58,25 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
         self.client = anthropic.Anthropic(api_key=anthropic.api_key)
 
     def id(self) -> str:
-        if self.config.get('NEUROSYMBOLIC_ENGINE_MODEL') and \
-            self.config.get('NEUROSYMBOLIC_ENGINE_MODEL').startswith('claude') and \
-            ('3-7' in self.config.get('NEUROSYMBOLIC_ENGINE_MODEL') or \
-             '4-0' in self.config.get('NEUROSYMBOLIC_ENGINE_MODEL') or \
-             '4-1' in self.config.get('NEUROSYMBOLIC_ENGINE_MODEL') or \
-             '4-5' in self.config.get('NEUROSYMBOLIC_ENGINE_MODEL')):
-            return 'neurosymbolic'
-        return super().id() # default to unregistered
+        if (
+            self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
+            and self.config.get("NEUROSYMBOLIC_ENGINE_MODEL").startswith("claude")
+            and (
+                "3-7" in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
+                or "4-0" in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
+                or "4-1" in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
+                or "4-5" in self.config.get("NEUROSYMBOLIC_ENGINE_MODEL")
+            )
+        ):
+            return "neurosymbolic"
+        return super().id()  # default to unregistered
 
     def command(self, *args, **kwargs):
         super().command(*args, **kwargs)
-        if 'NEUROSYMBOLIC_ENGINE_API_KEY' in kwargs:
-            anthropic.api_key = kwargs['NEUROSYMBOLIC_ENGINE_API_KEY']
-        if 'NEUROSYMBOLIC_ENGINE_MODEL' in kwargs:
-            self.model = kwargs['NEUROSYMBOLIC_ENGINE_MODEL']
+        if "NEUROSYMBOLIC_ENGINE_API_KEY" in kwargs:
+            anthropic.api_key = kwargs["NEUROSYMBOLIC_ENGINE_API_KEY"]
+        if "NEUROSYMBOLIC_ENGINE_MODEL" in kwargs:
+            self.model = kwargs["NEUROSYMBOLIC_ENGINE_MODEL"]
 
     def compute_required_tokens(self, messages) -> int:
         claude_messages, system_content = self._normalize_messages_for_claude(messages)
@@ -80,12 +85,9 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
             return 0
 
         try:
-            count_params = {
-                'model': self.model,
-                'messages': claude_messages
-            }
+            count_params = {"model": self.model, "messages": claude_messages}
             if system_content:
-                count_params['system'] = system_content
+                count_params["system"] = system_content
             count_response = self.client.messages.count_tokens(**count_params)
             return count_response.input_tokens
         except Exception as e:
@@ -100,11 +102,11 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
             msg_parts = msg if isinstance(msg, list) else [msg]
             for part in msg_parts:
                 role, content_str = self._extract_role_and_content(part)
-                if role == 'system':
+                if role == "system":
                     system_content = content_str
                     continue
 
-                if role in ['user', 'assistant']:
+                if role in ["user", "assistant"]:
                     message_payload = self._build_message_payload(role, content_str)
                     if message_payload:
                         claude_messages.append(message_payload)
@@ -113,11 +115,11 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
 
     def _extract_role_and_content(self, part):
         if isinstance(part, str):
-            return 'user', part
+            return "user", part
         if isinstance(part, dict):
-            return part.get('role'), str(part.get('content', ''))
+            return part.get("role"), str(part.get("content", ""))
         UserMessage(f"Unsupported message part type: {type(part)}", raise_with=ValueError)
-        return None, ''
+        return None, ""
 
     def _build_message_payload(self, role, content_str):
         message_content = []
@@ -127,36 +129,28 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
 
         text_content = self._remove_vision_pattern(content_str)
         if text_content:
-            message_content.append({
-                "type": "text",
-                "text": text_content
-            })
+            message_content.append({"type": "text", "text": text_content})
 
         if not message_content:
             return None
 
-        if len(message_content) == 1 and message_content[0].get('type') == 'text':
-            return {
-                'role': role,
-                'content': message_content[0]['text']
-            }
+        if len(message_content) == 1 and message_content[0].get("type") == "text":
+            return {"role": role, "content": message_content[0]["text"]}
 
-        return {
-            'role': role,
-            'content': message_content
-        }
+        return {"role": role, "content": message_content}
 
     def compute_remaining_tokens(self, _prompts: list) -> int:
-        UserMessage('Method not implemented.', raise_with=NotImplementedError)
+        UserMessage("Method not implemented.", raise_with=NotImplementedError)
 
     def _handle_image_content(self, content: str) -> list:
         """Handle image content by processing vision patterns and returning image file data."""
+
         def extract_pattern(text):
-            pattern = r'<<vision:(.*?):>>'
+            pattern = r"<<vision:(.*?):>>"
            return re.findall(pattern, text)
 
         image_files = []
-        if '<<vision:' in content:
+        if "<<vision:" in content:
             parts = extract_pattern(content)
             for p in parts:
                 img_ = p.strip()
@@ -164,92 +158,97 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
                 max_used_frames = 10
                 buffer, ext = encode_media_frames(img_)
                 if len(buffer) > 1:
-                    step = len(buffer) // max_frames_spacing # max frames spacing
+                    step = len(buffer) // max_frames_spacing  # max frames spacing
                     frames = []
                     indices = list(range(0, len(buffer), step))[:max_used_frames]
                     for i in indices:
-                        frames.append({'data': buffer[i], 'media_type': f'image/{ext}', 'type': 'base64'})
+                        frames.append(
+                            {"data": buffer[i], "media_type": f"image/{ext}", "type": "base64"}
+                        )
                     image_files.extend(frames)
                 elif len(buffer) == 1:
-                    image_files.append({'data': buffer[0], 'media_type': f'image/{ext}', 'type': 'base64'})
+                    image_files.append(
+                        {"data": buffer[0], "media_type": f"image/{ext}", "type": "base64"}
+                    )
                 else:
-                    UserMessage('No frames found for image!')
+                    UserMessage("No frames found for image!")
         return image_files
 
     def _remove_vision_pattern(self, text: str) -> str:
         """Remove vision patterns from text."""
-        pattern = r'<<vision:(.*?):>>'
-        return re.sub(pattern, '', text)
+        pattern = r"<<vision:(.*?):>>"
+        return re.sub(pattern, "", text)
 
     def forward(self, argument):
         kwargs = argument.kwargs
         system, messages = argument.prop.prepared_input
         payload = self._prepare_request_payload(argument)
-        except_remedy = kwargs.get('except_remedy')
+        except_remedy = kwargs.get("except_remedy")
 
         try:
-            res = self.client.messages.create(
-                system=system,
-                messages=messages,
-                **payload
-            )
+            res = self.client.messages.create(system=system, messages=messages, **payload)
         except Exception as e:
-            if anthropic.api_key is None or anthropic.api_key == '':
-                msg = 'Anthropic API key is not set. Please set it in the config file or pass it as an argument to the command method.'
+            if anthropic.api_key is None or anthropic.api_key == "":
+                msg = "Anthropic API key is not set. Please set it in the config file or pass it as an argument to the command method."
                 UserMessage(msg)
-                if self.config['NEUROSYMBOLIC_ENGINE_API_KEY'] is None or self.config['NEUROSYMBOLIC_ENGINE_API_KEY'] == '':
+                if (
+                    self.config["NEUROSYMBOLIC_ENGINE_API_KEY"] is None
+                    or self.config["NEUROSYMBOLIC_ENGINE_API_KEY"] == ""
+                ):
                     UserMessage(msg, raise_with=ValueError)
-                anthropic.api_key = self.config['NEUROSYMBOLIC_ENGINE_API_KEY']
+                anthropic.api_key = self.config["NEUROSYMBOLIC_ENGINE_API_KEY"]
 
             callback = self.client.messages.create
-            kwargs['model'] = kwargs.get('model', self.model)
+            kwargs["model"] = kwargs.get("model", self.model)
 
             if except_remedy is not None:
                 res = except_remedy(self, e, callback, argument)
             else:
-                UserMessage(f'Error during generation. Caused by: {e}', raise_with=ValueError)
+                UserMessage(f"Error during generation. Caused by: {e}", raise_with=ValueError)
 
-        if payload['stream']:
-            res = list(res) # Unpack the iterator to a list
-        metadata = {'raw_output': res}
+        if payload["stream"]:
+            res = list(res)  # Unpack the iterator to a list
+        metadata = {"raw_output": res}
         response_data = self._collect_response(res)
 
-        if response_data.get('function_call'):
-            metadata['function_call'] = response_data['function_call']
+        if response_data.get("function_call"):
+            metadata["function_call"] = response_data["function_call"]
 
-        if response_data.get('thinking') and len(response_data['thinking']) > 0:
-            metadata['thinking'] = response_data['thinking']
+        if response_data.get("thinking") and len(response_data["thinking"]) > 0:
+            metadata["thinking"] = response_data["thinking"]
 
-        text_output = response_data.get('text', '')
+        text_output = response_data.get("text", "")
         if argument.prop.response_format:
             # Anthropic returns JSON in markdown format
-            text_output = text_output.replace('```json', '').replace('```', '')
+            text_output = text_output.replace("```json", "").replace("```", "")
 
         return [text_output], metadata
 
     def _prepare_raw_input(self, argument):
         if not argument.prop.processed_input:
-            msg = 'Need to provide a prompt instruction to the engine if `raw_input` is enabled!'
+            msg = "Need to provide a prompt instruction to the engine if `raw_input` is enabled!"
             UserMessage(msg)
             raise ValueError(msg)
         system = NOT_GIVEN
         prompt = copy(argument.prop.processed_input)
         if not isinstance(prompt, list):
             if not isinstance(prompt, dict):
-                prompt = {'role': 'user', 'content': str(prompt)}
+                prompt = {"role": "user", "content": str(prompt)}
             prompt = [prompt]
         if len(prompt) > 1:
             # assert there are not more than 1 system instruction
-            assert len([p for p in prompt if p['role'] == 'system']) <= 1, 'Only one system instruction is allowed!'
+            assert len([p for p in prompt if p["role"] == "system"]) <= 1, (
+                "Only one system instruction is allowed!"
+            )
             for p in prompt:
-                if p['role'] == 'system':
-                    system = p['content']
+                if p["role"] == "system":
+                    system = p["content"]
                     prompt.remove(p)
                     break
         return system, prompt
 
     def prepare(self, argument):
-        #@NOTE: OpenAI compatibility at high level
+        # @NOTE: OpenAI compatibility at high level
         if argument.prop.raw_input:
             argument.prop.prepared_input = self._prepare_raw_input(argument)
             return
@@ -264,10 +263,7 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
             user_text = "N/A"
 
         system, user_prompt = self._apply_self_prompt_if_needed(
-            argument,
-            system,
-            user_text,
-            image_files
+            argument, system, user_text, image_files
         )
 
         argument.prop.prepared_input = (system, [user_prompt])
@@ -280,8 +276,11 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
 
         if argument.prop.response_format:
             response_format = argument.prop.response_format
-            if not (response_format.get('type') is not None):
-                UserMessage('Response format type is required! Expected format `{"type": "json_object"}` or other supported types. Refer to Anthropic documentation for details.', raise_with=AssertionError)
+            if not (response_format.get("type") is not None):
+                UserMessage(
+                    'Response format type is required! Expected format `{"type": "json_object"}` or other supported types. Refer to Anthropic documentation for details.',
+                    raise_with=AssertionError,
+                )
             system += non_verbose_output
             system += f"<RESPONSE_FORMAT/>\n{response_format['type']}\n\n"
 
@@ -318,8 +317,8 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
     def _append_template_suffix(self, system, template_suffix):
         if template_suffix:
             return system + (
-                f' You will only generate content for the placeholder `{template_suffix!s}` '
-                'following the instructions and the provided context information.\n\n'
+                f" You will only generate content for the placeholder `{template_suffix!s}` "
+                "following the instructions and the provided context information.\n\n"
             )
         return system
 
@@ -329,59 +328,54 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
 
         self_prompter = SelfPrompt()
         response = self_prompter(
-            {'user': user_text, 'system': system},
-            max_tokens=argument.kwargs.get('max_tokens', self.max_response_tokens),
-            thinking=argument.kwargs.get('thinking', NOT_GIVEN),
+            {"user": user_text, "system": system},
+            max_tokens=argument.kwargs.get("max_tokens", self.max_response_tokens),
+            thinking=argument.kwargs.get("thinking", NOT_GIVEN),
         )
         if response is None:
             UserMessage("Self-prompting failed to return a response.", raise_with=ValueError)
 
-        updated_prompt = self._format_user_prompt(response['user'], image_files)
-        return response['system'], updated_prompt
+        updated_prompt = self._format_user_prompt(response["user"], image_files)
+        return response["system"], updated_prompt
 
     def _is_self_prompt_enabled(self, argument):
-        return argument.prop.instance._kwargs.get('self_prompt', False) or argument.prop.self_prompt
+        return argument.prop.instance._kwargs.get("self_prompt", False) or argument.prop.self_prompt
 
     def _format_user_prompt(self, user_text, image_files):
         if len(image_files) > 0:
-            images = [{'type': 'image', 'source': im} for im in image_files]
-            return {
-                'role': 'user',
-                'content': [
-                    *images,
-                    {'type': 'text', 'text': user_text}
-                ]
-            }
+            images = [{"type": "image", "source": im} for im in image_files]
+            return {"role": "user", "content": [*images, {"type": "text", "text": user_text}]}
 
-        return {'role': 'user', 'content': user_text}
+        return {"role": "user", "content": user_text}
 
     def _prepare_request_payload(self, argument):
         kwargs = argument.kwargs
-        model = kwargs.get('model', self.model)
-        stop = kwargs.get('stop', NOT_GIVEN)
-        temperature = kwargs.get('temperature', 1)
-        thinking_arg = kwargs.get('thinking', NOT_GIVEN)
+        model = kwargs.get("model", self.model)
+        stop = kwargs.get("stop", NOT_GIVEN)
+        temperature = kwargs.get("temperature", 1)
+        thinking_arg = kwargs.get("thinking", NOT_GIVEN)
         thinking = NOT_GIVEN
         if thinking_arg and isinstance(thinking_arg, dict):
-            thinking = {
-                "type": "enabled",
-                "budget_tokens": thinking_arg.get("budget_tokens", 1024)
-            }
-        top_p = kwargs.get('top_p', NOT_GIVEN if temperature is not None else 1) #@NOTE:'You should either alter temperature or top_p, but not both.'
-        top_k = kwargs.get('top_k', NOT_GIVEN)
-        stream = kwargs.get('stream', True) # Do NOT remove this default value! Getting tons of API errors because they can't process requests >10m
-        tools = kwargs.get('tools', NOT_GIVEN)
-        tool_choice = kwargs.get('tool_choice', NOT_GIVEN)
-        metadata_anthropic = kwargs.get('metadata', NOT_GIVEN)
-        max_tokens = kwargs.get('max_tokens', self.max_response_tokens)
+            thinking = {"type": "enabled", "budget_tokens": thinking_arg.get("budget_tokens", 1024)}
+        top_p = kwargs.get(
+            "top_p", NOT_GIVEN if temperature is not None else 1
+        )  # @NOTE:'You should either alter temperature or top_p, but not both.'
+        top_k = kwargs.get("top_k", NOT_GIVEN)
+        stream = kwargs.get(
+            "stream", True
+        )  # Do NOT remove this default value! Getting tons of API errors because they can't process requests >10m
+        tools = kwargs.get("tools", NOT_GIVEN)
+        tool_choice = kwargs.get("tool_choice", NOT_GIVEN)
+        metadata_anthropic = kwargs.get("metadata", NOT_GIVEN)
+        max_tokens = kwargs.get("max_tokens", self.max_response_tokens)
 
         if stop != NOT_GIVEN and not isinstance(stop, list):
             stop = [stop]
 
-        #@NOTE: Anthropic fails if stop is not raw string, so cast it to r'…'
+        # @NOTE: Anthropic fails if stop is not raw string, so cast it to r'…'
         # E.g. when we use defaults in core.py, i.e. stop=['\n']
         if stop != NOT_GIVEN:
-            stop = [r'{s}' for s in stop]
+            stop = [r"{s}" for s in stop]
 
         return {
             "model": model,
@@ -394,7 +388,7 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
             "stream": stream,
             "metadata": metadata_anthropic,
             "tools": tools,
-            "tool_choice": tool_choice
+            "tool_choice": tool_choice,
         }
 
     def _collect_response(self, res):
@@ -404,11 +398,13 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
         if isinstance(res, Message):
             return self._collect_message_response(res)
 
-        UserMessage(f"Unexpected response type from Anthropic API: {type(res)}", raise_with=ValueError)
+        UserMessage(
+            f"Unexpected response type from Anthropic API: {type(res)}", raise_with=ValueError
+        )
         return {}
 
     def _collect_stream_response(self, response_chunks):
-        accumulators = {'thinking': '', 'text': ''}
+        accumulators = {"thinking": "", "text": ""}
         tool_calls_raw = []
         active_tool_calls = {}
 
417
413
 
418
414
  function_call_data = self._extract_function_call(tool_calls_raw)
419
415
  return {
420
- 'thinking': accumulators['thinking'],
421
- 'text': accumulators['text'],
422
- 'function_call': function_call_data
416
+ "thinking": accumulators["thinking"],
417
+ "text": accumulators["text"],
418
+ "function_call": function_call_data,
423
419
  }
424
420
 
425
421
  def _process_stream_chunk(self, chunk, accumulators, active_tool_calls, tool_calls_raw):
@@ -433,18 +429,18 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
     def _register_tool_call(self, chunk, active_tool_calls):
         if isinstance(chunk.content_block, ToolUseBlock):
             active_tool_calls[chunk.index] = {
-                'id': chunk.content_block.id,
-                'name': chunk.content_block.name,
-                'input_json_str': ""
+                "id": chunk.content_block.id,
+                "name": chunk.content_block.name,
+                "input_json_str": "",
             }
 
     def _handle_delta_chunk(self, chunk, accumulators, active_tool_calls):
         if isinstance(chunk.delta, ThinkingDelta):
-            accumulators['thinking'] += chunk.delta.thinking
+            accumulators["thinking"] += chunk.delta.thinking
         elif isinstance(chunk.delta, TextDelta):
-            accumulators['text'] += chunk.delta.text
+            accumulators["text"] += chunk.delta.text
         elif isinstance(chunk.delta, InputJSONDelta) and chunk.index in active_tool_calls:
-            active_tool_calls[chunk.index]['input_json_str'] += chunk.delta.partial_json
+            active_tool_calls[chunk.index]["input_json_str"] += chunk.delta.partial_json
 
     def _finalize_tool_call(self, chunk, active_tool_calls, tool_calls_raw):
         if chunk.index not in active_tool_calls:
@@ -452,12 +448,12 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
 
         tool_call_info = active_tool_calls.pop(chunk.index)
         try:
-            tool_call_info['input'] = json.loads(tool_call_info['input_json_str'])
+            tool_call_info["input"] = json.loads(tool_call_info["input_json_str"])
         except json.JSONDecodeError as error:
             UserMessage(
                 f"Failed to parse JSON for tool call {tool_call_info['name']}: {error}. Raw JSON: '{tool_call_info['input_json_str']}'"
             )
-            tool_call_info['input'] = {}
+            tool_call_info["input"] = {}
         tool_calls_raw.append(tool_call_info)
 
     def _extract_function_call(self, tool_calls_raw):
@@ -465,50 +461,47 @@ class ClaudeXReasoningEngine(Engine, AnthropicMixin):
             return None
 
         if len(tool_calls_raw) > 1:
-            UserMessage("Multiple tool calls detected in the stream but only the first one will be processed.")
+            UserMessage(
+                "Multiple tool calls detected in the stream but only the first one will be processed."
+            )
 
         first_call = tool_calls_raw[0]
-        return {
-            'name': first_call['name'],
-            'arguments': first_call['input']
-        }
+        return {"name": first_call["name"], "arguments": first_call["input"]}
 
     def _collect_message_response(self, message):
-        accumulators = {'thinking': '', 'text': ''}
+        accumulators = {"thinking": "", "text": ""}
         function_call_data = None
         tool_call_detected = False
 
         for content_block in message.content:
             function_call_data, tool_call_detected = self._process_message_block(
-                content_block,
-                accumulators,
-                function_call_data,
-                tool_call_detected
+                content_block, accumulators, function_call_data, tool_call_detected
             )
 
         return {
-            'thinking': accumulators['thinking'],
-            'text': accumulators['text'],
-            'function_call': function_call_data
+            "thinking": accumulators["thinking"],
+            "text": accumulators["text"],
+            "function_call": function_call_data,
         }
 
-    def _process_message_block(self, content_block, accumulators, function_call_data, tool_call_detected):
+    def _process_message_block(
+        self, content_block, accumulators, function_call_data, tool_call_detected
+    ):
         if isinstance(content_block, ThinkingBlock):
-            accumulators['thinking'] += content_block.thinking
+            accumulators["thinking"] += content_block.thinking
             return function_call_data, tool_call_detected
 
         if isinstance(content_block, TextBlock):
-            accumulators['text'] += content_block.text
+            accumulators["text"] += content_block.text
             return function_call_data, tool_call_detected
 
         if isinstance(content_block, ToolUseBlock):
             if tool_call_detected:
-                UserMessage("Multiple tool use blocks detected in the response but only the first one will be processed.")
+                UserMessage(
+                    "Multiple tool use blocks detected in the response but only the first one will be processed."
+                )
                 return function_call_data, tool_call_detected
 
-            return {
-                'name': content_block.name,
-                'arguments': content_block.input
-            }, True
+            return {"name": content_block.name, "arguments": content_block.input}, True
 
         return function_call_data, tool_call_detected
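
Most of the hunks above are quote-style and line-wrapping normalization; the request-payload logic itself is unchanged. For reference, a minimal standalone sketch of how `_prepare_request_payload` maps the `thinking` kwarg onto the Anthropic payload (the 2048 budget is a hypothetical caller value; `NOT_GIVEN` is the omitted-parameter sentinel exported by the `anthropic` SDK):

    from anthropic import NOT_GIVEN

    kwargs = {"thinking": {"budget_tokens": 2048}}  # hypothetical caller kwargs

    thinking_arg = kwargs.get("thinking", NOT_GIVEN)
    thinking = NOT_GIVEN
    if thinking_arg and isinstance(thinking_arg, dict):
        # falls back to a 1024-token budget when the caller gives none
        thinking = {"type": "enabled", "budget_tokens": thinking_arg.get("budget_tokens", 1024)}

    print(thinking)  # {'type': 'enabled', 'budget_tokens': 2048}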