symbolicai 0.21.0__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. symai/__init__.py +269 -173
  2. symai/backend/base.py +123 -110
  3. symai/backend/engines/drawing/engine_bfl.py +45 -44
  4. symai/backend/engines/drawing/engine_gpt_image.py +112 -97
  5. symai/backend/engines/embedding/engine_llama_cpp.py +63 -52
  6. symai/backend/engines/embedding/engine_openai.py +25 -21
  7. symai/backend/engines/execute/engine_python.py +19 -18
  8. symai/backend/engines/files/engine_io.py +104 -95
  9. symai/backend/engines/imagecaptioning/engine_blip2.py +28 -24
  10. symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +102 -79
  11. symai/backend/engines/index/engine_pinecone.py +124 -97
  12. symai/backend/engines/index/engine_qdrant.py +1011 -0
  13. symai/backend/engines/index/engine_vectordb.py +84 -56
  14. symai/backend/engines/lean/engine_lean4.py +96 -52
  15. symai/backend/engines/neurosymbolic/__init__.py +41 -13
  16. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +330 -248
  17. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +329 -264
  18. symai/backend/engines/neurosymbolic/engine_cerebras.py +328 -0
  19. symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +118 -88
  20. symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +344 -299
  21. symai/backend/engines/neurosymbolic/engine_groq.py +173 -115
  22. symai/backend/engines/neurosymbolic/engine_huggingface.py +114 -84
  23. symai/backend/engines/neurosymbolic/engine_llama_cpp.py +144 -118
  24. symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +415 -307
  25. symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +394 -231
  26. symai/backend/engines/ocr/engine_apilayer.py +23 -27
  27. symai/backend/engines/output/engine_stdout.py +10 -13
  28. symai/backend/engines/{webscraping → scrape}/engine_requests.py +101 -54
  29. symai/backend/engines/search/engine_openai.py +100 -88
  30. symai/backend/engines/search/engine_parallel.py +665 -0
  31. symai/backend/engines/search/engine_perplexity.py +44 -45
  32. symai/backend/engines/search/engine_serpapi.py +37 -34
  33. symai/backend/engines/speech_to_text/engine_local_whisper.py +54 -51
  34. symai/backend/engines/symbolic/engine_wolframalpha.py +15 -9
  35. symai/backend/engines/text_to_speech/engine_openai.py +20 -26
  36. symai/backend/engines/text_vision/engine_clip.py +39 -37
  37. symai/backend/engines/userinput/engine_console.py +5 -6
  38. symai/backend/mixin/__init__.py +13 -0
  39. symai/backend/mixin/anthropic.py +48 -38
  40. symai/backend/mixin/deepseek.py +6 -5
  41. symai/backend/mixin/google.py +7 -4
  42. symai/backend/mixin/groq.py +2 -4
  43. symai/backend/mixin/openai.py +140 -110
  44. symai/backend/settings.py +87 -20
  45. symai/chat.py +216 -123
  46. symai/collect/__init__.py +7 -1
  47. symai/collect/dynamic.py +80 -70
  48. symai/collect/pipeline.py +67 -51
  49. symai/collect/stats.py +161 -109
  50. symai/components.py +707 -360
  51. symai/constraints.py +24 -12
  52. symai/core.py +1857 -1233
  53. symai/core_ext.py +83 -80
  54. symai/endpoints/api.py +166 -104
  55. symai/extended/.DS_Store +0 -0
  56. symai/extended/__init__.py +46 -12
  57. symai/extended/api_builder.py +29 -21
  58. symai/extended/arxiv_pdf_parser.py +23 -14
  59. symai/extended/bibtex_parser.py +9 -6
  60. symai/extended/conversation.py +156 -126
  61. symai/extended/document.py +50 -30
  62. symai/extended/file_merger.py +57 -14
  63. symai/extended/graph.py +51 -32
  64. symai/extended/html_style_template.py +18 -14
  65. symai/extended/interfaces/blip_2.py +2 -3
  66. symai/extended/interfaces/clip.py +4 -3
  67. symai/extended/interfaces/console.py +9 -1
  68. symai/extended/interfaces/dall_e.py +4 -2
  69. symai/extended/interfaces/file.py +2 -0
  70. symai/extended/interfaces/flux.py +4 -2
  71. symai/extended/interfaces/gpt_image.py +16 -7
  72. symai/extended/interfaces/input.py +2 -1
  73. symai/extended/interfaces/llava.py +1 -2
  74. symai/extended/interfaces/{naive_webscraping.py → naive_scrape.py} +4 -3
  75. symai/extended/interfaces/naive_vectordb.py +9 -10
  76. symai/extended/interfaces/ocr.py +5 -3
  77. symai/extended/interfaces/openai_search.py +2 -0
  78. symai/extended/interfaces/parallel.py +30 -0
  79. symai/extended/interfaces/perplexity.py +2 -0
  80. symai/extended/interfaces/pinecone.py +12 -9
  81. symai/extended/interfaces/python.py +2 -0
  82. symai/extended/interfaces/serpapi.py +3 -1
  83. symai/extended/interfaces/terminal.py +2 -4
  84. symai/extended/interfaces/tts.py +3 -2
  85. symai/extended/interfaces/whisper.py +3 -2
  86. symai/extended/interfaces/wolframalpha.py +2 -1
  87. symai/extended/metrics/__init__.py +11 -1
  88. symai/extended/metrics/similarity.py +14 -13
  89. symai/extended/os_command.py +39 -29
  90. symai/extended/packages/__init__.py +29 -3
  91. symai/extended/packages/symdev.py +51 -43
  92. symai/extended/packages/sympkg.py +41 -35
  93. symai/extended/packages/symrun.py +63 -50
  94. symai/extended/repo_cloner.py +14 -12
  95. symai/extended/seo_query_optimizer.py +15 -13
  96. symai/extended/solver.py +116 -91
  97. symai/extended/summarizer.py +12 -10
  98. symai/extended/taypan_interpreter.py +17 -18
  99. symai/extended/vectordb.py +122 -92
  100. symai/formatter/__init__.py +9 -1
  101. symai/formatter/formatter.py +51 -47
  102. symai/formatter/regex.py +70 -69
  103. symai/functional.py +325 -176
  104. symai/imports.py +190 -147
  105. symai/interfaces.py +57 -28
  106. symai/memory.py +45 -35
  107. symai/menu/screen.py +28 -19
  108. symai/misc/console.py +66 -56
  109. symai/misc/loader.py +8 -5
  110. symai/models/__init__.py +17 -1
  111. symai/models/base.py +395 -236
  112. symai/models/errors.py +1 -2
  113. symai/ops/__init__.py +32 -22
  114. symai/ops/measures.py +24 -25
  115. symai/ops/primitives.py +1149 -731
  116. symai/post_processors.py +58 -50
  117. symai/pre_processors.py +86 -82
  118. symai/processor.py +21 -13
  119. symai/prompts.py +764 -685
  120. symai/server/huggingface_server.py +135 -49
  121. symai/server/llama_cpp_server.py +21 -11
  122. symai/server/qdrant_server.py +206 -0
  123. symai/shell.py +100 -42
  124. symai/shellsv.py +700 -492
  125. symai/strategy.py +630 -346
  126. symai/symbol.py +368 -322
  127. symai/utils.py +100 -78
  128. {symbolicai-0.21.0.dist-info → symbolicai-1.1.0.dist-info}/METADATA +22 -10
  129. symbolicai-1.1.0.dist-info/RECORD +168 -0
  130. symbolicai-0.21.0.dist-info/RECORD +0 -162
  131. {symbolicai-0.21.0.dist-info → symbolicai-1.1.0.dist-info}/WHEEL +0 -0
  132. {symbolicai-0.21.0.dist-info → symbolicai-1.1.0.dist-info}/entry_points.txt +0 -0
  133. {symbolicai-0.21.0.dist-info → symbolicai-1.1.0.dist-info}/licenses/LICENSE +0 -0
  134. {symbolicai-0.21.0.dist-info → symbolicai-1.1.0.dist-info}/top_level.txt +0 -0
symai/backend/engines/neurosymbolic/engine_llama_cpp.py
@@ -2,15 +2,14 @@ import asyncio
 import json
 import logging
 from copy import deepcopy
+from typing import Any, ClassVar
 
 import aiohttp
-import httpx
 import nest_asyncio
-import requests
 
 from ....core import Argument
 from ....core_ext import retry
-from ....utils import CustomUserWarning
+from ....utils import UserMessage
 from ...base import Engine
 from ...settings import SYMAI_CONFIG, SYMSERVER_CONFIG
 
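Note: the import swap above renames the error-reporting helper used throughout this file from CustomUserWarning to UserMessage (both imported from symai's utils module). The call sites below imply a simple contract: emit the message as a warning by default, and raise when `raise_with` is given. A minimal sketch of that contract, with a hypothetical simplified body that is not the library's actual implementation:

import warnings

def UserMessage(message: str, raise_with: type[BaseException] | None = None) -> None:
    # Hypothetical sketch: surface the message, escalating to an exception when requested.
    if raise_with is not None:
        raise raise_with(message)
    warnings.warn(message, stacklevel=2)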
@@ -25,14 +24,17 @@ class LlamaCppTokenizer:
 
     @staticmethod
     async def _encode(text: str) -> list[int]:
-        async with aiohttp.ClientSession() as session:
-            async with session.post(f"{LlamaCppTokenizer._server_endpoint}/tokenize", json={
-                "content": text,
-            }) as res:
-                if res.status != 200:
-                    CustomUserWarning(f"Request failed with status code: {res.status}", raise_with=ValueError)
-                res = await res.json()
-                return res['tokens']
+        async with (
+            aiohttp.ClientSession() as session,
+            session.post(
+                f"{LlamaCppTokenizer._server_endpoint}/tokenize",
+                json={"content": text},
+            ) as res,
+        ):
+            if res.status != 200:
+                UserMessage(f"Request failed with status code: {res.status}", raise_with=ValueError)
+            response_json = await res.json()
+            return response_json["tokens"]
 
     @staticmethod
     def encode(text: str) -> list[int]:
@@ -45,14 +47,17 @@ class LlamaCppTokenizer:
 
     @staticmethod
    async def _decode(tokens: list[int]) -> str:
-        async with aiohttp.ClientSession() as session:
-            async with session.post(f"{LlamaCppTokenizer._server_endpoint}/detokenize", json={
-                "tokens": tokens,
-            }) as res:
-                if res.status != 200:
-                    CustomUserWarning(f"Request failed with status code: {res.status}", raise_with=ValueError)
-                res = await res.json()
-                return res['content']
+        async with (
+            aiohttp.ClientSession() as session,
+            session.post(
+                f"{LlamaCppTokenizer._server_endpoint}/detokenize",
+                json={"tokens": tokens},
+            ) as res,
+        ):
+            if res.status != 200:
+                UserMessage(f"Request failed with status code: {res.status}", raise_with=ValueError)
+            response_json = await res.json()
+            return response_json["content"]
 
     @staticmethod
     def decode(tokens: list[int]) -> str:
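The rewritten `_encode`/`_decode` bodies rely on parenthesized context managers (Python 3.10+), which flatten the previously nested `async with` blocks without changing behavior. A minimal standalone sketch of the same pattern against a llama.cpp server's /tokenize endpoint (the localhost URL is an assumption):

import asyncio

import aiohttp

async def tokenize(text: str, endpoint: str = "http://localhost:8080") -> list[int]:
    # One parenthesized `async with` replaces two nested blocks; teardown order is unchanged.
    async with (
        aiohttp.ClientSession() as session,
        session.post(f"{endpoint}/tokenize", json={"content": text}) as res,
    ):
        res.raise_for_status()
        return (await res.json())["tokens"]

# asyncio.run(tokenize("hello world"))  # requires a running llama.cpp server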
@@ -65,71 +70,84 @@ class LlamaCppTokenizer:
 
 
 class LlamaCppEngine(Engine):
-    _retry_params = {
-        'tries': 5,
-        'delay': 2,
-        'max_delay': 60,
-        'backoff': 2,
-        'jitter': (1, 5),
-        'graceful': True
+    _retry_params: ClassVar[dict[str, Any]] = {
+        "tries": 5,
+        "delay": 2,
+        "max_delay": 60,
+        "backoff": 2,
+        "jitter": (1, 5),
+        "graceful": True,
     }
-    _timeout_params = {
-        'read': None,
-        'connect': None,
+    _timeout_params: ClassVar[dict[str, Any]] = {
+        "read": None,
+        "connect": None,
     }
+
     def __init__(
-            self,
-            model: str | None = None,
-            retry_params: dict = _retry_params,
-            timeout_params: dict = _timeout_params,
-            ):
+        self,
+        model: str | None = None,
+        retry_params: dict = _retry_params,
+        timeout_params: dict = _timeout_params,
+    ):
         super().__init__()
         self.config = deepcopy(SYMAI_CONFIG)
         # In case we use EngineRepository.register to inject the api_key and model => dynamically change the engine at runtime
         if model is not None:
-            self.config['NEUROSYMBOLIC_ENGINE_MODEL'] = model
-        if self.id() != 'neurosymbolic':
+            self.config["NEUROSYMBOLIC_ENGINE_MODEL"] = model
+        if self.id() != "neurosymbolic":
             return
-        if not SYMSERVER_CONFIG.get('online'):
-            CustomUserWarning('You are using the llama.cpp engine, but the server endpoint is not started. Please start the server with `symserver [--args]` or run `symserver --help` to see the available options for this engine.', raise_with=ValueError)
-        self.server_endpoint = f"http://{SYMSERVER_CONFIG.get('--host')}:{SYMSERVER_CONFIG.get('--port')}"
-        self.tokenizer = LlamaCppTokenizer # backwards compatibility with how we handle tokenization, i.e. self.tokenizer().encode(...)
+        if not SYMSERVER_CONFIG.get("online"):
+            UserMessage(
+                "You are using the llama.cpp engine, but the server endpoint is not started. Please start the server with `symserver [--args]` or run `symserver --help` to see the available options for this engine.",
+                raise_with=ValueError,
+            )
+        self.server_endpoint = (
+            f"http://{SYMSERVER_CONFIG.get('--host')}:{SYMSERVER_CONFIG.get('--port')}"
+        )
+        self.tokenizer = LlamaCppTokenizer  # backwards compatibility with how we handle tokenization, i.e. self.tokenizer().encode(...)
         self.timeout_params = self._validate_timeout_params(timeout_params)
         self.retry_params = self._validate_retry_params(retry_params)
         self.name = self.__class__.__name__
 
     def id(self) -> str:
-        if self.config.get('NEUROSYMBOLIC_ENGINE_MODEL') and self.config.get('NEUROSYMBOLIC_ENGINE_MODEL').startswith('llama'):
-            return 'neurosymbolic'
-        return super().id() # default to unregistered
+        if self.config.get("NEUROSYMBOLIC_ENGINE_MODEL") and self.config.get(
+            "NEUROSYMBOLIC_ENGINE_MODEL"
+        ).startswith("llama"):
+            return "neurosymbolic"
+        return super().id()  # default to unregistered
 
     def command(self, *args, **kwargs):
         super().command(*args, **kwargs)
-        if 'NEUROSYMBOLIC_ENGINE_MODEL' in kwargs:
-            self.model = kwargs['NEUROSYMBOLIC_ENGINE_MODEL']
-        if 'seed' in kwargs:
-            self.seed = kwargs['seed']
-        if 'except_remedy' in kwargs:
-            self.except_remedy = kwargs['except_remedy']
+        if "NEUROSYMBOLIC_ENGINE_MODEL" in kwargs:
+            self.model = kwargs["NEUROSYMBOLIC_ENGINE_MODEL"]
+        if "seed" in kwargs:
+            self.seed = kwargs["seed"]
+        if "except_remedy" in kwargs:
+            self.except_remedy = kwargs["except_remedy"]
 
-    def compute_required_tokens(self, messages) -> int:
-        #@TODO: quite non-trivial how to handle this with the llama.cpp server
-        CustomUserWarning('Not implemented for llama.cpp!', raise_with=NotImplementedError)
+    def compute_required_tokens(self, _messages) -> int:
+        # @TODO: quite non-trivial how to handle this with the llama.cpp server
+        UserMessage("Not implemented for llama.cpp!", raise_with=NotImplementedError)
 
-    def compute_remaining_tokens(self, prompts: list) -> int:
-        #@TODO: quite non-trivial how to handle this with the llama.cpp server
-        CustomUserWarning('Not implemented for llama.cpp!', raise_with=NotImplementedError)
+    def compute_remaining_tokens(self, _prompts: list) -> int:
+        # @TODO: quite non-trivial how to handle this with the llama.cpp server
+        UserMessage("Not implemented for llama.cpp!", raise_with=NotImplementedError)
 
     def _validate_timeout_params(self, timeout_params):
         if not isinstance(timeout_params, dict):
-            CustomUserWarning("timeout_params must be a dictionary", raise_with=ValueError)
-        assert all(key in timeout_params for key in ['read', 'connect']), "Available keys: ['read', 'connect']"
+            UserMessage("timeout_params must be a dictionary", raise_with=ValueError)
+        assert all(key in timeout_params for key in ["read", "connect"]), (
+            "Available keys: ['read', 'connect']"
+        )
         return timeout_params
 
     def _validate_retry_params(self, retry_params):
         if not isinstance(retry_params, dict):
-            CustomUserWarning("retry_params must be a dictionary", raise_with=ValueError)
-        assert all(key in retry_params for key in ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']), "Available keys: ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']"
+            UserMessage("retry_params must be a dictionary", raise_with=ValueError)
+        assert all(
+            key in retry_params
+            for key in ["tries", "delay", "max_delay", "backoff", "jitter", "graceful"]
+        ), "Available keys: ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']"
         return retry_params
 
     @staticmethod
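The retry and timeout defaults above are now ClassVar-annotated and validated against fixed key sets in `_validate_retry_params`/`_validate_timeout_params`. An illustrative construction with overrides: the key names come from the validators above, while the concrete values and the assumption of a running `symserver` llama.cpp endpoint are made up:

from symai.backend.engines.neurosymbolic.engine_llama_cpp import LlamaCppEngine

# Both dicts must contain at least the keys the validators assert on.
engine = LlamaCppEngine(
    retry_params={"tries": 3, "delay": 1, "max_delay": 30, "backoff": 2, "jitter": (0, 1), "graceful": True},
    timeout_params={"read": 120, "connect": 10},
)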
@@ -138,7 +156,7 @@ class LlamaCppEngine(Engine):
         try:
             current_loop = asyncio.get_event_loop()
             if current_loop.is_closed():
-                CustomUserWarning("Event loop is closed.", raise_with=RuntimeError)
+                UserMessage("Event loop is closed.", raise_with=RuntimeError)
             return current_loop
         except RuntimeError:
             new_loop = asyncio.new_event_loop()
@@ -150,35 +168,35 @@ class LlamaCppEngine(Engine):
         kwargs = argument.kwargs
         payload = {
             "messages": argument.prop.prepared_input,
-            "temperature": kwargs.get('temperature', 0.6),
-            "frequency_penalty": kwargs.get('frequency_penalty', 0),
-            "presence_penalty": kwargs.get('presence_penalty', 0),
-            "top_p": kwargs.get('top_p', 0.95),
-            "min_p": kwargs.get('min_p', 0.05),
-            "stop": kwargs.get('stop'),
-            "seed": kwargs.get('seed'),
-            "max_tokens": kwargs.get('max_tokens'),
-            "top_k": kwargs.get('top_k', 40),
-            "repeat_penalty": kwargs.get('repeat_penalty', 1),
-            "logits_bias": kwargs.get('logits_bias'),
-            "logprobs": kwargs.get('logprobs', False),
-            "grammar": kwargs.get('grammar'),
-            "response_format": kwargs.get('response_format'),
+            "temperature": kwargs.get("temperature", 0.6),
+            "frequency_penalty": kwargs.get("frequency_penalty", 0),
+            "presence_penalty": kwargs.get("presence_penalty", 0),
+            "top_p": kwargs.get("top_p", 0.95),
+            "min_p": kwargs.get("min_p", 0.05),
+            "stop": kwargs.get("stop"),
+            "seed": kwargs.get("seed"),
+            "max_tokens": kwargs.get("max_tokens"),
+            "top_k": kwargs.get("top_k", 40),
+            "repeat_penalty": kwargs.get("repeat_penalty", 1),
+            "logits_bias": kwargs.get("logits_bias"),
+            "logprobs": kwargs.get("logprobs", False),
+            "grammar": kwargs.get("grammar"),
+            "response_format": kwargs.get("response_format"),
         }
 
-        model = SYMSERVER_CONFIG.get('-m') or SYMSERVER_CONFIG.get('--model')
+        model = SYMSERVER_CONFIG.get("-m") or SYMSERVER_CONFIG.get("--model")
         if model:
             payload["model"] = model
 
-        tools = kwargs.get('tools')
+        tools = kwargs.get("tools")
         if tools:
             payload["tools"] = tools
 
-        tool_choice = kwargs.get('tool_choice')
+        tool_choice = kwargs.get("tool_choice")
         if tool_choice is not None:
             payload["tool_choice"] = tool_choice
 
-        extra_body = kwargs.get('extra_body')
+        extra_body = kwargs.get("extra_body")
         if isinstance(extra_body, dict):
             payload.update(extra_body)
 
@@ -186,20 +204,21 @@ class LlamaCppEngine(Engine):
 
     async def _arequest(self, payload: dict) -> dict:
         """Makes an async HTTP request to the llama.cpp server."""
+
         @retry(**self.retry_params)
         async def _make_request():
             timeout = aiohttp.ClientTimeout(
-                sock_connect=self.timeout_params['connect'],
-                sock_read=self.timeout_params['read']
+                sock_connect=self.timeout_params["connect"], sock_read=self.timeout_params["read"]
             )
-            async with aiohttp.ClientSession(timeout=timeout) as session:
-                async with session.post(
-                    f"{self.server_endpoint}/v1/chat/completions",
-                    json=payload
-                ) as res:
-                    if res.status != 200:
-                        CustomUserWarning(f"Request failed with status code: {res.status}", raise_with=ValueError)
-                    return await res.json()
+            async with (
+                aiohttp.ClientSession(timeout=timeout) as session,
+                session.post(f"{self.server_endpoint}/v1/chat/completions", json=payload) as res,
+            ):
+                if res.status != 200:
+                    UserMessage(
+                        f"Request failed with status code: {res.status}", raise_with=ValueError
+                    )
+                return await res.json()
 
         return await _make_request()
 
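`_arequest` wraps the POST to `/v1/chat/completions` in the `@retry(**self.retry_params)` decorator imported from `core_ext`. The sketch below only illustrates what the configured values (tries=5, delay=2, backoff=2, max_delay=60, jitter=(1, 5)) mean under the usual exponential-backoff reading of those names; it is not symai's retry implementation:

import random

# Delays between successive attempts: base delay grows geometrically, plus jitter, capped at max_delay.
def backoff_delays(tries=5, delay=2, backoff=2, max_delay=60, jitter=(1, 5)):
    for attempt in range(tries - 1):
        yield min(delay * backoff**attempt + random.uniform(*jitter), max_delay)

print([round(d, 1) for d in backoff_delays()])  # e.g. [3.4, 6.1, 12.8, 19.7]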
@@ -208,12 +227,12 @@ class LlamaCppEngine(Engine):
         """Extract reasoning traces from llama.cpp responses."""
         if not isinstance(response, dict):
             return None
-        choices = response.get('choices', [])
+        choices = response.get("choices", [])
         if not isinstance(choices, list) or not choices:
             return None
         for choice in choices:
-            if isinstance(choice, dict) and isinstance(choice.get('message'), dict):
-                return choice['message'].get('reasoning_content')
+            if isinstance(choice, dict) and isinstance(choice.get("message"), dict):
+                return choice["message"].get("reasoning_content")
         return None
 
     def forward(self, argument):
@@ -225,50 +244,54 @@ class LlamaCppEngine(Engine):
         try:
             res = loop.run_until_complete(self._arequest(payload))
         except Exception as e:
-            CustomUserWarning(f'Error during generation. Caused by: {e}', raise_with=ValueError)
+            UserMessage(f"Error during generation. Caused by: {e}", raise_with=ValueError)
 
-        metadata = {'raw_output': res}
+        metadata = {"raw_output": res}
 
-        if payload.get('tools'):
+        if payload.get("tools"):
             metadata = self._process_tool_calls(res, metadata)
 
         thinking = self._extract_thinking(res)
         if thinking:
-            metadata['thinking'] = thinking
+            metadata["thinking"] = thinking
 
-        output = [r['message']['content'] for r in res['choices']]
+        output = [r["message"]["content"] for r in res["choices"]]
         output = output if isinstance(argument.prop.prepared_input, list) else output[0]
 
         return output, metadata
 
     @staticmethod
     def _process_tool_calls(res, metadata):
-        choices = res.get('choices') if isinstance(res, dict) else None
+        choices = res.get("choices") if isinstance(res, dict) else None
         if not choices:
             return metadata
         hit = False
         for choice in choices:
             if not isinstance(choice, dict):
                 continue
-            message = choice.get('message') or {}
-            tool_calls = message.get('tool_calls') or []
+            message = choice.get("message") or {}
+            tool_calls = message.get("tool_calls") or []
             if not tool_calls:
                 continue
             for tool_call in tool_calls:
                 if not isinstance(tool_call, dict):
                     continue
-                function = tool_call.get('function') or {}
+                function = tool_call.get("function") or {}
                 if hit:
-                    CustomUserWarning("Multiple function calls detected in the response but only the first one will be processed.")
+                    UserMessage(
+                        "Multiple function calls detected in the response but only the first one will be processed."
+                    )
                     return metadata
-                arguments = function.get('arguments')
+                arguments = function.get("arguments")
                 try:
-                    args_dict = json.loads(arguments) if isinstance(arguments, str) else arguments or {}
+                    args_dict = (
+                        json.loads(arguments) if isinstance(arguments, str) else arguments or {}
+                    )
                 except json.JSONDecodeError:
                     args_dict = {}
-                metadata['function_call'] = {
-                    'name': function.get('name'),
-                    'arguments': args_dict or {}
+                metadata["function_call"] = {
+                    "name": function.get("name"),
+                    "arguments": args_dict or {},
                 }
                 hit = True
                 break
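For orientation, the parsing in `forward`, `_extract_thinking` and `_process_tool_calls` above walks an OpenAI-style chat-completion response from the llama.cpp server. An illustrative (made-up) response containing every field those methods read:

# Field names match what the code above reads; the values are invented.
res = {
    "choices": [
        {
            "message": {
                "content": "The answer is 42.",
                "reasoning_content": "Let me think step by step ...",  # surfaced as metadata["thinking"]
                "tool_calls": [
                    {
                        "function": {
                            "name": "lookup",
                            "arguments": "{\"query\": \"answer\"}",  # JSON string, decoded with json.loads
                        }
                    }
                ],
            }
        }
    ]
}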
@@ -278,11 +301,14 @@ class LlamaCppEngine(Engine):
 
     def _prepare_raw_input(self, argument):
         if not argument.prop.processed_input:
-            CustomUserWarning('Need to provide a prompt instruction to the engine if raw_input is enabled.', raise_with=ValueError)
+            UserMessage(
+                "Need to provide a prompt instruction to the engine if raw_input is enabled.",
+                raise_with=ValueError,
+            )
         value = argument.prop.processed_input
-        if type(value) != list:
-            if type(value) != dict:
-                value = {'role': 'user', 'content': str(value)}
+        if not isinstance(value, list):
+            if not isinstance(value, dict):
+                value = {"role": "user", "content": str(value)}
             value = [value]
         return value
 
@@ -293,15 +319,15 @@ class LlamaCppEngine(Engine):
 
         _non_verbose_output = """<META_INSTRUCTION/>\n You will NOT output verbose preambles or post explanation, such as "Sure, let me...", "Hope that was helpful...", "Yes, I can help you with that...", etc. You will consider well formatted output, e.g. for sentences you will use punctuation, spaces, etc. or for code indentation, etc.\n"""
 
-        #@TODO: Non-trivial how to handle user/system/assistant roles;
+        # @TODO: Non-trivial how to handle user/system/assistant roles;
         # For instance Mixtral-8x7B can't use the system role with llama.cpp while other models can, or Mixtral-8x22B expects the conversation roles must
         # alternate user/assistant/user/assistant/..., so how to handle this?
         # For now just use the user, as one can rephrase the system from the user perspective.
-        user: str = ""
+        user: str = ""
 
         if argument.prop.suppress_verbose_output:
             user += _non_verbose_output
-        user = f'{user}\n' if user and len(user) > 0 else ''
+        user = f"{user}\n" if user and len(user) > 0 else ""
 
         ref = argument.prop.instance
         static_ctxt, dyn_ctxt = ref.global_context
@@ -313,20 +339,20 @@ class LlamaCppEngine(Engine):
 
         payload = argument.prop.payload
         if argument.prop.payload:
-            user += f"<ADDITIONAL_CONTEXT/>\n{str(payload)}\n\n"
+            user += f"<ADDITIONAL_CONTEXT/>\n{payload!s}\n\n"
 
         examples: list[str] = argument.prop.examples
         if examples and len(examples) > 0:
-            user += f"<EXAMPLES/>\n{str(examples)}\n\n"
+            user += f"<EXAMPLES/>\n{examples!s}\n\n"
 
         if argument.prop.prompt is not None and len(argument.prop.prompt) > 0:
-            user += f"<INSTRUCTION/>\n{str(argument.prop.prompt)}\n\n"
+            user += f"<INSTRUCTION/>\n{argument.prop.prompt!s}\n\n"
 
         if argument.prop.template_suffix:
-            user += f" You will only generate content for the placeholder `{str(argument.prop.template_suffix)}` following the instructions and the provided context information.\n\n"
+            user += f" You will only generate content for the placeholder `{argument.prop.template_suffix!s}` following the instructions and the provided context information.\n\n"
 
         user += str(argument.prop.processed_input)
 
         argument.prop.prepared_input = [
-            { "role": "user", "content": user },
+            {"role": "user", "content": user},
         ]
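Because the prompt preparation above folds everything into a single user turn, the `prepared_input` handed to the payload builder ends up shaped like the sketch below; the section tags and their order come from the code above, while the concrete texts are invented:

# Hypothetical result of prepare(): one user message carrying all sections in order.
prepared_input = [
    {
        "role": "user",
        "content": (
            "<META_INSTRUCTION/>\n You will NOT output verbose preambles ...\n"
            "<ADDITIONAL_CONTEXT/>\n{'source': 'wiki'}\n\n"
            "<EXAMPLES/>\n['question => answer']\n\n"
            "<INSTRUCTION/>\nSummarize the following text.\n\n"
            "Actual user input goes here."
        ),
    },
]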