symbolicai 1.0.0-py3-none-any.whl → 1.1.1-py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Files changed (129)
  1. symai/__init__.py +198 -134
  2. symai/backend/base.py +51 -51
  3. symai/backend/engines/drawing/engine_bfl.py +33 -33
  4. symai/backend/engines/drawing/engine_gpt_image.py +4 -10
  5. symai/backend/engines/embedding/engine_llama_cpp.py +50 -35
  6. symai/backend/engines/embedding/engine_openai.py +22 -16
  7. symai/backend/engines/execute/engine_python.py +16 -16
  8. symai/backend/engines/files/engine_io.py +51 -49
  9. symai/backend/engines/imagecaptioning/engine_blip2.py +27 -23
  10. symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +53 -46
  11. symai/backend/engines/index/engine_pinecone.py +116 -88
  12. symai/backend/engines/index/engine_qdrant.py +1011 -0
  13. symai/backend/engines/index/engine_vectordb.py +78 -52
  14. symai/backend/engines/lean/engine_lean4.py +65 -25
  15. symai/backend/engines/neurosymbolic/__init__.py +35 -28
  16. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +137 -135
  17. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +145 -152
  18. symai/backend/engines/neurosymbolic/engine_cerebras.py +328 -0
  19. symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +75 -49
  20. symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +199 -155
  21. symai/backend/engines/neurosymbolic/engine_groq.py +106 -72
  22. symai/backend/engines/neurosymbolic/engine_huggingface.py +100 -67
  23. symai/backend/engines/neurosymbolic/engine_llama_cpp.py +121 -93
  24. symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +213 -132
  25. symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +180 -137
  26. symai/backend/engines/ocr/engine_apilayer.py +18 -20
  27. symai/backend/engines/output/engine_stdout.py +9 -9
  28. symai/backend/engines/{webscraping → scrape}/engine_requests.py +25 -11
  29. symai/backend/engines/search/engine_openai.py +95 -83
  30. symai/backend/engines/search/engine_parallel.py +665 -0
  31. symai/backend/engines/search/engine_perplexity.py +40 -41
  32. symai/backend/engines/search/engine_serpapi.py +33 -28
  33. symai/backend/engines/speech_to_text/engine_local_whisper.py +37 -27
  34. symai/backend/engines/symbolic/engine_wolframalpha.py +14 -8
  35. symai/backend/engines/text_to_speech/engine_openai.py +15 -19
  36. symai/backend/engines/text_vision/engine_clip.py +34 -28
  37. symai/backend/engines/userinput/engine_console.py +3 -4
  38. symai/backend/mixin/__init__.py +4 -0
  39. symai/backend/mixin/anthropic.py +48 -40
  40. symai/backend/mixin/cerebras.py +9 -0
  41. symai/backend/mixin/deepseek.py +4 -5
  42. symai/backend/mixin/google.py +5 -4
  43. symai/backend/mixin/groq.py +2 -4
  44. symai/backend/mixin/openai.py +132 -110
  45. symai/backend/settings.py +14 -14
  46. symai/chat.py +164 -94
  47. symai/collect/dynamic.py +13 -11
  48. symai/collect/pipeline.py +39 -31
  49. symai/collect/stats.py +109 -69
  50. symai/components.py +578 -238
  51. symai/constraints.py +14 -5
  52. symai/core.py +1495 -1210
  53. symai/core_ext.py +55 -50
  54. symai/endpoints/api.py +113 -58
  55. symai/extended/api_builder.py +22 -17
  56. symai/extended/arxiv_pdf_parser.py +13 -5
  57. symai/extended/bibtex_parser.py +8 -4
  58. symai/extended/conversation.py +88 -69
  59. symai/extended/document.py +40 -27
  60. symai/extended/file_merger.py +45 -7
  61. symai/extended/graph.py +38 -24
  62. symai/extended/html_style_template.py +17 -11
  63. symai/extended/interfaces/blip_2.py +1 -1
  64. symai/extended/interfaces/clip.py +4 -2
  65. symai/extended/interfaces/console.py +5 -3
  66. symai/extended/interfaces/dall_e.py +3 -1
  67. symai/extended/interfaces/file.py +2 -0
  68. symai/extended/interfaces/flux.py +3 -1
  69. symai/extended/interfaces/gpt_image.py +15 -6
  70. symai/extended/interfaces/input.py +2 -1
  71. symai/extended/interfaces/llava.py +1 -1
  72. symai/extended/interfaces/{naive_webscraping.py → naive_scrape.py} +3 -2
  73. symai/extended/interfaces/naive_vectordb.py +2 -2
  74. symai/extended/interfaces/ocr.py +4 -2
  75. symai/extended/interfaces/openai_search.py +2 -0
  76. symai/extended/interfaces/parallel.py +30 -0
  77. symai/extended/interfaces/perplexity.py +2 -0
  78. symai/extended/interfaces/pinecone.py +6 -4
  79. symai/extended/interfaces/python.py +2 -0
  80. symai/extended/interfaces/serpapi.py +2 -0
  81. symai/extended/interfaces/terminal.py +0 -1
  82. symai/extended/interfaces/tts.py +2 -1
  83. symai/extended/interfaces/whisper.py +2 -1
  84. symai/extended/interfaces/wolframalpha.py +1 -0
  85. symai/extended/metrics/__init__.py +1 -1
  86. symai/extended/metrics/similarity.py +5 -2
  87. symai/extended/os_command.py +31 -22
  88. symai/extended/packages/symdev.py +39 -34
  89. symai/extended/packages/sympkg.py +30 -27
  90. symai/extended/packages/symrun.py +46 -35
  91. symai/extended/repo_cloner.py +10 -9
  92. symai/extended/seo_query_optimizer.py +15 -12
  93. symai/extended/solver.py +104 -76
  94. symai/extended/summarizer.py +8 -7
  95. symai/extended/taypan_interpreter.py +10 -9
  96. symai/extended/vectordb.py +28 -15
  97. symai/formatter/formatter.py +39 -31
  98. symai/formatter/regex.py +46 -44
  99. symai/functional.py +184 -86
  100. symai/imports.py +85 -51
  101. symai/interfaces.py +1 -1
  102. symai/memory.py +33 -24
  103. symai/menu/screen.py +28 -19
  104. symai/misc/console.py +27 -27
  105. symai/misc/loader.py +4 -3
  106. symai/models/base.py +147 -76
  107. symai/models/errors.py +1 -1
  108. symai/ops/__init__.py +1 -1
  109. symai/ops/measures.py +17 -14
  110. symai/ops/primitives.py +933 -635
  111. symai/post_processors.py +28 -24
  112. symai/pre_processors.py +58 -52
  113. symai/processor.py +15 -9
  114. symai/prompts.py +714 -649
  115. symai/server/huggingface_server.py +115 -32
  116. symai/server/llama_cpp_server.py +14 -6
  117. symai/server/qdrant_server.py +206 -0
  118. symai/shell.py +98 -39
  119. symai/shellsv.py +307 -223
  120. symai/strategy.py +135 -81
  121. symai/symbol.py +276 -225
  122. symai/utils.py +62 -46
  123. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/METADATA +19 -9
  124. symbolicai-1.1.1.dist-info/RECORD +169 -0
  125. symbolicai-1.0.0.dist-info/RECORD +0 -163
  126. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/WHEEL +0 -0
  127. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/entry_points.txt +0 -0
  128. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/licenses/LICENSE +0 -0
  129. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/top_level.txt +0 -0
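Entries 28 and 72 above record a rename: the engines/webscraping package became engines/scrape, and interfaces/naive_webscraping.py became interfaces/naive_scrape.py. The sketch below illustrates a version-tolerant import based only on those paths; that the new module exposes the same symbols is an assumption, not something this diff confirms.

    # Hypothetical import shim for the 1.0.0 -> 1.1.1 rename shown in the file list above.
    try:
        from symai.extended.interfaces import naive_scrape as scrape_interface       # 1.1.1 path
    except ImportError:
        from symai.extended.interfaces import naive_webscraping as scrape_interface  # 1.0.0 path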
--- a/symai/backend/engines/neurosymbolic/engine_llama_cpp.py
+++ b/symai/backend/engines/neurosymbolic/engine_llama_cpp.py
@@ -24,14 +24,17 @@ class LlamaCppTokenizer:
 
     @staticmethod
     async def _encode(text: str) -> list[int]:
-        async with aiohttp.ClientSession() as session, session.post(
-            f"{LlamaCppTokenizer._server_endpoint}/tokenize",
-            json={"content": text},
-        ) as res:
+        async with (
+            aiohttp.ClientSession() as session,
+            session.post(
+                f"{LlamaCppTokenizer._server_endpoint}/tokenize",
+                json={"content": text},
+            ) as res,
+        ):
             if res.status != 200:
                 UserMessage(f"Request failed with status code: {res.status}", raise_with=ValueError)
             response_json = await res.json()
-            return response_json['tokens']
+            return response_json["tokens"]
 
     @staticmethod
     def encode(text: str) -> list[int]:
@@ -44,14 +47,17 @@ class LlamaCppTokenizer:
 
     @staticmethod
     async def _decode(tokens: list[int]) -> str:
-        async with aiohttp.ClientSession() as session, session.post(
-            f"{LlamaCppTokenizer._server_endpoint}/detokenize",
-            json={"tokens": tokens},
-        ) as res:
+        async with (
+            aiohttp.ClientSession() as session,
+            session.post(
+                f"{LlamaCppTokenizer._server_endpoint}/detokenize",
+                json={"tokens": tokens},
+            ) as res,
+        ):
             if res.status != 200:
                 UserMessage(f"Request failed with status code: {res.status}", raise_with=ValueError)
             response_json = await res.json()
-            return response_json['content']
+            return response_json["content"]
 
     @staticmethod
     def decode(tokens: list[int]) -> str:
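Both tokenizer hunks replace the comma-joined `async with a() as x, b() as y:` form with the parenthesized multi-manager form accepted since Python 3.10. A minimal, self-contained sketch of that syntax (names here are illustrative and not part of symai):

    import asyncio
    from contextlib import asynccontextmanager

    @asynccontextmanager
    async def resource(name: str):
        # stand-in for aiohttp.ClientSession() / session.post(...)
        yield name

    async def main() -> None:
        # multiple context managers grouped in parentheses (Python 3.10+)
        async with (
            resource("session") as session,
            resource("response") as response,
        ):
            print(session, response)

    asyncio.run(main())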
@@ -65,70 +71,83 @@ class LlamaCppTokenizer:
 
 class LlamaCppEngine(Engine):
     _retry_params: ClassVar[dict[str, Any]] = {
-        'tries': 5,
-        'delay': 2,
-        'max_delay': 60,
-        'backoff': 2,
-        'jitter': (1, 5),
-        'graceful': True
+        "tries": 5,
+        "delay": 2,
+        "max_delay": 60,
+        "backoff": 2,
+        "jitter": (1, 5),
+        "graceful": True,
     }
     _timeout_params: ClassVar[dict[str, Any]] = {
-        'read': None,
-        'connect': None,
+        "read": None,
+        "connect": None,
     }
+
     def __init__(
-            self,
-            model: str | None = None,
-            retry_params: dict = _retry_params,
-            timeout_params: dict = _timeout_params,
-            ):
+        self,
+        model: str | None = None,
+        retry_params: dict = _retry_params,
+        timeout_params: dict = _timeout_params,
+    ):
         super().__init__()
         self.config = deepcopy(SYMAI_CONFIG)
         # In case we use EngineRepository.register to inject the api_key and model => dynamically change the engine at runtime
         if model is not None:
-            self.config['NEUROSYMBOLIC_ENGINE_MODEL'] = model
-        if self.id() != 'neurosymbolic':
+            self.config["NEUROSYMBOLIC_ENGINE_MODEL"] = model
+        if self.id() != "neurosymbolic":
             return
-        if not SYMSERVER_CONFIG.get('online'):
-            UserMessage('You are using the llama.cpp engine, but the server endpoint is not started. Please start the server with `symserver [--args]` or run `symserver --help` to see the available options for this engine.', raise_with=ValueError)
-        self.server_endpoint = f"http://{SYMSERVER_CONFIG.get('--host')}:{SYMSERVER_CONFIG.get('--port')}"
-        self.tokenizer = LlamaCppTokenizer # backwards compatibility with how we handle tokenization, i.e. self.tokenizer().encode(...)
+        if not SYMSERVER_CONFIG.get("online"):
+            UserMessage(
+                "You are using the llama.cpp engine, but the server endpoint is not started. Please start the server with `symserver [--args]` or run `symserver --help` to see the available options for this engine.",
+                raise_with=ValueError,
+            )
+        self.server_endpoint = (
+            f"http://{SYMSERVER_CONFIG.get('--host')}:{SYMSERVER_CONFIG.get('--port')}"
+        )
+        self.tokenizer = LlamaCppTokenizer  # backwards compatibility with how we handle tokenization, i.e. self.tokenizer().encode(...)
         self.timeout_params = self._validate_timeout_params(timeout_params)
         self.retry_params = self._validate_retry_params(retry_params)
         self.name = self.__class__.__name__
 
     def id(self) -> str:
-        if self.config.get('NEUROSYMBOLIC_ENGINE_MODEL') and self.config.get('NEUROSYMBOLIC_ENGINE_MODEL').startswith('llama'):
-            return 'neurosymbolic'
-        return super().id() # default to unregistered
+        if self.config.get("NEUROSYMBOLIC_ENGINE_MODEL") and self.config.get(
+            "NEUROSYMBOLIC_ENGINE_MODEL"
+        ).startswith("llama"):
+            return "neurosymbolic"
+        return super().id()  # default to unregistered
 
     def command(self, *args, **kwargs):
         super().command(*args, **kwargs)
-        if 'NEUROSYMBOLIC_ENGINE_MODEL' in kwargs:
-            self.model = kwargs['NEUROSYMBOLIC_ENGINE_MODEL']
-        if 'seed' in kwargs:
-            self.seed = kwargs['seed']
-        if 'except_remedy' in kwargs:
-            self.except_remedy = kwargs['except_remedy']
+        if "NEUROSYMBOLIC_ENGINE_MODEL" in kwargs:
+            self.model = kwargs["NEUROSYMBOLIC_ENGINE_MODEL"]
+        if "seed" in kwargs:
+            self.seed = kwargs["seed"]
+        if "except_remedy" in kwargs:
+            self.except_remedy = kwargs["except_remedy"]
 
     def compute_required_tokens(self, _messages) -> int:
-        #@TODO: quite non-trivial how to handle this with the llama.cpp server
-        UserMessage('Not implemented for llama.cpp!', raise_with=NotImplementedError)
+        # @TODO: quite non-trivial how to handle this with the llama.cpp server
+        UserMessage("Not implemented for llama.cpp!", raise_with=NotImplementedError)
 
     def compute_remaining_tokens(self, _prompts: list) -> int:
-        #@TODO: quite non-trivial how to handle this with the llama.cpp server
-        UserMessage('Not implemented for llama.cpp!', raise_with=NotImplementedError)
+        # @TODO: quite non-trivial how to handle this with the llama.cpp server
+        UserMessage("Not implemented for llama.cpp!", raise_with=NotImplementedError)
 
     def _validate_timeout_params(self, timeout_params):
         if not isinstance(timeout_params, dict):
             UserMessage("timeout_params must be a dictionary", raise_with=ValueError)
-        assert all(key in timeout_params for key in ['read', 'connect']), "Available keys: ['read', 'connect']"
+        assert all(key in timeout_params for key in ["read", "connect"]), (
+            "Available keys: ['read', 'connect']"
+        )
         return timeout_params
 
     def _validate_retry_params(self, retry_params):
         if not isinstance(retry_params, dict):
             UserMessage("retry_params must be a dictionary", raise_with=ValueError)
-        assert all(key in retry_params for key in ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']), "Available keys: ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']"
+        assert all(
+            key in retry_params
+            for key in ["tries", "delay", "max_delay", "backoff", "jitter", "graceful"]
+        ), "Available keys: ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']"
         return retry_params
 
     @staticmethod
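The validators in this hunk require exactly the key sets named in the assertion messages. A hedged construction sketch based on the signature above; the model name is an assumption for illustration (any value starting with "llama" makes `id()` return "neurosymbolic"), and the symserver endpoint must already be running:

    from symai.backend.engines.neurosymbolic.engine_llama_cpp import LlamaCppEngine

    engine = LlamaCppEngine(
        model="llama-3-8b-instruct",  # assumed model identifier
        retry_params={"tries": 3, "delay": 1, "max_delay": 30, "backoff": 2, "jitter": (1, 2), "graceful": True},
        timeout_params={"read": 120, "connect": 10},
    )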
@@ -149,35 +168,35 @@ class LlamaCppEngine(Engine):
         kwargs = argument.kwargs
         payload = {
             "messages": argument.prop.prepared_input,
-            "temperature": kwargs.get('temperature', 0.6),
-            "frequency_penalty": kwargs.get('frequency_penalty', 0),
-            "presence_penalty": kwargs.get('presence_penalty', 0),
-            "top_p": kwargs.get('top_p', 0.95),
-            "min_p": kwargs.get('min_p', 0.05),
-            "stop": kwargs.get('stop'),
-            "seed": kwargs.get('seed'),
-            "max_tokens": kwargs.get('max_tokens'),
-            "top_k": kwargs.get('top_k', 40),
-            "repeat_penalty": kwargs.get('repeat_penalty', 1),
-            "logits_bias": kwargs.get('logits_bias'),
-            "logprobs": kwargs.get('logprobs', False),
-            "grammar": kwargs.get('grammar'),
-            "response_format": kwargs.get('response_format'),
+            "temperature": kwargs.get("temperature", 0.6),
+            "frequency_penalty": kwargs.get("frequency_penalty", 0),
+            "presence_penalty": kwargs.get("presence_penalty", 0),
+            "top_p": kwargs.get("top_p", 0.95),
+            "min_p": kwargs.get("min_p", 0.05),
+            "stop": kwargs.get("stop"),
+            "seed": kwargs.get("seed"),
+            "max_tokens": kwargs.get("max_tokens"),
+            "top_k": kwargs.get("top_k", 40),
+            "repeat_penalty": kwargs.get("repeat_penalty", 1),
+            "logits_bias": kwargs.get("logits_bias"),
+            "logprobs": kwargs.get("logprobs", False),
+            "grammar": kwargs.get("grammar"),
+            "response_format": kwargs.get("response_format"),
         }
 
-        model = SYMSERVER_CONFIG.get('-m') or SYMSERVER_CONFIG.get('--model')
+        model = SYMSERVER_CONFIG.get("-m") or SYMSERVER_CONFIG.get("--model")
         if model:
             payload["model"] = model
 
-        tools = kwargs.get('tools')
+        tools = kwargs.get("tools")
         if tools:
             payload["tools"] = tools
 
-        tool_choice = kwargs.get('tool_choice')
+        tool_choice = kwargs.get("tool_choice")
         if tool_choice is not None:
             payload["tool_choice"] = tool_choice
 
-        extra_body = kwargs.get('extra_body')
+        extra_body = kwargs.get("extra_body")
         if isinstance(extra_body, dict):
             payload.update(extra_body)
 
@@ -185,18 +204,20 @@ class LlamaCppEngine(Engine):
 
     async def _arequest(self, payload: dict) -> dict:
         """Makes an async HTTP request to the llama.cpp server."""
+
         @retry(**self.retry_params)
         async def _make_request():
             timeout = aiohttp.ClientTimeout(
-                sock_connect=self.timeout_params['connect'],
-                sock_read=self.timeout_params['read']
+                sock_connect=self.timeout_params["connect"], sock_read=self.timeout_params["read"]
             )
-            async with aiohttp.ClientSession(timeout=timeout) as session, session.post(
-                f"{self.server_endpoint}/v1/chat/completions",
-                json=payload
-            ) as res:
+            async with (
+                aiohttp.ClientSession(timeout=timeout) as session,
+                session.post(f"{self.server_endpoint}/v1/chat/completions", json=payload) as res,
+            ):
                 if res.status != 200:
-                    UserMessage(f"Request failed with status code: {res.status}", raise_with=ValueError)
+                    UserMessage(
+                        f"Request failed with status code: {res.status}", raise_with=ValueError
+                    )
                 return await res.json()
 
         return await _make_request()
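`_arequest` posts the prepared payload to the llama.cpp server's OpenAI-compatible chat endpoint. A standalone sketch of the same request with plain aiohttp; the host and port are assumptions, while the `/v1/chat/completions` path and the response shape come from this diff:

    import asyncio
    import aiohttp

    async def chat(prompt: str) -> str:
        payload = {"messages": [{"role": "user", "content": prompt}], "temperature": 0.6}
        async with (
            aiohttp.ClientSession() as session,
            session.post("http://127.0.0.1:8080/v1/chat/completions", json=payload) as res,  # assumed address
        ):
            res.raise_for_status()
            data = await res.json()
            return data["choices"][0]["message"]["content"]

    # print(asyncio.run(chat("Hello!")))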
@@ -206,12 +227,12 @@ class LlamaCppEngine(Engine):
         """Extract reasoning traces from llama.cpp responses."""
         if not isinstance(response, dict):
             return None
-        choices = response.get('choices', [])
+        choices = response.get("choices", [])
         if not isinstance(choices, list) or not choices:
             return None
         for choice in choices:
-            if isinstance(choice, dict) and isinstance(choice.get('message'), dict):
-                return choice['message'].get('reasoning_content')
+            if isinstance(choice, dict) and isinstance(choice.get("message"), dict):
+                return choice["message"].get("reasoning_content")
         return None
 
     def forward(self, argument):
@@ -223,50 +244,54 @@ class LlamaCppEngine(Engine):
         try:
             res = loop.run_until_complete(self._arequest(payload))
         except Exception as e:
-            UserMessage(f'Error during generation. Caused by: {e}', raise_with=ValueError)
+            UserMessage(f"Error during generation. Caused by: {e}", raise_with=ValueError)
 
-        metadata = {'raw_output': res}
+        metadata = {"raw_output": res}
 
-        if payload.get('tools'):
+        if payload.get("tools"):
             metadata = self._process_tool_calls(res, metadata)
 
         thinking = self._extract_thinking(res)
         if thinking:
-            metadata['thinking'] = thinking
+            metadata["thinking"] = thinking
 
-        output = [r['message']['content'] for r in res['choices']]
+        output = [r["message"]["content"] for r in res["choices"]]
         output = output if isinstance(argument.prop.prepared_input, list) else output[0]
 
         return output, metadata
 
     @staticmethod
     def _process_tool_calls(res, metadata):
-        choices = res.get('choices') if isinstance(res, dict) else None
+        choices = res.get("choices") if isinstance(res, dict) else None
        if not choices:
             return metadata
         hit = False
         for choice in choices:
             if not isinstance(choice, dict):
                 continue
-            message = choice.get('message') or {}
-            tool_calls = message.get('tool_calls') or []
+            message = choice.get("message") or {}
+            tool_calls = message.get("tool_calls") or []
             if not tool_calls:
                 continue
             for tool_call in tool_calls:
                 if not isinstance(tool_call, dict):
                     continue
-                function = tool_call.get('function') or {}
+                function = tool_call.get("function") or {}
                 if hit:
-                    UserMessage("Multiple function calls detected in the response but only the first one will be processed.")
+                    UserMessage(
+                        "Multiple function calls detected in the response but only the first one will be processed."
+                    )
                     return metadata
-                arguments = function.get('arguments')
+                arguments = function.get("arguments")
                 try:
-                    args_dict = json.loads(arguments) if isinstance(arguments, str) else arguments or {}
+                    args_dict = (
+                        json.loads(arguments) if isinstance(arguments, str) else arguments or {}
+                    )
                 except json.JSONDecodeError:
                     args_dict = {}
-                metadata['function_call'] = {
-                    'name': function.get('name'),
-                    'arguments': args_dict or {}
+                metadata["function_call"] = {
+                    "name": function.get("name"),
+                    "arguments": args_dict or {},
                 }
                 hit = True
                 break
@@ -276,11 +301,14 @@ class LlamaCppEngine(Engine):
 
     def _prepare_raw_input(self, argument):
         if not argument.prop.processed_input:
-            UserMessage('Need to provide a prompt instruction to the engine if raw_input is enabled.', raise_with=ValueError)
+            UserMessage(
+                "Need to provide a prompt instruction to the engine if raw_input is enabled.",
+                raise_with=ValueError,
+            )
         value = argument.prop.processed_input
         if not isinstance(value, list):
             if not isinstance(value, dict):
-                value = {'role': 'user', 'content': str(value)}
+                value = {"role": "user", "content": str(value)}
             value = [value]
         return value
 
@@ -291,15 +319,15 @@ class LlamaCppEngine(Engine):
 
         _non_verbose_output = """<META_INSTRUCTION/>\n You will NOT output verbose preambles or post explanation, such as "Sure, let me...", "Hope that was helpful...", "Yes, I can help you with that...", etc. You will consider well formatted output, e.g. for sentences you will use punctuation, spaces, etc. or for code indentation, etc.\n"""
 
-        #@TODO: Non-trivial how to handle user/system/assistant roles;
+        # @TODO: Non-trivial how to handle user/system/assistant roles;
         # For instance Mixtral-8x7B can't use the system role with llama.cpp while other models can, or Mixtral-8x22B expects the conversation roles must
         # alternate user/assistant/user/assistant/..., so how to handle this?
         # For now just use the user, as one can rephrase the system from the user perspective.
-        user: str = ""
+        user: str = ""
 
         if argument.prop.suppress_verbose_output:
             user += _non_verbose_output
-        user = f'{user}\n' if user and len(user) > 0 else ''
+        user = f"{user}\n" if user and len(user) > 0 else ""
 
         ref = argument.prop.instance
         static_ctxt, dyn_ctxt = ref.global_context
@@ -326,5 +354,5 @@ class LlamaCppEngine(Engine):
         user += str(argument.prop.processed_input)
 
         argument.prop.prepared_input = [
-            { "role": "user", "content": user },
+            {"role": "user", "content": user},
         ]