alita-sdk 0.3.392__py3-none-any.whl → 0.3.409__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of alita-sdk might be problematic.

@@ -568,7 +568,7 @@ class AlitaClient:
     def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
                       tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
                       memory=None, runtime='langchain', variables: Optional[list] = None,
-                      store: Optional[BaseStore] = None):
+                      store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False):
         """
         Create a predict-type agent with minimal configuration.
 
@@ -581,6 +581,7 @@ class AlitaClient:
             runtime: Runtime type (default: 'langchain')
            variables: Optional list of variables for the agent
            store: Optional store for memory
+           debug_mode: Enable debug mode for cases when assistant can be initialized without tools
 
         Returns:
             Runnable agent ready for execution
@@ -600,7 +601,7 @@ class AlitaClient:
             'variables': variables
         }
         return LangChainAssistant(self, agent_data, llm,
-                                  chat_history, "predict", memory=memory, store=store).runnable()
+                                  chat_history, "predict", memory=memory, store=store, debug_mode=debug_mode).runnable()
 
     def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
                           runtime_config: dict = None, llm_model: str = None,
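
A minimal usage sketch of the new `debug_mode` flag is shown below. It assumes an already-initialized `AlitaClient` instance named `client` (constructor details are deployment-specific and omitted), and the `{"input": ...}` invocation payload shape is an assumption for illustration only.

```python
from langchain_openai import ChatOpenAI

# `client` is assumed to be an existing AlitaClient instance; its constructor
# arguments (base URL, project, credentials) are deployment-specific.
llm = ChatOpenAI(model="gpt-4o-mini")

# debug_mode=True lets the predict agent initialize even when no tools are
# configured, which is handy when iterating on instructions alone.
agent = client.predict_agent(
    llm,
    instructions="You are a helpful assistant.",
    debug_mode=True,
)

# Invocation payload shape shown here is illustrative.
result = agent.invoke({"input": "Say hello"})
```
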
@@ -17,6 +17,7 @@ from .constants import REACT_ADDON, REACT_VARS, XML_ADDON
 from .chat_message_template import Jinja2TemplatedChatMessagesTemplate
 from ..tools.echo import EchoTool
 from langchain_core.tools import BaseTool, ToolException
+from jinja2 import Environment, DebugUndefined
 
 logger = logging.getLogger(__name__)
 
@@ -29,7 +30,8 @@ class Assistant:
                  app_type: str = "openai",
                  tools: Optional[list] = [],
                  memory: Optional[Any] = None,
-                 store: Optional[BaseStore] = None):
+                 store: Optional[BaseStore] = None,
+                 debug_mode: Optional[bool] = False):
 
         self.app_type = app_type
         self.memory = memory
@@ -87,7 +89,7 @@ class Assistant:
             for internal_tool_name in meta.get("internal_tools"):
                 version_tools.append({"type": "internal_tool", "name": internal_tool_name})
 
-        self.tools = get_tools(version_tools, alita_client=alita, llm=self.client, memory_store=self.store)
+        self.tools = get_tools(version_tools, alita_client=alita, llm=self.client, memory_store=self.store, debug_mode=debug_mode)
         if tools:
             self.tools += tools
         # Handle prompt setup
@@ -282,6 +284,10 @@ class Assistant:
                     'task': {
                         'type': 'variable',
                         'value': 'input'
+                    },
+                    'chat_history': {
+                        'type': 'variable',
+                        'value': 'messages'
                     }
                 },
                 'input': ['messages'],
@@ -344,5 +350,7 @@ class Assistant:
                 continue
             # take only the content of the system message from the openai prompt
             if isinstance(message, SystemMessage):
-                return message.content
+                environment = Environment(undefined=DebugUndefined)
+                template = environment.from_string(message.content)
+                return template.render(self.prompt.partial_variables)
         return None
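
Rendering the system message through Jinja2 with `DebugUndefined` substitutes whatever partial variables are available while leaving unknown placeholders in place instead of raising. A standalone illustration (template text and variable names invented for the example):

```python
from jinja2 import DebugUndefined, Environment

template_text = "You are {{ role }}. Focus on {{ focus_area }}."
partial_variables = {"role": "a helpful assistant"}  # 'focus_area' intentionally missing

environment = Environment(undefined=DebugUndefined)
rendered = environment.from_string(template_text).render(partial_variables)
print(rendered)
# -> "You are a helpful assistant. Focus on {{ focus_area }}."
```
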
@@ -1,4 +1,5 @@
 import re
+import uuid
 from io import BytesIO
 
 import mammoth.images
@@ -8,6 +9,9 @@ from langchain_core.document_loaders import BaseLoader
 from langchain_core.documents import Document
 from mammoth import convert_to_html
 from markdownify import markdownify
+from docx import Document as DocxDocument
+from docx.oxml.ns import qn
+from bs4 import BeautifulSoup
 
 from alita_sdk.tools.chunkers.sematic.markdown_chunker import markdown_by_headers_chunker
 from .utils import perform_llm_prediction_for_image_bytes
@@ -17,6 +21,7 @@ class AlitaDocxMammothLoader(BaseLoader):
     """
     Loader for Docx files using Mammoth to convert to HTML, with image handling,
     and then Markdownify to convert HTML to markdown.
+    Detects bordered paragraphs and text boxes and treats them as code blocks.
     """
     def __init__(self, **kwargs):
         """
@@ -97,6 +102,295 @@ class AlitaDocxMammothLoader(BaseLoader):
         new_md = pattern.sub(replace_placeholder, original_md)
         return new_md
 
+    def __has_border(self, paragraph):
+        """
+        Check if a paragraph has border formatting.
+
+        Args:
+            paragraph: A python-docx Paragraph object.
+
+        Returns:
+            bool: True if paragraph has any border, False otherwise.
+        """
+        pPr = paragraph._element.pPr
+        if pPr is not None:
+            pBdr = pPr.find(qn('w:pBdr'))
+            if pBdr is not None:
+                # Check if any border side exists (top, bottom, left, right)
+                for side in ['top', 'bottom', 'left', 'right']:
+                    border = pBdr.find(qn(f'w:{side}'))
+                    if border is not None:
+                        # Check if border is not "none" or has a width
+                        val = border.get(qn('w:val'))
+                        if val and val != 'none':
+                            return True
+        return False
+
+    def __find_text_boxes(self, doc):
+        """
+        Find all text boxes in document by searching OOXML structure.
+        Text boxes are typically in w:txbxContent elements.
+
+        Args:
+            doc: A python-docx Document object.
+
+        Returns:
+            list: List of tuples (element, paragraphs_inside_textbox).
+        """
+        text_boxes = []
+
+        # Iterate through document body XML to find text box content elements
+        for element in doc.element.body.iter():
+            # Look for text box content elements
+            if element.tag.endswith('txbxContent'):
+                # Collect all paragraphs inside this text box
+                txbx_paragraphs = []
+                for txbx_para_element in element.iter():
+                    if txbx_para_element.tag.endswith('p'):
+                        txbx_paragraphs.append(txbx_para_element)
+
+                if txbx_paragraphs:
+                    text_boxes.append((element, txbx_paragraphs))
+
+        return text_boxes
+
+    def __create_marker_paragraph(self, marker_text):
+        """
+        Create a paragraph element with marker text.
+
+        Args:
+            marker_text (str): The marker text to insert.
+
+        Returns:
+            Element: An OOXML paragraph element.
+        """
+        from docx.oxml import OxmlElement
+
+        p = OxmlElement('w:p')
+        r = OxmlElement('w:r')
+        t = OxmlElement('w:t')
+        t.text = marker_text
+        r.append(t)
+        p.append(r)
+        return p
+
+    def __inject_markers_for_paragraph(self, paragraph, start_marker, end_marker):
+        """
+        Inject marker paragraphs before and after a bordered paragraph.
+
+        Args:
+            paragraph: A python-docx Paragraph object.
+            start_marker (str): The start marker text.
+            end_marker (str): The end marker text.
+        """
+        # Insert start marker paragraph before
+        marker_p_start = self.__create_marker_paragraph(start_marker)
+        paragraph._element.addprevious(marker_p_start)
+
+        # Insert end marker paragraph after
+        marker_p_end = self.__create_marker_paragraph(end_marker)
+        paragraph._element.addnext(marker_p_end)
+
+    def __inject_markers_for_textbox(self, textbox_element, paragraph_elements, start_marker, end_marker):
+        """
+        Inject markers around text box content.
+
+        Args:
+            textbox_element: The w:txbxContent element.
+            paragraph_elements: List of paragraph elements inside the text box.
+            start_marker (str): The start marker text.
+            end_marker (str): The end marker text.
+        """
+        if not paragraph_elements:
+            return
+
+        # Insert start marker before first paragraph in text box
+        first_para = paragraph_elements[0]
+        marker_p_start = self.__create_marker_paragraph(start_marker)
+        first_para.addprevious(marker_p_start)
+
+        # Insert end marker after last paragraph in text box
+        last_para = paragraph_elements[-1]
+        marker_p_end = self.__create_marker_paragraph(end_marker)
+        last_para.addnext(marker_p_end)
+
+    def __detect_and_mark_bordered_content(self, docx_stream):
+        """
+        Detects bordered paragraphs and text boxes, injects unique markers around them.
+        Groups consecutive bordered paragraphs into single code blocks.
+
+        Args:
+            docx_stream: A file-like object containing the DOCX document.
+
+        Returns:
+            tuple: (modified_docx_stream, start_marker, end_marker)
+        """
+        # Load document with python-docx
+        doc = DocxDocument(docx_stream)
+
+        # Generate unique markers to avoid conflicts with document content
+        unique_id = uuid.uuid4().hex[:8]
+        start_marker = f"<<<BORDERED_BLOCK_START_{unique_id}>>>"
+        end_marker = f"<<<BORDERED_BLOCK_END_{unique_id}>>>"
+
+        # Group consecutive bordered paragraphs together
+        bordered_groups = []
+        current_group = []
+
+        for para in doc.paragraphs:
+            if self.__has_border(para):
+                current_group.append(para)
+            else:
+                if current_group:
+                    # End of a bordered group
+                    bordered_groups.append(current_group)
+                    current_group = []
+
+        # Don't forget the last group if document ends with bordered paragraphs
+        if current_group:
+            bordered_groups.append(current_group)
+
+        # Collect all text boxes
+        # text_boxes = self.__find_text_boxes(doc)
+
+        # Inject markers around each group of consecutive bordered paragraphs
+        for group in bordered_groups:
+            if group:
+                # Add start marker before first paragraph in group
+                first_para = group[0]
+                marker_p_start = self.__create_marker_paragraph(start_marker)
+                first_para._element.addprevious(marker_p_start)
+
+                # Add end marker after last paragraph in group
+                last_para = group[-1]
+                marker_p_end = self.__create_marker_paragraph(end_marker)
+                last_para._element.addnext(marker_p_end)
+
+        # Inject markers around text box content
+        # for textbox_element, para_elements in text_boxes:
+        #     self.__inject_markers_for_textbox(textbox_element, para_elements, start_marker, end_marker)
+
+        # Save modified document to BytesIO
+        output = BytesIO()
+        doc.save(output)
+        output.seek(0)
+
+        return output, start_marker, end_marker
+
+    def __contains_complex_structure(self, content_html):
+        """
+        Check if HTML content contains tables, lists, or other complex structures.
+
+        Args:
+            content_html (str): HTML content to analyze.
+
+        Returns:
+            bool: True if content contains tables/lists, False otherwise.
+        """
+        content_soup = BeautifulSoup(content_html, 'html.parser')
+
+        # Check for tables
+        if content_soup.find('table'):
+            return True
+
+        # Check for lists (ul, ol)
+        if content_soup.find('ul') or content_soup.find('ol'):
+            return True
+
+        return False
+
+    def __escape_hash_symbols(self, html_content):
+        """
+        Escape hash (#) symbols at the beginning of lines in HTML to prevent
+        them from being treated as markdown headers.
+
+        Args:
+            html_content (str): HTML content.
+
+        Returns:
+            str: HTML with escaped hash symbols.
+        """
+        soup = BeautifulSoup(html_content, 'html.parser')
+
+        # Process all text-containing elements
+        for element in soup.find_all(['p', 'li', 'td', 'th', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']):
+            if element.string:
+                text = element.string
+                # If line starts with #, escape it
+                if text.strip().startswith('#'):
+                    element.string = text.replace('#', '\\#', 1)
+
+        return str(soup)
+
+    def __wrap_marked_sections_in_code_blocks(self, html, start_marker, end_marker):
+        """
+        Find content between markers and wrap appropriately:
+        - Simple text/code → <pre><code> block
+        - Tables/lists → Custom wrapper with preserved structure
+
+        Args:
+            html (str): The HTML content from Mammoth.
+            start_marker (str): The start marker text.
+            end_marker (str): The end marker text.
+
+        Returns:
+            str: HTML with marked sections wrapped appropriately.
+        """
+        import html as html_module
+
+        # Mammoth escapes < and > to &lt; and &gt;, so we need to escape our markers too
+        escaped_start = html_module.escape(start_marker)
+        escaped_end = html_module.escape(end_marker)
+
+        # Pattern to find content between HTML-escaped markers (including HTML tags)
+        # The markers will be in separate <p> tags, and content in between
+        pattern = re.compile(
+            f'<p>{re.escape(escaped_start)}</p>(.*?)<p>{re.escape(escaped_end)}</p>',
+            re.DOTALL
+        )
+
+        def replace_with_appropriate_wrapper(match):
+            content = match.group(1)
+
+            # Detect if content has complex structure (tables, lists)
+            has_complex_structure = self.__contains_complex_structure(content)
+
+            if has_complex_structure:
+                # Preserve structure: keep HTML as-is, escape # symbols
+                escaped_content = self.__escape_hash_symbols(content)
+                # Wrap in a div with special class for potential custom handling
+                return f'<div class="alita-bordered-content">{escaped_content}</div>'
+            else:
+                # Simple text/code: extract as plain text and wrap in code block
+                content_soup = BeautifulSoup(content, 'html.parser')
+
+                # Extract text from each paragraph separately to preserve line breaks
+                lines = []
+                for element in content_soup.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']):
+                    # Replace <br /> within paragraphs with newlines
+                    for br in element.find_all('br'):
+                        br.replace_with('\n')
+                    text = element.get_text()
+                    # Preserve leading whitespace (indentation), only strip trailing
+                    lines.append(text.rstrip())
+
+                # If no paragraphs found, just get all text
+                if not lines:
+                    content = content.replace('<br />', '\n').replace('<br/>', '\n').replace('<br>', '\n')
+                    content_text = content_soup.get_text()
+                    lines = [line.rstrip() for line in content_text.split('\n')]
+
+                # Join lines, strip only leading/trailing empty lines
+                content_text = '\n'.join(lines).strip()
+                # Return as code block (need to HTML-escape the content)
+                content_escaped = html_module.escape(content_text)
+                return f'<pre><code>{content_escaped}</code></pre>'
+
+        # Replace all marked sections with appropriate wrappers
+        result_html = pattern.sub(replace_with_appropriate_wrapper, html)
+
+        return result_html
+
     def load(self):
         """
         Loads and converts the Docx file to markdown format.
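
The marker-wrapping step above can be exercised on its own. The sketch below is a simplified standalone version of the same regex substitution (it skips the table/list branch); the marker value and sample HTML are invented for the example.

```python
import html
import re

# Illustrative marker; the loader generates these with a uuid suffix.
start_marker = "<<<BORDERED_BLOCK_START_demo1234>>>"
end_marker = "<<<BORDERED_BLOCK_END_demo1234>>>"

# Mammoth HTML-escapes text, so the markers appear escaped inside <p> tags.
sample_html = (
    f"<p>{html.escape(start_marker)}</p>"
    "<p>if x &gt; 0:</p><p>    return x</p>"
    f"<p>{html.escape(end_marker)}</p>"
)

pattern = re.compile(
    f"<p>{re.escape(html.escape(start_marker))}</p>(.*?)<p>{re.escape(html.escape(end_marker))}</p>",
    re.DOTALL,
)

def wrap_as_code(match: re.Match) -> str:
    # One line per <p>, unescape for joining, then re-escape for <pre><code>.
    paragraphs = re.findall(r"<p>(.*?)</p>", match.group(1), re.DOTALL)
    code = "\n".join(html.unescape(p) for p in paragraphs)
    return f"<pre><code>{html.escape(code)}</code></pre>"

print(pattern.sub(wrap_as_code, sample_html))
# prints: <pre><code>if x &gt; 0:\n    return x</code></pre>
```
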
@@ -131,6 +425,7 @@ class AlitaDocxMammothLoader(BaseLoader):
     def _convert_docx_to_markdown(self, docx_file):
         """
         Converts the content of a Docx file to markdown format.
+        Detects bordered content and treats it as code blocks.
 
         Args:
             docx_file (BinaryIO): The Docx file object.
@@ -138,11 +433,28 @@ class AlitaDocxMammothLoader(BaseLoader):
         Returns:
             str: The markdown content extracted from the Docx file.
         """
+        # Step 1: Detect and mark bordered content
+        # Reset stream position if needed
+        if hasattr(docx_file, 'seek'):
+            docx_file.seek(0)
+
+        marked_docx, start_marker, end_marker = self.__detect_and_mark_bordered_content(docx_file)
+
+        # Step 2: Convert marked DOCX to HTML using Mammoth
         if self.extract_images:
             # Extract images using the provided image handler
-            result = convert_to_html(docx_file, convert_image=mammoth.images.img_element(self.__handle_image))
+            result = convert_to_html(marked_docx, convert_image=mammoth.images.img_element(self.__handle_image))
         else:
             # Ignore images
-            result = convert_to_html(docx_file, convert_image=lambda image: "")
-        content = markdownify(result.value, heading_style="ATX")
+            result = convert_to_html(marked_docx, convert_image=lambda image: "")
+
+        # Step 3: Wrap marked sections in <pre><code> tags
+        html_with_code_blocks = self.__wrap_marked_sections_in_code_blocks(
+            result.value, start_marker, end_marker
+        )
+
+        # Step 4: Convert HTML to markdown
+        content = markdownify(html_with_code_blocks, heading_style="ATX")
+
+        # Step 5: Post-process markdown (for image transcripts, etc.)
         return self.__postprocess_original_md(content)
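
Step 1's border detection hinges on the `w:pBdr` element inside the paragraph properties. Below is a self-contained sketch of adding such a border with python-docx and detecting it with essentially the same check the loader uses; the document content is made up, and borders inherited from styles are not covered by this check.

```python
from docx import Document
from docx.oxml import OxmlElement
from docx.oxml.ns import qn


def has_border(paragraph) -> bool:
    """Mirror of the loader's check: any pBdr side whose w:val is not 'none'."""
    pPr = paragraph._element.pPr
    if pPr is None:
        return False
    pBdr = pPr.find(qn('w:pBdr'))
    if pBdr is None:
        return False
    for side in ('top', 'bottom', 'left', 'right'):
        border = pBdr.find(qn(f'w:{side}'))
        if border is not None and border.get(qn('w:val')) not in (None, 'none'):
            return True
    return False


# Build a throwaway document: one plain paragraph, one with a single bottom border.
doc = Document()
plain = doc.add_paragraph("plain text")
boxed = doc.add_paragraph("print('hello')")

# Append a w:pBdr element directly; child ordering inside w:pPr is simplified
# here compared to what Word itself would emit.
pBdr = OxmlElement('w:pBdr')
bottom = OxmlElement('w:bottom')
bottom.set(qn('w:val'), 'single')
bottom.set(qn('w:sz'), '4')
pBdr.append(bottom)
boxed._p.get_or_add_pPr().append(pBdr)

print(has_border(plain))  # False
print(has_border(boxed))  # True
```
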
@@ -348,8 +348,8 @@ class StateModifierNode(Runnable):
         return result
 
 
-
-def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None, state_class=None, output_variables=None):
+def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None,
+                          state_class=None, output_variables=None):
     # prepare output channels
     if interrupt_after is None:
         interrupt_after = []
@@ -466,7 +466,7 @@ def create_graph(
         elif node_type == 'agent':
             input_params = node.get('input', ['messages'])
             input_mapping = node.get('input_mapping',
-                {'messages': {'type': 'variable', 'value': 'messages'}})
+                                     {'messages': {'type': 'variable', 'value': 'messages'}})
             lg_builder.add_node(node_id, FunctionTool(
                 client=client, tool=tool,
                 name=node_id, return_type='str',
@@ -481,7 +481,8 @@ def create_graph(
             # wrap with mappings
             pipeline_name = node.get('tool', None)
             if not pipeline_name:
-                raise ValueError("Subgraph must have a 'tool' node: add required tool to the subgraph node")
+                raise ValueError(
+                    "Subgraph must have a 'tool' node: add required tool to the subgraph node")
             node_fn = SubgraphRunnable(
                 inner=tool.graph,
                 name=pipeline_name,
@@ -520,7 +521,8 @@ def create_graph(
             loop_toolkit_name = node.get('loop_toolkit_name')
             loop_tool_name = node.get('loop_tool')
             if (loop_toolkit_name and loop_tool_name) or loop_tool_name:
-                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(loop_tool_name)
+                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(
+                    loop_tool_name)
                 for t in tools:
                     if t.name == loop_tool_name:
                         logger.debug(f"Loop tool discovered: {t}")
@@ -555,7 +557,8 @@ def create_graph(
                         break
         elif node_type == 'code':
             from ..tools.sandbox import create_sandbox_tool
-            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True, alita_client=kwargs.get('alita_client', None))
+            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
+                                               alita_client=kwargs.get('alita_client', None))
            code_data = node.get('code', {'type': 'fixed', 'value': "return 'Code block is empty'"})
            lg_builder.add_node(node_id, FunctionTool(
                tool=sandbox_tool, name=node['id'], return_type='dict',
@@ -777,20 +780,46 @@ class LangGraphAgentRunnable(CompiledStateGraph):
             # Convert chat history dict messages to LangChain message objects
             chat_history = input.pop('chat_history')
             input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
-
+
+        # handler for LLM node: if no input (Chat perspective), then take last human message
+        if not input.get('input'):
+            if input.get('messages'):
+                input['input'] = [next((msg for msg in reversed(input['messages']) if isinstance(msg, HumanMessage)),
+                                       None)]
+
         # Append current input to existing messages instead of overwriting
         if input.get('input'):
             if isinstance(input['input'], str):
                 current_message = input['input']
             else:
                 current_message = input.get('input')[-1]
+
             # TODO: add handler after we add 2+ inputs (filterByType, etc.)
-            input['input'] = current_message if isinstance(current_message, str) else str(current_message)
+            if isinstance(current_message, HumanMessage):
+                current_content = current_message.content
+                if isinstance(current_content, list):
+                    text_contents = [
+                        item['text'] if isinstance(item, dict) and item.get('type') == 'text'
+                        else item if isinstance(item, str)
+                        else None
+                        for item in current_content
+                    ]
+                    text_contents = [text for text in text_contents if text is not None]
+                    input['input'] = ". ".join(text_contents)
+                elif isinstance(current_content, str):
+                    # on regenerate case
+                    input['input'] = current_content
+                else:
+                    input['input'] = str(current_content)
+            elif isinstance(current_message, str):
+                input['input'] = current_message
+            else:
+                input['input'] = str(current_message)
             if input.get('messages'):
                 # Ensure existing messages are LangChain objects
                 input['messages'] = [convert_dict_to_message(msg) for msg in input['messages']]
                 # Append to existing messages
-                input['messages'].append(current_message)
+                # input['messages'].append(current_message)
             else:
                 # No existing messages, create new list
                 input['messages'] = [current_message]
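
The new branch above flattens multimodal `HumanMessage` content (a list mixing text parts, other part dicts, and plain strings) into a single input string. A standalone illustration of the same flattening, with made-up message content:

```python
from langchain_core.messages import HumanMessage

msg = HumanMessage(content=[
    {"type": "text", "text": "Summarize the attached report"},
    {"type": "image_url", "image_url": {"url": "https://example.com/chart.png"}},
    "and list the top three risks",
])

text_contents = [
    item["text"] if isinstance(item, dict) and item.get("type") == "text"
    else item if isinstance(item, str)
    else None
    for item in msg.content
]
flattened = ". ".join(t for t in text_contents if t is not None)
print(flattened)
# -> "Summarize the attached report. and list the top three risks"
```
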
@@ -801,7 +830,8 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         else:
             result = super().invoke(input, config=config, *args, **kwargs)
         try:
-            output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)), result['messages'][-1].content)
+            output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
+                          result['messages'][-1].content)
         except:
             output = list(result.values())[-1]
         config_state = self.get_state(config)
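
The wrapped expression above walks the message list backwards and returns the first message that is not a `HumanMessage`, falling back to the last message's content. In isolation (sample messages invented here):

```python
from langchain_core.messages import AIMessage, HumanMessage

messages = [
    HumanMessage(content="What is 2 + 2?"),
    AIMessage(content="2 + 2 = 4"),
    HumanMessage(content="thanks"),
]

output = next(
    (msg.content for msg in reversed(messages) if not isinstance(msg, HumanMessage)),
    messages[-1].content,
)
print(output)  # -> "2 + 2 = 4"
```
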
@@ -809,8 +839,6 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         if is_execution_finished:
             thread_id = None
 
-
-
         result_with_state = {
             "output": output,
             "thread_id": thread_id,
@@ -39,7 +39,14 @@ class ApplicationToolkit(BaseToolkit):
                 description=app_details.get("description"),
                 application=app,
                 args_schema=applicationToolSchema,
-                return_type='str')])
+                return_type='str',
+                client=client,
+                args_runnable={
+                    "application_id": application_id,
+                    "application_version_id": application_version_id,
+                    "store": store,
+                    "llm": client.get_llm(version_details['llm_settings']['model_name'], model_settings),
+                })])
 
     def get_tools(self):
         return self.tools