alita-sdk 0.3.457__py3-none-any.whl → 0.3.465__py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Files changed (37)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent/__init__.py +0 -0
  4. alita_sdk/cli/agent/default.py +176 -0
  5. alita_sdk/cli/agent_executor.py +155 -0
  6. alita_sdk/cli/agent_loader.py +197 -0
  7. alita_sdk/cli/agent_ui.py +218 -0
  8. alita_sdk/cli/agents.py +1911 -0
  9. alita_sdk/cli/callbacks.py +576 -0
  10. alita_sdk/cli/cli.py +159 -0
  11. alita_sdk/cli/config.py +164 -0
  12. alita_sdk/cli/formatting.py +182 -0
  13. alita_sdk/cli/input_handler.py +256 -0
  14. alita_sdk/cli/mcp_loader.py +315 -0
  15. alita_sdk/cli/toolkit.py +330 -0
  16. alita_sdk/cli/toolkit_loader.py +55 -0
  17. alita_sdk/cli/tools/__init__.py +36 -0
  18. alita_sdk/cli/tools/approval.py +224 -0
  19. alita_sdk/cli/tools/filesystem.py +905 -0
  20. alita_sdk/cli/tools/planning.py +403 -0
  21. alita_sdk/cli/tools/terminal.py +280 -0
  22. alita_sdk/runtime/clients/client.py +16 -1
  23. alita_sdk/runtime/langchain/constants.py +2 -1
  24. alita_sdk/runtime/langchain/langraph_agent.py +17 -5
  25. alita_sdk/runtime/langchain/utils.py +1 -1
  26. alita_sdk/runtime/tools/function.py +17 -5
  27. alita_sdk/runtime/tools/llm.py +65 -7
  28. alita_sdk/tools/base_indexer_toolkit.py +54 -2
  29. alita_sdk/tools/qtest/api_wrapper.py +871 -32
  30. alita_sdk/tools/sharepoint/api_wrapper.py +22 -2
  31. alita_sdk/tools/sharepoint/authorization_helper.py +17 -1
  32. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/METADATA +145 -2
  33. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/RECORD +37 -15
  34. alita_sdk-0.3.465.dist-info/entry_points.txt +2 -0
  35. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/WHEEL +0 -0
  36. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/licenses/LICENSE +0 -0
  37. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/clients/client.py
@@ -68,6 +68,7 @@ class AlitaClient:
         self.bucket_url = f"{self.base_url}{self.api_path}/artifacts/buckets/{self.project_id}"
         self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
         self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
+        self.models_url = f'{self.base_url}{self.api_path}/configurations/models/{self.project_id}?include_shared=true'
         self.image_generation_url = f"{self.base_url}{self.llm_path}/images/generations"
         self.configurations: list = configurations or []
         self.model_timeout = kwargs.get('model_timeout', 120)
@@ -175,6 +176,20 @@ class AlitaClient:
             return resp.json()
         return []

+    def get_available_models(self):
+        """Get list of available models from the configurations API.
+
+        Returns:
+            List of model dictionaries with 'name' and other properties,
+            or empty list if request fails.
+        """
+        resp = requests.get(self.models_url, headers=self.headers, verify=False)
+        if resp.ok:
+            data = resp.json()
+            # API returns {"items": [...], ...}
+            return data.get('items', [])
+        return []
+
     def get_embeddings(self, embedding_model: str) -> OpenAIEmbeddings:
         """
         Get an instance of OpenAIEmbeddings configured with the project ID and auth token.
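To make the new endpoint concrete, a minimal usage sketch follows. It is not part of the diff; the constructor arguments are assumptions inferred from the attributes visible above, so treat them as illustrative only.

# Usage sketch (illustrative, not from the diff). The constructor arguments
# below are assumed; only get_available_models() itself is shown in this release.
from alita_sdk.runtime.clients.client import AlitaClient

client = AlitaClient(
    base_url="https://example.alita.host",  # assumed deployment URL
    project_id=1,                           # assumed project id
    auth_token="secret",                    # assumed credential kwarg
)
models = client.get_available_models()      # [] when the HTTP call fails
print([m.get("name") for m in models])      # each item carries 'name' plus other properties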
@@ -565,7 +580,7 @@ class AlitaClient:
             monitoring_meta = tasknode_task.meta.get("monitoring", {})
             return monitoring_meta["user_id"]
         except Exception as e:
-            logger.warning(f"Error: Could not determine user ID for MCP tool: {e}")
+            logger.debug(f"Error: Could not determine user ID for MCP tool: {e}")
         return None

     def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
alita_sdk/runtime/langchain/constants.py
@@ -83,4 +83,5 @@ DEFAULT_MULTIMODAL_PROMPT = """

 ELITEA_RS = "elitea_response"
 PRINTER = "printer"
-PRINTER_NODE_RS = "printer_output"
+PRINTER_NODE_RS = "printer_output"
+PRINTER_COMPLETED_STATE = "PRINTER_COMPLETED"
alita_sdk/runtime/langchain/langraph_agent.py
@@ -19,7 +19,7 @@ from langgraph.managed.base import is_managed_value
 from langgraph.prebuilt import InjectedStore
 from langgraph.store.base import BaseStore

-from .constants import PRINTER_NODE_RS, PRINTER
+from .constants import PRINTER_NODE_RS, PRINTER, PRINTER_COMPLETED_STATE
 from .mixedAgentRenderes import convert_message_to_json
 from .utils import create_state, propagate_the_input_mapping, safe_format
 from ..tools.function import FunctionTool
@@ -244,11 +244,19 @@ class PrinterNode(Runnable):
         result = {}
         logger.debug(f"Initial text pattern: {self.input_mapping}")
         mapping = propagate_the_input_mapping(self.input_mapping, [], state)
+        # for printer node we expect that all the lists will be joined into strings already
+        # Join any lists that haven't been converted yet
+        for key, value in mapping.items():
+            if isinstance(value, list):
+                mapping[key] = ', '.join(str(item) for item in value)
         if mapping.get(PRINTER) is None:
             raise ToolException(f"PrinterNode requires '{PRINTER}' field in input mapping")
         formatted_output = mapping[PRINTER]
         # add info label to the printer's output
-        if formatted_output:
+        if not formatted_output == PRINTER_COMPLETED_STATE:
+            # convert formatted output to string if it's not
+            if not isinstance(formatted_output, str):
+                formatted_output = str(formatted_output)
             formatted_output += f"\n\n-----\n*How to proceed?*\n* *to resume the pipeline - type anything...*"
         logger.debug(f"Formatted output: {formatted_output}")
         result[PRINTER_NODE_RS] = formatted_output
@@ -666,7 +674,7 @@ def create_graph(
             # reset printer output variable to avoid carrying over
             reset_node_id = f"{node_id}_reset"
             lg_builder.add_node(reset_node_id, PrinterNode(
-                input_mapping={'printer': {'type': 'fixed', 'value': ''}}
+                input_mapping={'printer': {'type': 'fixed', 'value': PRINTER_COMPLETED_STATE}}
             ))
             lg_builder.add_conditional_edges(node_id, TransitionalEdge(reset_node_id))
             lg_builder.add_conditional_edges(reset_node_id, TransitionalEdge(clean_string(node['transition'])))
@@ -900,11 +908,15 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         logging.info(f"Input: {thread_id} - {input}")
         if self.checkpointer and self.checkpointer.get_tuple(config):
             self.update_state(config, input)
-            result = super().invoke(None, config=config, *args, **kwargs)
+            if config.pop("should_continue", False):
+                invoke_input = input
+            else:
+                invoke_input = None
+            result = super().invoke(invoke_input, config=config, *args, **kwargs)
         else:
             result = super().invoke(input, config=config, *args, **kwargs)
         try:
-            if not result.get(PRINTER_NODE_RS):
+            if result.get(PRINTER_NODE_RS) == PRINTER_COMPLETED_STATE:
                 output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
                               result['messages'][-1].content)
             else:
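For context, a sketch of how a caller might drive the new `should_continue` flag, assuming `agent` is a `LangGraphAgentRunnable` compiled with a checkpointer; the config shape is inferred solely from the `config.pop("should_continue", False)` call above.

def resume_thread(agent, thread_id: str, user_text: str):
    """Hypothetical helper (not from the diff): resume a checkpointed run.

    With a checkpoint present for the thread, should_continue=True forwards
    the fresh input into the resumed run; without it, invoke() resumes from
    saved state with input=None, as before.
    """
    config = {
        "configurable": {"thread_id": thread_id},  # standard LangGraph thread key
        "should_continue": True,                   # popped by invoke() above
    }
    return agent.invoke({"input": user_text}, config=config)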
alita_sdk/runtime/langchain/utils.py
@@ -131,7 +131,7 @@ def parse_type(type_str):


 def create_state(data: Optional[dict] = None):
-    state_dict = {'input': str, 'router_output': str,
+    state_dict = {'input': str, 'messages': 'list[str]', 'router_output': str,
                   ELITEA_RS: str, PRINTER_NODE_RS: str} # Always include router_output
     types_dict = {}
     if not data:
alita_sdk/runtime/tools/function.py
@@ -16,6 +16,18 @@ from ..langchain.utils import propagate_the_input_mapping
 logger = logging.getLogger(__name__)


+def replace_escaped_newlines(data):
+    """
+    Replace \\n with \n in all string values recursively.
+    Required for sanitization of state variables in code node
+    """
+    if isinstance(data, dict):
+        return {key: replace_escaped_newlines(value) for key, value in data.items()}
+    elif isinstance(data, str):
+        return data.replace('\\n', '\n')
+    else:
+        return data
+
 class FunctionTool(BaseTool):
     name: str = 'FunctionalTool'
     description: str = 'This is direct call node for tools'
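A quick behavior check for the helper above, derived directly from its code; it assumes `replace_escaped_newlines` from the hunk above is in scope.

# Literal "\n" sequences inside nested string values become real newlines;
# non-dict, non-str values pass through untouched.
state = {"report": "line1\\nline2", "count": 2}
fixed = replace_escaped_newlines(state)
assert fixed["report"] == "line1\nline2"  # real newline now
assert fixed["count"] == 2                # non-strings untouched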
@@ -30,11 +42,13 @@ class FunctionTool(BaseTool):
     def _prepare_pyodide_input(self, state: Union[str, dict, ToolCall]) -> str:
         """Prepare input for PyodideSandboxTool by injecting state into the code block."""
         # add state into the code block here since it might be changed during the execution of the code
-        state_copy = deepcopy(state)
+        state_copy = replace_escaped_newlines(deepcopy(state))

         del state_copy['messages'] # remove messages to avoid issues with pickling without langchain-core
         # inject state into the code block as alita_state variable
-        pyodide_predata = f"#state dict\nalita_state = {state_copy}\n"
+        state_json = json.dumps(state_copy, ensure_ascii=False)
+        pyodide_predata = f'#state dict\nimport json\nalita_state = json.loads({json.dumps(state_json)})\n'
+
         return pyodide_predata

     def _handle_pyodide_output(self, tool_result: Any) -> dict:
@@ -94,9 +108,7 @@ class FunctionTool(BaseTool):
         # special handler for PyodideSandboxTool
         if self._is_pyodide_tool():
             code = func_args['code']
-            func_args['code'] = (f"{self._prepare_pyodide_input(state)}\n{code}"
-                                 # handle new lines in the code properly
-                                 .replace('\\n','\\\\n'))
+            func_args['code'] = f"{self._prepare_pyodide_input(state)}\n{code}"
         try:
             tool_result = self.tool.invoke(func_args, config, **kwargs)
             dispatch_custom_event(
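The JSON round-trip in `_prepare_pyodide_input` replaces the old repr-based injection and its fragile `\\n` doubling. A self-contained sketch of the technique, independent of the sandbox itself:

# Dump the state to JSON, embed it as a quoted literal via a second
# json.dumps, and parse it back inside the generated code, so newlines and
# quotes survive the embedding intact.
import json

state_copy = {"input": "line1\nline2", "router_output": 'say "hi"'}
state_json = json.dumps(state_copy, ensure_ascii=False)
predata = f'#state dict\nimport json\nalita_state = json.loads({json.dumps(state_json)})\n'

namespace = {}
exec(predata, namespace)  # stands in for the Pyodide sandbox here
assert namespace["alita_state"] == state_copy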
alita_sdk/runtime/tools/llm.py
@@ -1,3 +1,4 @@
+import asyncio
 import logging
 from traceback import format_exc
 from typing import Any, Optional, List, Union
@@ -132,7 +133,9 @@ class LLMNode(BaseTool):
             struct_model = create_pydantic_model(f"LLMOutput", struct_params)
             completion = llm_client.invoke(messages, config=config)
             if hasattr(completion, 'tool_calls') and completion.tool_calls:
-                new_messages, _ = self.__perform_tool_calling(completion, messages, llm_client, config)
+                new_messages, _ = self._run_async_in_sync_context(
+                    self.__perform_tool_calling(completion, messages, llm_client, config)
+                )
                 llm = self.__get_struct_output_model(llm_client, struct_model)
                 completion = llm.invoke(new_messages, config=config)
                 result = completion.model_dump()
@@ -155,7 +158,9 @@ class LLMNode(BaseTool):
         # Handle both tool-calling and regular responses
         if hasattr(completion, 'tool_calls') and completion.tool_calls:
             # Handle iterative tool-calling and execution
-            new_messages, current_completion = self.__perform_tool_calling(completion, messages, llm_client, config)
+            new_messages, current_completion = self._run_async_in_sync_context(
+                self.__perform_tool_calling(completion, messages, llm_client, config)
+            )

             output_msgs = {"messages": new_messages}
             if self.output_variables:
@@ -190,9 +195,53 @@ class LLMNode(BaseTool):
     def _run(self, *args, **kwargs):
         # Legacy support for old interface
         return self.invoke(kwargs, **kwargs)
+
+    def _run_async_in_sync_context(self, coro):
+        """Run async coroutine from sync context.
+
+        For MCP tools with persistent sessions, we reuse the same event loop
+        that was used to create the MCP client and sessions (set by CLI).
+        """
+        try:
+            loop = asyncio.get_running_loop()
+            # Already in async context - run in thread with new loop
+            import threading
+
+            result_container = []
+
+            def run_in_thread():
+                new_loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(new_loop)
+                try:
+                    result_container.append(new_loop.run_until_complete(coro))
+                finally:
+                    new_loop.close()
+
+            thread = threading.Thread(target=run_in_thread)
+            thread.start()
+            thread.join()
+            return result_container[0] if result_container else None
+
+        except RuntimeError:
+            # No event loop running - use/create persistent loop
+            # This loop is shared with MCP session creation for stateful tools
+            if not hasattr(self.__class__, '_persistent_loop') or \
+                    self.__class__._persistent_loop is None or \
+                    self.__class__._persistent_loop.is_closed():
+                self.__class__._persistent_loop = asyncio.new_event_loop()
+                logger.debug("Created persistent event loop for async tools")
+
+            loop = self.__class__._persistent_loop
+            asyncio.set_event_loop(loop)
+            return loop.run_until_complete(coro)
+
+    async def _arun(self, *args, **kwargs):
+        # Legacy async support
+        return self.invoke(kwargs, **kwargs)

-    def __perform_tool_calling(self, completion, messages, llm_client, config):
+    async def __perform_tool_calling(self, completion, messages, llm_client, config):
         # Handle iterative tool-calling and execution
+        logger.info(f"__perform_tool_calling called with {len(completion.tool_calls) if hasattr(completion, 'tool_calls') else 0} tool calls")
         new_messages = messages + [completion]
         iteration = 0
@@ -230,9 +279,16 @@ class LLMNode(BaseTool):
             if tool_to_execute:
                 try:
                     logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
-                    # Pass the underlying config to the tool execution invoke method
-                    # since it may be another agent, graph, etc. to see it properly in thinking steps
-                    tool_result = tool_to_execute.invoke(tool_args, config=config)
+
+                    # Try async invoke first (for MCP tools), fallback to sync
+                    tool_result = None
+                    try:
+                        # Try async invocation first
+                        tool_result = await tool_to_execute.ainvoke(tool_args, config=config)
+                    except NotImplementedError:
+                        # Tool doesn't support async, use sync invoke
+                        logger.debug(f"Tool '{tool_name}' doesn't support async, using sync invoke")
+                        tool_result = tool_to_execute.invoke(tool_args, config=config)

                     # Create tool message with result - preserve structured content
                     from langchain_core.messages import ToolMessage
@@ -256,7 +312,9 @@ class LLMNode(BaseTool):
                     new_messages.append(tool_message)

                 except Exception as e:
-                    logger.error(f"Error executing tool '{tool_name}': {e}")
+                    import traceback
+                    error_details = traceback.format_exc()
+                    logger.error(f"Error executing tool '{tool_name}': {e}\n{error_details}")
                     # Create error tool message
                     from langchain_core.messages import ToolMessage
                     tool_message = ToolMessage(
alita_sdk/tools/base_indexer_toolkit.py
@@ -4,6 +4,7 @@ import logging
 import time
 from typing import Any, Optional, List, Dict, Generator

+from langchain_core.callbacks import dispatch_custom_event
 from langchain_core.documents import Document
 from pydantic import create_model, Field, SecretStr

@@ -179,11 +180,13 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             #
             results_count = result["count"]
             self.index_meta_update(index_name, IndexerKeywords.INDEX_META_COMPLETED.value, results_count)
+            self._emit_index_event(index_name)
             #
             return {"status": "ok", "message": f"successfully indexed {results_count} documents" if results_count > 0
                     else "no new documents to index"}
         except Exception as e:
             self.index_meta_update(index_name, IndexerKeywords.INDEX_META_FAILED.value, result["count"])
+            self._emit_index_event(index_name, error=str(e))
             raise e

@@ -511,6 +514,54 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         index_meta_doc = Document(page_content=index_meta_raw.get("content", ""), metadata=metadata)
         add_documents(vectorstore=self.vectorstore, documents=[index_meta_doc], ids=[index_meta_raw.get("id")])

+    def _emit_index_event(self, index_name: str, error: Optional[str] = None):
+        """
+        Emit custom event for index data operation.
+
+        Args:
+            index_name: The name of the index
+            error: Error message if the operation failed, None otherwise
+        """
+        index_meta = super().get_index_meta(index_name)
+
+        if not index_meta:
+            logger.warning(
+                f"No index_meta found for index '{index_name}'. "
+                "Cannot emit index event."
+            )
+            return
+
+        metadata = index_meta.get("metadata", {})
+
+        # Determine if this is a reindex operation
+        history_raw = metadata.get("history", "[]")
+        try:
+            history = json.loads(history_raw) if history_raw.strip() else []
+            is_reindex = len(history) > 1
+        except (json.JSONDecodeError, TypeError):
+            is_reindex = False
+
+        # Build event message
+        event_data = {
+            "id": index_meta.get("id"),
+            "index_name": index_name,
+            "state": metadata.get("state"),
+            "error": error,
+            "reindex": is_reindex,
+            "indexed": metadata.get("indexed", 0),
+            "updated": metadata.get("updated", 0),
+        }
+
+        # Emit the event
+        try:
+            dispatch_custom_event("index_data_status", event_data)
+            logger.debug(
+                f"Emitted index_data_status event for index "
+                f"'{index_name}': {event_data}"
+            )
+        except Exception as e:
+            logger.warning(f"Failed to emit index_data_status event: {e}")
+
     def get_available_tools(self):
         """
         Returns the standardized vector search tools (search operations only).
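A sketch of how the new `index_data_status` event might be consumed. The handler below is hypothetical, but `on_custom_event` is the standard langchain-core hook for `dispatch_custom_event` payloads:

from langchain_core.callbacks import BaseCallbackHandler

class IndexStatusHandler(BaseCallbackHandler):  # hypothetical consumer
    def on_custom_event(self, name, data, **kwargs):
        if name != "index_data_status":
            return
        if data.get("error"):
            print(f"index '{data['index_name']}' failed: {data['error']}")
        else:
            verb = "re-indexed" if data.get("reindex") else "indexed"
            print(f"index '{data['index_name']}' {verb}: "
                  f"{data.get('indexed', 0)} docs, state={data.get('state')}")

# Attach via config so dispatch_custom_event can reach it, e.g. (assumed call site):
# toolkit_tool.invoke(args, config={"callbacks": [IndexStatusHandler()]})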
@@ -564,6 +615,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
                 "mode": "list_collections",
                 "ref": self.list_collections,
                 "description": self.list_collections.__doc__,
-                "args_schema": create_model("ListCollectionsParams") # No parameters
+                # No parameters
+                "args_schema": create_model("ListCollectionsParams")
             },
-        ]
+        ]