alita-sdk 0.3.376__py3-none-any.whl → 0.3.435__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of alita-sdk might be problematic.

Files changed (60)
  1. alita_sdk/configurations/bitbucket.py +95 -0
  2. alita_sdk/configurations/confluence.py +96 -1
  3. alita_sdk/configurations/gitlab.py +79 -0
  4. alita_sdk/configurations/jira.py +103 -0
  5. alita_sdk/configurations/testrail.py +88 -0
  6. alita_sdk/configurations/xray.py +93 -0
  7. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  8. alita_sdk/configurations/zephyr_essential.py +75 -0
  9. alita_sdk/runtime/clients/client.py +9 -4
  10. alita_sdk/runtime/clients/mcp_discovery.py +342 -0
  11. alita_sdk/runtime/clients/mcp_manager.py +262 -0
  12. alita_sdk/runtime/clients/sandbox_client.py +8 -0
  13. alita_sdk/runtime/langchain/assistant.py +41 -38
  14. alita_sdk/runtime/langchain/constants.py +5 -1
  15. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  16. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  17. alita_sdk/runtime/langchain/document_loaders/constants.py +28 -12
  18. alita_sdk/runtime/langchain/langraph_agent.py +91 -27
  19. alita_sdk/runtime/langchain/utils.py +24 -4
  20. alita_sdk/runtime/models/mcp_models.py +57 -0
  21. alita_sdk/runtime/toolkits/__init__.py +24 -0
  22. alita_sdk/runtime/toolkits/application.py +8 -1
  23. alita_sdk/runtime/toolkits/mcp.py +787 -0
  24. alita_sdk/runtime/toolkits/tools.py +98 -50
  25. alita_sdk/runtime/tools/__init__.py +7 -2
  26. alita_sdk/runtime/tools/application.py +7 -0
  27. alita_sdk/runtime/tools/function.py +20 -28
  28. alita_sdk/runtime/tools/graph.py +10 -4
  29. alita_sdk/runtime/tools/image_generation.py +104 -8
  30. alita_sdk/runtime/tools/llm.py +146 -114
  31. alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
  32. alita_sdk/runtime/tools/mcp_server_tool.py +79 -10
  33. alita_sdk/runtime/tools/sandbox.py +166 -63
  34. alita_sdk/runtime/tools/vectorstore.py +3 -2
  35. alita_sdk/runtime/tools/vectorstore_base.py +4 -3
  36. alita_sdk/runtime/utils/streamlit.py +34 -3
  37. alita_sdk/runtime/utils/toolkit_utils.py +5 -2
  38. alita_sdk/runtime/utils/utils.py +1 -0
  39. alita_sdk/tools/__init__.py +48 -31
  40. alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
  41. alita_sdk/tools/base_indexer_toolkit.py +75 -66
  42. alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
  43. alita_sdk/tools/code_indexer_toolkit.py +13 -3
  44. alita_sdk/tools/confluence/api_wrapper.py +29 -7
  45. alita_sdk/tools/confluence/loader.py +10 -0
  46. alita_sdk/tools/elitea_base.py +7 -7
  47. alita_sdk/tools/gitlab/api_wrapper.py +11 -7
  48. alita_sdk/tools/jira/api_wrapper.py +1 -1
  49. alita_sdk/tools/openapi/__init__.py +10 -1
  50. alita_sdk/tools/qtest/api_wrapper.py +522 -74
  51. alita_sdk/tools/sharepoint/api_wrapper.py +104 -33
  52. alita_sdk/tools/sharepoint/authorization_helper.py +175 -1
  53. alita_sdk/tools/sharepoint/utils.py +8 -2
  54. alita_sdk/tools/utils/content_parser.py +27 -16
  55. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +19 -6
  56. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/METADATA +1 -1
  57. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/RECORD +60 -55
  58. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/WHEEL +0 -0
  59. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/licenses/LICENSE +0 -0
  60. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/top_level.txt +0 -0

alita_sdk/runtime/toolkits/tools.py

@@ -1,4 +1,5 @@
 import logging
+from typing import Optional
 
 from langchain_core.tools import ToolException
 from langgraph.store.base import BaseStore
@@ -11,7 +12,10 @@ from .datasource import DatasourcesToolkit
 from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
+from .mcp import McpToolkit
 from ..tools.mcp_server_tool import McpServerTool
+from ..tools.sandbox import SandboxToolkit
+from ..tools.image_generation import ImageGenerationToolkit
 # Import community tools
 from ...community import get_toolkits as community_toolkits, get_tools as community_tools
 from ...tools.memory import MemoryToolkit
@@ -24,64 +28,98 @@ def get_toolkits():
     core_toolkits = [
         ArtifactToolkit.toolkit_config_schema(),
         MemoryToolkit.toolkit_config_schema(),
-        VectorStoreToolkit.toolkit_config_schema()
+        VectorStoreToolkit.toolkit_config_schema(),
+        SandboxToolkit.toolkit_config_schema(),
+        ImageGenerationToolkit.toolkit_config_schema(),
+        McpToolkit.toolkit_config_schema()
     ]
 
     return core_toolkits + community_toolkits() + alita_toolkits()
 
 
-def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None) -> list:
+def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None, debug_mode: Optional[bool] = False) -> list:
     prompts = []
     tools = []
 
     for tool in tools_list:
-        if tool['type'] == 'datasource':
-            tools.extend(DatasourcesToolkit.get_toolkit(
-                alita_client,
-                datasource_ids=[int(tool['settings']['datasource_id'])],
-                selected_tools=tool['settings']['selected_tools'],
-                toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
-            ).get_tools())
-        elif tool['type'] == 'application' and tool.get('agent_type', '') != 'pipeline' :
-            tools.extend(ApplicationToolkit.get_toolkit(
-                alita_client,
-                application_id=int(tool['settings']['application_id']),
-                application_version_id=int(tool['settings']['application_version_id']),
-                selected_tools=[]
-            ).get_tools())
-        elif tool['type'] == 'application' and tool.get('agent_type', '') == 'pipeline':
-            # static get_toolkit returns a list of CompiledStateGraph stubs
-            tools.extend(SubgraphToolkit.get_toolkit(
-                alita_client,
-                application_id=int(tool['settings']['application_id']),
-                application_version_id=int(tool['settings']['application_version_id']),
-                app_api_key=alita_client.auth_token,
-                selected_tools=[],
-                llm=llm
-            ))
-        elif tool['type'] == 'memory':
-            tools += MemoryToolkit.get_toolkit(
-                namespace=tool['settings'].get('namespace', str(tool['id'])),
-                pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
-                store=memory_store,
-            ).get_tools()
-        elif tool['type'] == 'artifact':
-            tools.extend(ArtifactToolkit.get_toolkit(
-                client=alita_client,
-                bucket=tool['settings']['bucket'],
-                toolkit_name=tool.get('toolkit_name', ''),
-                selected_tools=tool['settings'].get('selected_tools', []),
-                llm=llm,
-                # indexer settings
-                pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
-                embedding_model=tool['settings'].get('embedding_model'),
-                collection_name=f"{tool.get('toolkit_name')}",
-            ).get_tools())
-        elif tool['type'] == 'vectorstore':
-            tools.extend(VectorStoreToolkit.get_toolkit(
-                llm=llm,
-                toolkit_name=tool.get('toolkit_name', ''),
-                **tool['settings']).get_tools())
+        try:
+            if tool['type'] == 'datasource':
+                tools.extend(DatasourcesToolkit.get_toolkit(
+                    alita_client,
+                    datasource_ids=[int(tool['settings']['datasource_id'])],
+                    selected_tools=tool['settings']['selected_tools'],
+                    toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
+                ).get_tools())
+            elif tool['type'] == 'application':
+                tools.extend(ApplicationToolkit.get_toolkit(
+                    alita_client,
+                    application_id=int(tool['settings']['application_id']),
+                    application_version_id=int(tool['settings']['application_version_id']),
+                    selected_tools=[]
+                ).get_tools())
+                # backward compatibility for pipeline application type as subgraph node
+                if tool.get('agent_type', '') == 'pipeline':
+                    # static get_toolkit returns a list of CompiledStateGraph stubs
+                    tools.extend(SubgraphToolkit.get_toolkit(
+                        alita_client,
+                        application_id=int(tool['settings']['application_id']),
+                        application_version_id=int(tool['settings']['application_version_id']),
+                        app_api_key=alita_client.auth_token,
+                        selected_tools=[],
+                        llm=llm
+                    ))
+            elif tool['type'] == 'memory':
+                tools += MemoryToolkit.get_toolkit(
+                    namespace=tool['settings'].get('namespace', str(tool['id'])),
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    store=memory_store,
+                ).get_tools()
+            # TODO: update configuration of internal tools
+            elif tool['type'] == 'internal_tool':
+                if tool['name'] == 'pyodide':
+                    tools += SandboxToolkit.get_toolkit(
+                        stateful=False,
+                        allow_net=True,
+                        alita_client=alita_client,
+                    ).get_tools()
+                elif tool['name'] == 'image_generation':
+                    if alita_client and alita_client.model_image_generation:
+                        tools += ImageGenerationToolkit.get_toolkit(
+                            client=alita_client,
+                        ).get_tools()
+                    else:
+                        logger.warning("Image generation internal tool requested "
+                                       "but no image generation model configured")
+            elif tool['type'] == 'artifact':
+                tools.extend(ArtifactToolkit.get_toolkit(
+                    client=alita_client,
+                    bucket=tool['settings']['bucket'],
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    selected_tools=tool['settings'].get('selected_tools', []),
+                    llm=llm,
+                    # indexer settings
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    embedding_model=tool['settings'].get('embedding_model'),
+                    collection_name=f"{tool.get('toolkit_name')}",
+                    collection_schema = str(tool['id'])
+                ).get_tools())
+            elif tool['type'] == 'vectorstore':
+                tools.extend(VectorStoreToolkit.get_toolkit(
+                    llm=llm,
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    **tool['settings']).get_tools())
+            elif tool['type'] == 'mcp':
+                tools.extend(McpToolkit.get_toolkit(
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    client=alita_client,
+                    **tool['settings']).get_tools())
+        except Exception as e:
+            logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}", exc_info=True)
+            if debug_mode:
+                logger.info("Skipping tool initialization error due to debug mode.")
+                continue
+            else:
+                raise ToolException(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}")
 
     if len(prompts) > 0:
         tools += PromptToolkit.get_toolkit(alita_client, prompts).get_tools()
@@ -90,7 +128,8 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
     tools += community_tools(tools_list, alita_client, llm)
     # Add alita tools
     tools += alita_tools(tools_list, alita_client, llm, memory_store)
-    # Add MCP tools
+    # Add MCP tools registered via alita-mcp CLI (static registry)
+    # Note: Tools with type='mcp' are already handled in main loop above
     tools += _mcp_tools(tools_list, alita_client)
 
     # Sanitize tool names to meet OpenAI's function naming requirements
@@ -145,6 +184,10 @@ def _sanitize_tool_names(tools: list) -> list:
 
 
 def _mcp_tools(tools_list, alita):
+    """
+    Handle MCP tools registered via alita-mcp CLI (static registry).
+    Skips tools with type='mcp' as those are handled by dynamic discovery.
+    """
     try:
         all_available_toolkits = alita.get_mcp_toolkits()
         toolkit_lookup = {tk["name"]: tk for tk in all_available_toolkits}
@@ -152,6 +195,11 @@
         #
         for selected_toolkit in tools_list:
            server_toolkit_name = selected_toolkit['type']
+
+           # Skip tools with type='mcp' - they're handled by dynamic discovery
+           if server_toolkit_name == 'mcp':
+               continue
+
            toolkit_conf = toolkit_lookup.get(server_toolkit_name)
            #
            if not toolkit_conf:
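
The reworked get_tools loop above wraps each toolkit initialization in a per-tool try/except and adds a debug_mode flag: a failing toolkit is logged and skipped when debug_mode=True, otherwise a ToolException is raised. A minimal sketch of a call, assuming an already constructed Alita client and LLM; the tool entries below are placeholders, not real toolkit configurations:

from alita_sdk.runtime.toolkits.tools import get_tools

# client and llm are assumed to exist; the settings dicts are illustrative only
tools = get_tools(
    tools_list=[
        {"id": 1, "type": "memory", "settings": {"namespace": "demo", "pgvector_configuration": {}}},
        {"id": 2, "type": "mcp", "toolkit_name": "my_mcp", "settings": {}},  # hypothetical MCP toolkit entry
    ],
    alita_client=client,
    llm=llm,
    debug_mode=True,  # log and skip a failing toolkit instead of raising ToolException
)
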
alita_sdk/runtime/tools/__init__.py

@@ -5,7 +5,11 @@ This module provides various tools that can be used within LangGraph agents.
 
 from .sandbox import PyodideSandboxTool, StatefulPyodideSandboxTool, create_sandbox_tool
 from .echo import EchoTool
-from .image_generation import ImageGenerationTool, create_image_generation_tool
+from .image_generation import (
+    ImageGenerationTool,
+    create_image_generation_tool,
+    ImageGenerationToolkit
+)
 
 __all__ = [
     "PyodideSandboxTool",
@@ -13,5 +17,6 @@ __all__ = [
     "create_sandbox_tool",
    "EchoTool",
    "ImageGenerationTool",
+   "ImageGenerationToolkit",
    "create_image_generation_tool"
-]
+]

alita_sdk/runtime/tools/application.py

@@ -50,6 +50,8 @@ class Application(BaseTool):
     application: Any
     args_schema: Type[BaseModel] = applicationToolSchema
     return_type: str = "str"
+    client: Any
+    args_runnable: dict = {}
 
     @field_validator('name', mode='before')
     @classmethod
@@ -66,6 +68,11 @@ class Application(BaseTool):
         return self._run(*config, **all_kwargs)
 
     def _run(self, *args, **kwargs):
+        if self.client and self.args_runnable:
+            # Recreate new LanggraphAgentRunnable in order to reflect the current input_mapping (it can be dynamic for pipelines).
+            # Actually, for pipelines agent toolkits LanggraphAgentRunnable is created (for LLMNode) before pipeline's schema parsing.
+            application_variables = {k: {"name": k, "value": v} for k, v in kwargs.items()}
+            self.application = self.client.application(**self.args_runnable, application_variables=application_variables)
         response = self.application.invoke(formulate_query(kwargs))
         if self.return_type == "str":
             return response["output"]
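
The new _run branch rebuilds the application runnable from args_runnable so dynamic pipeline inputs are reflected, passing the call kwargs as application_variables. A small sketch of just that mapping; the keys and values are made up:

# Illustrative kwargs a pipeline node might receive
kwargs = {"ticket_id": "PRJ-1", "environment": "staging"}

# Same transformation as in Application._run above
application_variables = {k: {"name": k, "value": v} for k, v in kwargs.items()}
# {'ticket_id': {'name': 'ticket_id', 'value': 'PRJ-1'},
#  'environment': {'name': 'environment', 'value': 'staging'}}
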
alita_sdk/runtime/tools/function.py

@@ -7,7 +7,7 @@ from langchain_core.callbacks import dispatch_custom_event
 from langchain_core.messages import ToolCall
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool, ToolException
-from typing import Any, Optional, Union, Annotated
+from typing import Any, Optional, Union
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from pydantic import ValidationError
 
@@ -31,31 +31,10 @@ class FunctionTool(BaseTool):
         """Prepare input for PyodideSandboxTool by injecting state into the code block."""
         # add state into the code block here since it might be changed during the execution of the code
         state_copy = deepcopy(state)
-        # pickle state
-        import pickle
 
         del state_copy['messages'] # remove messages to avoid issues with pickling without langchain-core
-        serialized_state = pickle.dumps(state_copy)
         # inject state into the code block as alita_state variable
-        pyodide_predata = f"""import pickle\nalita_state = pickle.loads({serialized_state})\n"""
-        # add classes related to sandbox client
-        # read the content of alita_sdk/runtime/cliens/sandbox_client.py
-        try:
-            import os
-            from pathlib import Path
-
-            # Get the directory of the current file and construct the path to sandbox_client.py
-            current_dir = Path(__file__).parent
-            sandbox_client_path = current_dir.parent / 'clients' / 'sandbox_client.py'
-
-            with open(sandbox_client_path, 'r') as f:
-                sandbox_client_code = f.read()
-            pyodide_predata += f"\n{sandbox_client_code}\n"
-            pyodide_predata += (f"alita_client = SandboxClient(base_url='{self.alita_client.base_url}',"
-                                f"project_id={self.alita_client.project_id},"
-                                f"auth_token='{self.alita_client.auth_token}')")
-        except FileNotFoundError:
-            logger.error(f"sandbox_client.py not found at {sandbox_client_path}. Ensure the file exists.")
+        pyodide_predata = f"#state dict\nalita_state = {state_copy}\n"
         return pyodide_predata
 
     def _handle_pyodide_output(self, tool_result: Any) -> dict:
@@ -64,6 +43,10 @@ class FunctionTool(BaseTool):
 
         if self.output_variables:
             for var in self.output_variables:
+                if var == "messages":
+                    tool_result_converted.update(
+                        {"messages": [{"role": "assistant", "content": dumps(tool_result)}]})
+                    continue
                 if isinstance(tool_result, dict) and var in tool_result:
                     tool_result_converted[var] = tool_result[var]
                 else:
@@ -111,7 +94,9 @@ class FunctionTool(BaseTool):
         # special handler for PyodideSandboxTool
         if self._is_pyodide_tool():
             code = func_args['code']
-            func_args['code'] = f"{self._prepare_pyodide_input(state)}\n{code}"
+            func_args['code'] = (f"{self._prepare_pyodide_input(state)}\n{code}"
+                                 # handle new lines in the code properly
+                                 .replace('\\n','\\\\n'))
         try:
             tool_result = self.tool.invoke(func_args, config, **kwargs)
             dispatch_custom_event(
@@ -131,14 +116,21 @@
             if not self.output_variables:
                 return {"messages": [{"role": "assistant", "content": dumps(tool_result)}]}
             else:
-                if self.output_variables[0] == "messages":
-                    return {
+                if "messages" in self.output_variables:
+                    messages_dict = {
                         "messages": [{
                             "role": "assistant",
-                            "content": dumps(tool_result) if not isinstance(tool_result, ToolException) else str(
-                                tool_result)
+                            "content": dumps(tool_result) if not isinstance(tool_result, ToolException)
+                            else str(tool_result)
                         }]
                     }
+                    for var in self.output_variables:
+                        if var != "messages":
+                            if isinstance(tool_result, dict) and var in tool_result:
+                                messages_dict[var] = tool_result[var]
+                            else:
+                                messages_dict[var] = tool_result
+                    return messages_dict
                 else:
                     return { self.output_variables[0]: tool_result }
         except ValidationError:
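
The pickle-based state injection is replaced with a plain dict literal, so the sandbox no longer needs pickle or the embedded SandboxClient source, and literal backslash-n sequences in the composed code are additionally escaped. Roughly, the generated preamble now looks like the sketch below; the state values are made up:

from copy import deepcopy

state = {"messages": [], "plan": "draft", "step": 2}   # illustrative agent state
state_copy = deepcopy(state)
del state_copy['messages']                             # messages are stripped before injection

pyodide_predata = f"#state dict\nalita_state = {state_copy}\n"
print(pyodide_predata)
# #state dict
# alita_state = {'plan': 'draft', 'step': 2}
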
alita_sdk/runtime/tools/graph.py

@@ -47,8 +47,8 @@ def formulate_query(kwargs):
 
 
 class GraphTool(BaseTool):
-    name: str
-    description: str
+    name: str = 'GraphTool'
+    description: str = 'Graph tool for tools'
     graph: CompiledStateGraph
     args_schema: Type[BaseModel] = graphToolSchema
     return_type: str = "str"
@@ -65,10 +65,16 @@ class GraphTool(BaseTool):
         all_kwargs = {**kwargs, **extras, **schema_values}
         if config is None:
             config = {}
-        return self._run(*config, **all_kwargs)
+        # Pass the config to the _run empty or the one passed from the parent executor.
+        return self._run(config, **all_kwargs)
 
     def _run(self, *args, **kwargs):
-        response = self.graph.invoke(formulate_query(kwargs))
+        config = None
+        # From invoke method we are passing only 1 arg so it is safe to do this condition and config assignment.
+        # Default to None is safe because it will be checked also on the langchain side.
+        if args:
+            config = args[0]
+        response = self.graph.invoke(formulate_query(kwargs), config=config)
         if self.return_type == "str":
             return response["output"]
         else:
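
The GraphTool change forwards the caller's RunnableConfig into the compiled subgraph instead of dropping it, so callbacks and thread metadata from the parent executor propagate. A hedged sketch of the effect on the underlying LangGraph call; compiled_graph, the input payload, and the config values are assumed placeholders:

# before: compiled_graph.invoke(inputs)                 # caller's config was lost
# after:  compiled_graph.invoke(inputs, config=config)  # callbacks/thread metadata propagate
config = {"configurable": {"thread_id": "session-1"}}   # illustrative RunnableConfig
response = compiled_graph.invoke({"input": "summarize the report"}, config=config)
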
alita_sdk/runtime/tools/image_generation.py

@@ -2,16 +2,59 @@
 Image generation tool for Alita SDK.
 """
 import logging
-from typing import Optional, Type, Any
-from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field
+from typing import Optional, Type, Any, List, Literal
+from langchain_core.tools import BaseTool, BaseToolkit
+from pydantic import BaseModel, Field, create_model, ConfigDict
 
 logger = logging.getLogger(__name__)
 
+name = "image_generation"
+
+
+def get_tools(tools_list: list, alita_client=None, llm=None,
+              memory_store=None):
+    """
+    Get image generation tools for the provided tool configurations.
+
+    Args:
+        tools_list: List of tool configurations
+        alita_client: Alita client instance (required for image generation)
+        llm: LLM client instance (unused for image generation)
+        memory_store: Optional memory store instance (unused)
+
+    Returns:
+        List of image generation tools
+    """
+    all_tools = []
+
+    for tool in tools_list:
+        if (tool.get('type') == 'image_generation' or
+                tool.get('toolkit_name') == 'image_generation'):
+            try:
+                if not alita_client:
+                    logger.error("Alita client is required for image "
+                                 "generation tools")
+                    continue
+
+                toolkit_instance = ImageGenerationToolkit.get_toolkit(
+                    client=alita_client,
+                    toolkit_name=tool.get('toolkit_name', '')
+                )
+                all_tools.extend(toolkit_instance.get_tools())
+            except Exception as e:
+                logger.error(f"Error in image generation toolkit "
+                             f"get_tools: {e}")
+                logger.error(f"Tool config: {tool}")
+                raise
+
+    return all_tools
+
 
 class ImageGenerationInput(BaseModel):
     """Input schema for image generation tool."""
-    prompt: str = Field(description="Text prompt describing the image to generate")
+    prompt: str = Field(
+        description="Text prompt describing the image to generate"
+    )
     n: int = Field(
         default=1, description="Number of images to generate (1-10)",
         ge=1, le=10
@@ -22,7 +65,7 @@ class ImageGenerationInput(BaseModel):
     )
     quality: str = Field(
         default="auto",
-        description="Quality of the generated image ('low', 'medium', 'high', 'auto')"
+        description="Quality of the generated image ('low', 'medium', 'high')"
     )
     style: Optional[str] = Field(
         default=None, description="Style of the generated image (optional)"
@@ -69,7 +112,8 @@ class ImageGenerationTool(BaseTool):
        else:
            content_chunks.append({
                "type": "text",
-               "text": f"Generated {len(images)} images for prompt: '{prompt}'"
+               "text": f"Generated {len(images)} images for "
+                       f"prompt: '{prompt}'"
            })
 
        # Add image content for each generated image
@@ -85,7 +129,8 @@
            content_chunks.append({
                "type": "image_url",
                "image_url": {
-                   "url": f"data:image/png;base64,{image_data['b64_json']}"
+                   "url": f"data:image/png;base64,"
+                          f"{image_data['b64_json']}"
                }
            })
 
@@ -94,7 +139,8 @@
            # Fallback to text response if no images in result
            return [{
                "type": "text",
-               "text": f"Image generation completed but no images returned: {result}"
+               "text": f"Image generation completed but no images "
+                       f"returned: {result}"
            }]
 
        except Exception as e:
@@ -114,3 +160,53 @@
 def create_image_generation_tool(client):
     """Create an image generation tool with the provided Alita client."""
     return ImageGenerationTool(client=client)
+
+
+class ImageGenerationToolkit(BaseToolkit):
+    """Toolkit for image generation tools."""
+    tools: List[BaseTool] = []
+
+    @staticmethod
+    def toolkit_config_schema() -> BaseModel:
+        """Get the configuration schema for the image generation toolkit."""
+        # Create sample tool to get schema
+        sample_tool = ImageGenerationTool(client=None)
+        selected_tools = {sample_tool.name: sample_tool.args_schema.schema()}
+
+        return create_model(
+            'image_generation',
+            selected_tools=(
+                List[Literal[tuple(selected_tools)]],
+                Field(
+                    default=[],
+                    json_schema_extra={'args_schemas': selected_tools}
+                )
+            ),
+            __config__=ConfigDict(json_schema_extra={
+                'metadata': {
+                    "label": "Image Generation",
+                    "icon_url": "image_generation.svg",
+                    "hidden": True,
+                    "categories": ["internal_tool"],
+                    "extra_categories": ["image generation"],
+                }
+            })
+        )
+
+    @classmethod
+    def get_toolkit(cls, client=None, **kwargs):
+        """
+        Get toolkit with image generation tools.
+
+        Args:
+            client: Alita client instance (required)
+            **kwargs: Additional arguments
+        """
+        if not client:
+            raise ValueError("Alita client is required for image generation")
+
+        tools = [ImageGenerationTool(client=client)]
+        return cls(tools=tools)
+
+    def get_tools(self):
+        return self.tools
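
The new ImageGenerationToolkit mirrors the other core toolkits: toolkit_config_schema() describes the toolkit for the platform UI, and get_toolkit(client=...) returns a toolkit wrapping a single ImageGenerationTool. A minimal usage sketch, assuming an authenticated Alita client with an image-generation model configured:

from alita_sdk.runtime.tools.image_generation import ImageGenerationToolkit

# Pydantic model describing the toolkit configuration (label, icon, categories)
config_model = ImageGenerationToolkit.toolkit_config_schema()

# alita_client is assumed to exist and expose the image-generation model
toolkit = ImageGenerationToolkit.get_toolkit(client=alita_client)
tools = toolkit.get_tools()   # a single ImageGenerationTool bound to the client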