alita-sdk 0.3.457-py3-none-any.whl → 0.3.465-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of alita-sdk might be problematic.

Files changed (37)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent/__init__.py +0 -0
  4. alita_sdk/cli/agent/default.py +176 -0
  5. alita_sdk/cli/agent_executor.py +155 -0
  6. alita_sdk/cli/agent_loader.py +197 -0
  7. alita_sdk/cli/agent_ui.py +218 -0
  8. alita_sdk/cli/agents.py +1911 -0
  9. alita_sdk/cli/callbacks.py +576 -0
  10. alita_sdk/cli/cli.py +159 -0
  11. alita_sdk/cli/config.py +164 -0
  12. alita_sdk/cli/formatting.py +182 -0
  13. alita_sdk/cli/input_handler.py +256 -0
  14. alita_sdk/cli/mcp_loader.py +315 -0
  15. alita_sdk/cli/toolkit.py +330 -0
  16. alita_sdk/cli/toolkit_loader.py +55 -0
  17. alita_sdk/cli/tools/__init__.py +36 -0
  18. alita_sdk/cli/tools/approval.py +224 -0
  19. alita_sdk/cli/tools/filesystem.py +905 -0
  20. alita_sdk/cli/tools/planning.py +403 -0
  21. alita_sdk/cli/tools/terminal.py +280 -0
  22. alita_sdk/runtime/clients/client.py +16 -1
  23. alita_sdk/runtime/langchain/constants.py +2 -1
  24. alita_sdk/runtime/langchain/langraph_agent.py +17 -5
  25. alita_sdk/runtime/langchain/utils.py +1 -1
  26. alita_sdk/runtime/tools/function.py +17 -5
  27. alita_sdk/runtime/tools/llm.py +65 -7
  28. alita_sdk/tools/base_indexer_toolkit.py +54 -2
  29. alita_sdk/tools/qtest/api_wrapper.py +871 -32
  30. alita_sdk/tools/sharepoint/api_wrapper.py +22 -2
  31. alita_sdk/tools/sharepoint/authorization_helper.py +17 -1
  32. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/METADATA +145 -2
  33. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/RECORD +37 -15
  34. alita_sdk-0.3.465.dist-info/entry_points.txt +2 -0
  35. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/WHEEL +0 -0
  36. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/licenses/LICENSE +0 -0
  37. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,10 @@
+ """
+ Alita SDK CLI - Command-line interface for testing agents and toolkits.
+
+ This module provides a CLI alternative to the Streamlit interface, enabling
+ direct terminal access for GitHub Copilot integration and automation workflows.
+ """
+
+ from .cli import cli
+
+ __all__ = ['cli']
@@ -0,0 +1,17 @@
+ """
+ Entry point for running the Alita CLI as a module.
+
+ Usage:
+     python -m alita_sdk.cli [command] [options]
+ """
+
+ # Suppress warnings before any imports
+ import warnings
+ warnings.filterwarnings('ignore', category=DeprecationWarning)
+ warnings.filterwarnings('ignore', category=UserWarning)
+ warnings.filterwarnings('ignore', message='Unverified HTTPS request')
+
+ from .cli import cli
+
+ if __name__ == '__main__':
+     cli()
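
For automation workflows (for example a CI step), the same entry point can be driven from Python rather than a shell. A minimal sketch follows; `--help` is only a placeholder flag, not a command documented by this diff.

```python
# Minimal sketch: invoke the documented `python -m alita_sdk.cli` entry point from Python.
# Assumes alita-sdk is installed in the current interpreter; `--help` is a placeholder flag.
import subprocess
import sys

subprocess.run([sys.executable, "-m", "alita_sdk.cli", "--help"], check=True)
```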
File without changes
@@ -0,0 +1,176 @@
+ DEFAULT_PROMPT = """You are **Alita**, a Testing Agent running in a terminal-based CLI assistant. Alita is an open-source, agentic testing interface. You are expected to be precise, safe, technical, and helpful.
+
+ Your capabilities:
+
+ - Receive user prompts and other context provided by the harness, such as files in the workspace, logs, test suites, reports, screenshots, API specs, and documentation.
+ - Communicate with the user by streaming thinking & responses, and by making & updating plans.
+ - Emit function calls to run terminal commands, execute test suites, inspect environments, analyze artifacts, and apply patches when tests require updates. Depending on configuration, you may request that these function calls be escalated for approval before executing.
+
+ Within this context, **Alita** refers to the open-source agentic testing interface (not any legacy language model).
+
+ ---
+
+ # How you work
+
+ ## Personality
+
+ You are concise, direct, and friendly. You communicate efficiently and always prioritize actionable test insights. You clearly state assumptions, environment prerequisites, and next steps. Unless explicitly asked, you avoid excessively verbose explanations.
+
+ ---
+
+ # AGENTS.md spec
+
+ `AGENTS.md` files in repositories may contain instructions for working in that specific container — including test conventions, folder structure, naming rules, frameworks in use, test data handling, or how to run validations.
+
+ Rules:
+
+ - The scope of an `AGENTS.md` file covers its entire directory subtree.
+ - Any file you touch must follow instructions from applicable `AGENTS.md` files.
+ - For conflicting instructions, deeper directory `AGENTS.md` takes precedence.
+ - Direct system/developer/user instructions always take precedence.
+
+ ---
+
+ ## Responsiveness
+
+ ### Preamble messages
+
+ Before running tool calls (executing tests, launching commands, applying patches), send a brief preface describing what you’re about to do. It should:
+
+ - Be short (8–12 words)
+ - Group related actions together
+ - Refer to previous context when relevant
+ - Keep a light and collaborative tone
+
+ Example patterns:
+
+ - “Analyzing failing tests next to identify the root cause.”
+ - “Running backend API tests now to reproduce the reported issue.”
+ - “About to patch selectors and re-run UI regression tests.”
+ - “Finished scanning logs; now checking flaky test patterns.”
+ - “Next I’ll generate missing test data and rerun.”
+
+ ---
+
+ ## Planning
+
+ Use `update_plan` when:
+
+ - Tasks involve multiple phases of testing
+ - The sequence of activities matters
+ - Ambiguity requires breaking down the approach
+ - The user requests step-wise execution
+
+ Example of a **high-quality test-oriented plan**:
+
+ 1. Reproduce failure locally
+ 2. Capture failing logs + stack traces
+ 3. Identify root cause in test or code
+ 4. Patch locator + stabilize assertions
+ 5. Run whole suite to confirm no regressions
+
+ Low-quality plans (“run tests → fix things → done”) are not acceptable.
+
+ ---
+
+ ## Task execution
+
+ You are a **testing agent**, not just a code-writing agent. Your responsibilities include:
+
+ - Executing tests across frameworks (API, UI, mobile, backend, contract, load, security)
+ - Analyzing logs, failures, screenshots, metrics, stack traces
+ - Investigating flakiness, nondeterminism, environmental issues
+ - Generating missing tests or aligning test coverage to requirements
+ - Proposing (and applying when asked) patches to fix the root cause of test failures
+ - Updating and creating test cases, fixtures, mocks, test data and configs
+ - Validating integrations (CI/CD, containers, runners, environments)
+ - Surfacing reliability and coverage gaps
+
+ When applying patches, follow repository style and AGENTS.md rules.
+ Avoid modifying unrelated code and avoid adding technical debt.
+
+ Common use cases include:
+
+ - Test execution automation
+ - Manual exploratory testing documentation
+ - Test case generation from requirements
+ - Assertions improvements and selector stabilization
+ - Test coverage analysis
+ - Defect reproduction and debugging
+ - Root cause attribution (test vs product defect)
+
+ ---
+
+ ## Sandbox and approvals
+
+ Sandboxing and approval rules are identical to coding agents, but framed around testing actions:
+
+ You may need escalation before:
+
+ - Creating or modifying files
+ - Installing testing dependencies
+ - Running network-dependent test suites
+ - Performing destructive cleanup actions
+ - Triggering CI pipelines or test runs that write outside workspace
+
+ If sandbox modes and approval rules are not specified, assume:
+
+ - Filesystem: `workspace-write`
+ - Network: ON
+ - Approval: `on-failure`
+
+ ---
+
+ ## Validating your work
+
+ Validation is core to your job.
+
+ - After fixing tests, rerun only the relevant subset first
+ - If stable, run broader suites to validate no regressions
+ - Avoid running full suites unnecessarily when in approval modes that require escalation
+
+ If there are no tests for the change you made, and the project has an established testing pattern, you may add one.
+
+ Avoid fixing unrelated tests unless the user requests it.
+
+ ---
+
+ ## Presenting your work and final message
+
+ Your final message should feel like an update from a senior test engineer handing off state.
+
+ Good patterns include:
+
+ - What was tested
+ - What failed and why
+ - What was fixed
+ - Where files were changed
+ - How to validate locally
+
+ You should not dump full file contents unless the user asks. Reference files and paths directly.
+
+ If relevant, offer optional next steps such as:
+
+ - Running full regression
+ - Adding missing tests
+ - Improving coverage or performance
+ - Integrating into CI
+
+ ---
+
+ ## Answer formatting rules in CLI
+
+ Keep results scannable and technical:
+
+ - Use section headers only where they improve clarity
+ - Use short bullet lists (4–6 key bullets)
+ - Use backticks for code, commands, test names, file paths
+ - Reference files individually to keep them clickable (e.g. `tests/ui/login.spec.ts:44`)
+ - Avoid nested bullet lists or long paragraphs
+
+ Tone: pragmatic, precise, and focused on improving testing reliability and coverage.
+
+ ---
+
+ In short: **Alita is a highly technical manual + automated testing agent** that plans intelligently, executes and analyzes tests across frameworks, fixes issues at their root when permitted, and keeps the user informed without noise.
+ """
@@ -0,0 +1,155 @@
+ """
+ Agent executor creation and management.
+
+ Creates LLM instances and agent executors with support for MCP tools.
+ """
+
+ from typing import Optional, Dict, Any, List, Tuple
+ from rich.console import Console
+
+ from .agent_loader import build_agent_data_structure
+ from alita_sdk.runtime.langchain.assistant import Assistant
+
+ console = Console()
+
+
+ def create_llm_instance(client, model: Optional[str], agent_def: Dict[str, Any],
+                         temperature: Optional[float], max_tokens: Optional[int]):
+     """Create LLM instance with appropriate configuration."""
+     llm_model = model or agent_def.get('model', 'gpt-4o')
+     llm_temperature = temperature if temperature is not None else agent_def.get('temperature', 0.7)
+     llm_max_tokens = max_tokens or agent_def.get('max_tokens', 2000)
+
+     try:
+         llm = client.get_llm(
+             model_name=llm_model,
+             model_config={
+                 'temperature': llm_temperature,
+                 'max_tokens': llm_max_tokens
+             }
+         )
+         return llm, llm_model, llm_temperature, llm_max_tokens
+     except Exception as e:
+         console.print(f"\n✗ [red]Failed to create LLM instance:[/red] {e}")
+         console.print("[yellow]Hint: Make sure OPENAI_API_KEY or other LLM credentials are set[/yellow]")
+         raise
+
+
+ def _create_assistant(client, agent_data: Dict[str, Any], llm, memory, tools: List) -> Assistant:
+     """Create Assistant instance with given configuration.
+
+     Args:
+         client: Alita client instance
+         agent_data: Agent configuration data
+         llm: LLM instance
+         memory: Memory/checkpoint instance
+         tools: List of tools to add to agent
+
+     Returns:
+         Assistant instance
+     """
+     return Assistant(
+         alita=client,
+         data=agent_data,
+         client=llm,
+         chat_history=[],
+         app_type=agent_data.get('agent_type', 'react'),
+         tools=tools,
+         memory=memory,
+         store=None,
+         debug_mode=False,
+         mcp_tokens=None
+     )
+
+
+ def create_agent_executor(client, agent_def: Dict[str, Any], toolkit_configs: List[Dict[str, Any]],
+                           llm, llm_model: str, llm_temperature: float, llm_max_tokens: int, memory,
+                           filesystem_tools: Optional[List] = None, mcp_tools: Optional[List] = None,
+                           terminal_tools: Optional[List] = None, planning_tools: Optional[List] = None):
+     """Create agent executor for local agents with tools (sync version).
+
+     Note: mcp_tools parameter is deprecated - use create_agent_executor_with_mcp for MCP support.
+     """
+     agent_data = build_agent_data_structure(
+         agent_def=agent_def,
+         toolkit_configs=toolkit_configs,
+         llm_model=llm_model,
+         llm_temperature=llm_temperature,
+         llm_max_tokens=llm_max_tokens
+     )
+
+     # Combine all tools
+     additional_tools = []
+     if filesystem_tools:
+         additional_tools.extend(filesystem_tools)
+     if terminal_tools:
+         additional_tools.extend(terminal_tools)
+     if planning_tools:
+         additional_tools.extend(planning_tools)
+     if mcp_tools:
+         additional_tools.extend(mcp_tools)
+
+     assistant = _create_assistant(client, agent_data, llm, memory, additional_tools)
+     return assistant.runnable()
+
+
+ async def create_agent_executor_with_mcp(
+     client,
+     agent_def: Dict[str, Any],
+     toolkit_configs: List[Dict[str, Any]],
+     llm,
+     llm_model: str,
+     llm_temperature: float,
+     llm_max_tokens: int,
+     memory,
+     filesystem_tools: Optional[List] = None,
+     terminal_tools: Optional[List] = None,
+     planning_tools: Optional[List] = None
+ ) -> Tuple[Any, Optional[Any]]:
+     """Create agent executor with MCP tools using persistent sessions.
+
+     Returns:
+         Tuple of (agent_executor, mcp_session_manager) where session_manager must be kept alive
+         to maintain stateful MCP server state (e.g., Playwright browser sessions).
+
+     See: https://github.com/langchain-ai/langchain-mcp-adapters/issues/178
+     """
+     from .mcp_loader import load_mcp_tools_async
+
+     # Separate MCP toolkit configs from regular configs
+     mcp_configs = [tc for tc in toolkit_configs if tc.get('toolkit_type') == 'mcp']
+     regular_configs = [tc for tc in toolkit_configs if tc.get('toolkit_type') != 'mcp']
+
+     # Load MCP tools with persistent sessions
+     mcp_session_manager = None
+     mcp_tools = []
+     if mcp_configs:
+         console.print("\n[cyan]Loading MCP tools with persistent sessions...[/cyan]")
+         mcp_session_manager, mcp_tools = await load_mcp_tools_async(mcp_configs)
+         if mcp_tools:
+             console.print(f"[green]✓ Loaded {len(mcp_tools)} MCP tools with persistent sessions[/green]\n")
+
+     # Build agent data structure
+     agent_data = build_agent_data_structure(
+         agent_def=agent_def,
+         toolkit_configs=regular_configs,
+         llm_model=llm_model,
+         llm_temperature=llm_temperature,
+         llm_max_tokens=llm_max_tokens
+     )
+
+     # Combine all tools
+     additional_tools = []
+     if filesystem_tools:
+         additional_tools.extend(filesystem_tools)
+     if terminal_tools:
+         additional_tools.extend(terminal_tools)
+     if planning_tools:
+         additional_tools.extend(planning_tools)
+     if mcp_tools:
+         additional_tools.extend(mcp_tools)
+
+     assistant = _create_assistant(client, agent_data, llm, memory, additional_tools)
+
+     # Return agent and session manager (must be kept alive for stateful MCP tools)
+     return assistant.runnable(), mcp_session_manager
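
As a rough illustration of how these helpers compose, here is a minimal sketch. `client` and `memory` stand for an already-constructed Alita client and checkpointer (their setup is outside this diff), and the `ainvoke` payload and thread configuration are assumptions about the compiled graph's input schema, not documented behavior.

```python
# Minimal sketch: create an LLM, build the MCP-aware executor, run one turn, then
# release the MCP sessions. Payload keys and thread config are assumptions.
from alita_sdk.cli.agent_executor import create_llm_instance, create_agent_executor_with_mcp


async def run_once(client, memory, agent_def: dict, toolkit_configs: list, user_input: str):
    llm, model, temperature, max_tokens = create_llm_instance(
        client, model=None, agent_def=agent_def, temperature=None, max_tokens=None
    )
    executor, mcp_sessions = await create_agent_executor_with_mcp(
        client, agent_def, toolkit_configs,
        llm, model, temperature, max_tokens, memory,
    )
    try:
        # Keep mcp_sessions alive for the whole run (stateful MCP servers, e.g. Playwright).
        return await executor.ainvoke(
            {"input": user_input, "chat_history": []},
            config={"configurable": {"thread_id": "cli-session"}},
        )
    finally:
        if mcp_sessions is not None and hasattr(mcp_sessions, "aclose"):
            await mcp_sessions.aclose()  # release MCP sessions once finished
```

From synchronous code this would be driven with `asyncio.run(run_once(...))`.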
@@ -0,0 +1,197 @@
+ """
+ Agent loading and definition management.
+
+ Handles loading agent definitions from various file formats (YAML, JSON, Markdown).
+ """
+
+ import json
+ import yaml
+ from pathlib import Path
+ from typing import Dict, Any
+
+ from .config import substitute_env_vars
+
+
+ def load_agent_definition(file_path: str) -> Dict[str, Any]:
+     """
+     Load agent definition from file.
+
+     Supports:
+     - YAML files (.yaml, .yml)
+     - JSON files (.json)
+     - Markdown files with YAML frontmatter (.md)
+
+     Args:
+         file_path: Path to agent definition file
+
+     Returns:
+         Dictionary with agent configuration
+     """
+     path = Path(file_path)
+
+     if not path.exists():
+         raise FileNotFoundError(f"Agent definition not found: {file_path}")
+
+     content = path.read_text()
+
+     # Handle markdown with YAML frontmatter
+     if path.suffix == '.md':
+         if content.startswith('---'):
+             parts = content.split('---', 2)
+             if len(parts) >= 3:
+                 frontmatter = yaml.safe_load(parts[1])
+                 system_prompt = parts[2].strip()
+
+                 # Apply environment variable substitution
+                 system_prompt = substitute_env_vars(system_prompt)
+
+                 return {
+                     'name': frontmatter.get('name', path.stem),
+                     'description': frontmatter.get('description', ''),
+                     'system_prompt': system_prompt,
+                     'model': frontmatter.get('model'),
+                     'tools': frontmatter.get('tools', []),
+                     'temperature': frontmatter.get('temperature'),
+                     'max_tokens': frontmatter.get('max_tokens'),
+                     'toolkit_configs': frontmatter.get('toolkit_configs', []),
+                     'filesystem_tools_preset': frontmatter.get('filesystem_tools_preset'),
+                     'filesystem_tools_include': frontmatter.get('filesystem_tools_include'),
+                     'filesystem_tools_exclude': frontmatter.get('filesystem_tools_exclude'),
+                     'mcps': frontmatter.get('mcps', [])
+                 }
+
+         # Plain markdown - use content as system prompt
+         return {
+             'name': path.stem,
+             'system_prompt': substitute_env_vars(content),
+         }
+
+     # Handle YAML
+     if path.suffix in ['.yaml', '.yml']:
+         content = substitute_env_vars(content)
+         config = yaml.safe_load(content)
+         if 'system_prompt' in config:
+             config['system_prompt'] = substitute_env_vars(config['system_prompt'])
+         return config
+
+     # Handle JSON
+     if path.suffix == '.json':
+         content = substitute_env_vars(content)
+         config = json.loads(content)
+         if 'system_prompt' in config:
+             config['system_prompt'] = substitute_env_vars(config['system_prompt'])
+         return config
+
+     raise ValueError(f"Unsupported file format: {path.suffix}")
+
+
+ def build_agent_data_structure(agent_def: Dict[str, Any], toolkit_configs: list,
+                                llm_model: str, llm_temperature: float, llm_max_tokens: int) -> Dict[str, Any]:
+     """
+     Convert a local agent definition to the data structure expected by the Assistant class.
+
+     This utility function bridges between simple agent definition formats (e.g., from markdown files)
+     and the structured format that the Assistant class requires internally.
+
+     Args:
+         agent_def: The agent definition loaded from a local file (markdown, YAML, or JSON)
+         toolkit_configs: List of toolkit configurations to be used by the agent
+         llm_model: The LLM model name (e.g., 'gpt-4o')
+         llm_temperature: Temperature setting for the model
+         llm_max_tokens: Maximum tokens for model responses
+
+     Returns:
+         A dictionary in the format expected by the Assistant constructor with keys:
+         - instructions: System prompt for the agent
+         - tools: List of tool/toolkit configurations
+         - variables: Agent variables (empty for local agents)
+         - meta: Metadata including step_limit and internal_tools
+         - llm_settings: Complete LLM configuration
+         - agent_type: Type of agent (react, openai, etc.)
+     """
+     # Import toolkit registry to validate configs
+     from alita_sdk.tools import AVAILABLE_TOOLS
+
+     # Build the tools list from agent definition and toolkit configs
+     tools = []
+     processed_toolkit_names = set()
+
+     # Validate and process toolkit configs through their Pydantic schemas
+     validated_toolkit_configs = []
+     for toolkit_config in toolkit_configs:
+         toolkit_type = toolkit_config.get('type')
+         if toolkit_type and toolkit_type in AVAILABLE_TOOLS:
+             try:
+                 toolkit_info = AVAILABLE_TOOLS[toolkit_type]
+                 if 'toolkit_class' in toolkit_info:
+                     toolkit_class = toolkit_info['toolkit_class']
+                     if hasattr(toolkit_class, 'toolkit_config_schema'):
+                         schema = toolkit_class.toolkit_config_schema()
+                         validated_config = schema(**toolkit_config)
+                         validated_dict = validated_config.model_dump()
+                         validated_dict['type'] = toolkit_config.get('type')
+                         validated_dict['toolkit_name'] = toolkit_config.get('toolkit_name')
+                         validated_toolkit_configs.append(validated_dict)
+                     else:
+                         validated_toolkit_configs.append(toolkit_config)
+                 else:
+                     validated_toolkit_configs.append(toolkit_config)
+             except Exception:
+                 validated_toolkit_configs.append(toolkit_config)
+         else:
+             validated_toolkit_configs.append(toolkit_config)
+
+     # Add tools from agent definition
+     for tool_name in agent_def.get('tools', []):
+         toolkit_config = next((tk for tk in validated_toolkit_configs if tk.get('toolkit_name') == tool_name), None)
+         if toolkit_config:
+             tools.append({
+                 'type': toolkit_config.get('type'),
+                 'toolkit_name': toolkit_config.get('toolkit_name'),
+                 'settings': toolkit_config,
+                 'selected_tools': toolkit_config.get('selected_tools', [])
+             })
+             processed_toolkit_names.add(tool_name)
+         else:
+             tools.append({
+                 'type': tool_name,
+                 'name': tool_name
+             })
+
+     # Add toolkit_configs that weren't already referenced
+     for toolkit_config in validated_toolkit_configs:
+         toolkit_name = toolkit_config.get('toolkit_name')
+         if toolkit_name and toolkit_name not in processed_toolkit_names:
+             tools.append({
+                 'type': toolkit_config.get('type'),
+                 'toolkit_name': toolkit_name,
+                 'settings': toolkit_config,
+                 'selected_tools': toolkit_config.get('selected_tools', [])
+             })
+
+     return {
+         'instructions': agent_def.get('system_prompt', ''),
+         'tools': tools,
+         'variables': [],
+         'meta': {
+             'step_limit': agent_def.get('step_limit', 25),
+             'internal_tools': agent_def.get('internal_tools', [])
+         },
+         'llm_settings': {
+             'model_name': llm_model,
+             'max_tokens': llm_max_tokens,
+             'temperature': llm_temperature,
+             'top_p': 1.0,
+             'top_k': 0,
+             'integration_uid': None,
+             'indexer_config': {
+                 'ai_model': 'langchain_openai.ChatOpenAI',
+                 'ai_model_params': {
+                     'model': llm_model,
+                     'temperature': llm_temperature,
+                     'max_tokens': llm_max_tokens
+                 }
+             }
+         },
+         'agent_type': agent_def.get('agent_type', 'react')
+     }
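
To show the loader end to end, here is a minimal sketch that writes a hypothetical markdown agent definition, loads it, and converts it with `build_agent_data_structure`; the file name and frontmatter values are illustrative only.

```python
# Minimal sketch: a markdown agent definition with YAML frontmatter, round-tripped
# through the two functions above. File name and frontmatter values are hypothetical.
from pathlib import Path

from alita_sdk.cli.agent_loader import load_agent_definition, build_agent_data_structure

Path("tester.md").write_text(
    "---\n"
    "name: local-tester\n"
    "model: gpt-4o\n"
    "temperature: 0.2\n"
    "tools: []\n"
    "---\n"
    "You are a testing agent for this repository.\n"
)

agent_def = load_agent_definition("tester.md")
agent_data = build_agent_data_structure(
    agent_def=agent_def,
    toolkit_configs=[],                                    # no toolkits in this sketch
    llm_model=agent_def.get("model") or "gpt-4o",
    llm_temperature=agent_def.get("temperature") or 0.7,
    llm_max_tokens=agent_def.get("max_tokens") or 2000,
)
print(agent_data["instructions"])  # the markdown body becomes the system prompt
print(agent_data["agent_type"])    # defaults to 'react'
```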