kash-shell 0.3.34__py3-none-any.whl → 0.3.36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -22,9 +22,6 @@ class KashEnv(EnvEnum):
22
22
  KASH_SYSTEM_CACHE_DIR = "KASH_SYSTEM_CACHE_DIR"
23
23
  """The directory for system cache (caches separate from workspace caches)."""
24
24
 
25
- KASH_MCP_WS = "KASH_MCP_WS"
26
- """The directory for the workspace for MCP servers."""
27
-
28
25
  KASH_SHOW_TRACEBACK = "KASH_SHOW_TRACEBACK"
29
26
  """Whether to show tracebacks on actions and commands in the shell."""
30
27
 
kash/config/logger.py CHANGED
@@ -281,6 +281,8 @@ def _do_logging_setup(log_settings: LogSettings):
281
281
  def prefix(line: str, emoji: str = "", warn_emoji: str = "") -> str:
282
282
  prefix = task_stack_prefix_str()
283
283
  emojis = f"{warn_emoji}{emoji}".strip()
284
+ if emojis:
285
+ emojis += " "
284
286
  return "".join(filter(None, [prefix, emojis, line]))
285
287
 
286
288
 
@@ -19,7 +19,16 @@ class SuppressedWarningsStreamHandler(logging.StreamHandler):
19
19
  def basic_file_handler(path: Path, level: LogLevel | LogLevelStr) -> logging.FileHandler:
20
20
  handler = logging.FileHandler(path)
21
21
  handler.setLevel(LogLevel.parse(level).value)
22
- handler.setFormatter(Formatter("%(asctime)s %(levelname).1s %(name)s - %(message)s"))
22
+
23
+ class ThreadIdFormatter(Formatter):
24
+ def format(self, record):
25
+ # Add shortened thread ID as an attribute
26
+ record.thread_short = str(record.thread)[-5:]
27
+ return super().format(record)
28
+
29
+ handler.setFormatter(
30
+ ThreadIdFormatter("%(asctime)s %(levelname).1s [T%(thread_short)s] %(name)s - %(message)s")
31
+ )
23
32
  return handler
24
33
 
25
34
 
kash/config/settings.py CHANGED
@@ -166,9 +166,6 @@ class Settings:
166
166
  system_cache_dir: Path
167
167
  """Default global and system cache directory (for global media, content, etc)."""
168
168
 
169
- mcp_ws_dir: Path | None
170
- """The directory for the MCP workspace, if set."""
171
-
172
169
  local_server_log_path: Path
173
170
  """The path to the local server log."""
174
171
 
@@ -245,14 +242,6 @@ def _get_system_cache_dir() -> Path:
245
242
  return KashEnv.KASH_SYSTEM_CACHE_DIR.read_path(default=_get_ws_root_dir() / "cache")
246
243
 
247
244
 
248
- def _get_mcp_ws_dir() -> Path | None:
249
- mcp_dir = KashEnv.KASH_MCP_WS.read_str(default=None)
250
- if mcp_dir:
251
- return Path(mcp_dir).expanduser().resolve()
252
- else:
253
- return None
254
-
255
-
256
245
  @cache
257
246
  def _get_local_server_log_path() -> Path:
258
247
  return resolve_and_create_dirs(get_system_logs_dir() / f"{LOCAL_SERVER_LOG_NAME}.log")
@@ -266,7 +255,6 @@ def _read_settings():
266
255
  system_config_dir=_get_system_config_dir(),
267
256
  system_logs_dir=get_system_logs_dir(),
268
257
  system_cache_dir=_get_system_cache_dir(),
269
- mcp_ws_dir=_get_mcp_ws_dir(),
270
258
  local_server_log_path=_get_local_server_log_path(),
271
259
  # These default to the global but can be overridden by workspace settings.
272
260
  media_cache_dir=_get_system_cache_dir() / MEDIA_CACHE_NAME,
kash/config/setup.py CHANGED
@@ -75,6 +75,21 @@ def kash_setup(
75
75
 
76
76
 
77
77
  def _lib_setup():
78
+ import logging
79
+
80
+ log = logging.getLogger(__name__)
81
+
82
+ # Trust store integration, for consistent TLS behavior.
83
+ try:
84
+ import truststore # type: ignore
85
+
86
+ truststore.inject_into_ssl()
87
+ log.info("truststore initialized: using system TLS trust store")
88
+ except Exception as exc:
89
+ # If not installed or fails, default TLS trust will be used.
90
+ log.warning("truststore not available at import time: %s", exc)
91
+
92
+ # Handle default YAML representers.
78
93
  from sidematter_format import register_default_yaml_representers
79
94
 
80
95
  register_default_yaml_representers()
@@ -262,7 +262,7 @@ PROMPT_ASSIST = "(assistant) ❯"
262
262
 
263
263
  EMOJI_HINT = "👉"
264
264
 
265
- EMOJI_MSG_INDENT = "⋮ "
265
+ EMOJI_MSG_INDENT = "⋮ "
266
266
 
267
267
  EMOJI_START = "[➤]"
268
268
 
@@ -0,0 +1,60 @@
1
+ from funlog import log_calls
2
+
3
+ from kash.config.logger import get_logger
4
+ from kash.utils.common.import_utils import warm_import_library
5
+
6
+ log = get_logger(__name__)
7
+
8
+
9
+ @log_calls(level="info", show_timing_only=True)
10
+ def warm_slow_imports(include_extras: bool = True):
11
+ """
12
+ Pre-import slow packages to avoid delays when they are first used.
13
+
14
+ Args:
15
+ include_extras: If True, warm import optional libraries like LLM packages,
16
+ scipy, torch, etc. Set to False for minimal/faster startup.
17
+ """
18
+ try:
19
+ # Loading actions also loads any kits that are discovered.
20
+ import kash.actions # noqa: F401
21
+ import kash.local_server # noqa: F401
22
+ import kash.local_server.local_server # noqa: F401
23
+ import kash.mcp.mcp_server_sse # noqa: F401
24
+
25
+ # Core libraries that should usually be present
26
+ for lib_name, max_depth in [("xonsh", 3), ("uvicorn", 3)]:
27
+ try:
28
+ warm_import_library(lib_name, max_depth=max_depth)
29
+ except Exception as e:
30
+ log.debug(f"Could not warm import {lib_name}: {e}")
31
+
32
+ if include_extras:
33
+ # Fully warm import larger libraries (only if they're installed)
34
+ # These are optional dependencies that may not be present
35
+ optional_libraries = [
36
+ ("pydantic", 5),
37
+ ("litellm", 5),
38
+ ("openai", 5),
39
+ ("torch", 3), # torch is huge, limit depth
40
+ ("scipy", 3), # scipy has test modules we want to skip
41
+ ("marker", 4),
42
+ ("pandas", 3),
43
+ ]
44
+
45
+ for lib_name, max_depth in optional_libraries:
46
+ try:
47
+ warm_import_library(lib_name, max_depth=max_depth)
48
+ except Exception as e:
49
+ log.debug(f"Could not warm import {lib_name}: {e}")
50
+
51
+ # Initialize litellm configuration if available
52
+ try:
53
+ from kash.llm_utils.init_litellm import init_litellm
54
+
55
+ init_litellm()
56
+ except ImportError:
57
+ pass # litellm not installed
58
+
59
+ except ImportError as e:
60
+ log.warning(f"Error pre-importing packages: {e}")
@@ -204,7 +204,7 @@ def kash_action(
204
204
  precondition: Precondition = Precondition.always,
205
205
  arg_type: ArgType = ArgType.Locator,
206
206
  expected_args: ArgCount = ONE_ARG,
207
- output_type: ItemType = ItemType.doc,
207
+ output_type: ItemType | None = None,
208
208
  output_format: Format | None = None,
209
209
  expected_outputs: ArgCount = ONE_ARG,
210
210
  params: ParamDeclarations = (),
@@ -349,7 +349,7 @@ def kash_action(
349
349
  fmt_lines(self.params),
350
350
  )
351
351
  log.info(
352
- "Action function param values:\n%s",
352
+ "Action function param values: %s",
353
353
  self.param_value_summary_str(),
354
354
  )
355
355
  else:
kash/exec/action_exec.py CHANGED
@@ -107,7 +107,7 @@ def log_action(action: Action, action_input: ActionInput, operation: Operation):
107
107
  log.message("%s Action: `%s`", EMOJI_START, action.name)
108
108
  log.info("Running: `%s`", operation.command_line(with_options=True))
109
109
  if len(action.param_value_summary()) > 0:
110
- log.message("Parameters:\n%s", action.param_value_summary_str())
110
+ log.message("Parameters: %s", action.param_value_summary_str())
111
111
  log.info("Operation is: %s", operation)
112
112
  log.info("Input items are:\n%s", fmt_lines(action_input.items))
113
113
 
@@ -144,15 +144,17 @@ def fetch_url_item_content(
144
144
  if save_content:
145
145
  assert page_data.saved_content
146
146
  assert page_data.format_info
147
+ if not page_data.format_info.format:
148
+ log.warning("No format detected for content, defaulting to HTML: %s", url)
147
149
  content_item = url_item.new_copy_with(
148
150
  external_path=str(page_data.saved_content),
149
151
  # Use the original filename, not the local cache filename (which has a hash suffix).
150
152
  original_filename=item.get_filename(),
151
- format=page_data.format_info.format,
153
+ format=page_data.format_info.format or Format.html,
152
154
  )
153
155
 
154
156
  if not url_item.title:
155
- log.warning("Failed to fetch page data: title is missing: %s", item.url)
157
+ log.info("Title is missing for url item: %s", item)
156
158
 
157
159
  # Now save the updated URL item and also the content item if we have one.
158
160
  ws.save(url_item, overwrite=overwrite)
@@ -29,6 +29,7 @@ def windowed_llm_transform(
29
29
  windowing: WindowSettings | None,
30
30
  diff_filter: DiffFilter | None = None,
31
31
  check_no_results: bool = True,
32
+ enable_web_search: bool = False,
32
33
  ) -> TextDoc:
33
34
  def doc_transform(input_doc: TextDoc) -> TextDoc:
34
35
  return TextDoc.from_text(
@@ -41,6 +42,7 @@ def windowed_llm_transform(
41
42
  input=input_doc.reassemble(),
42
43
  body_template=template,
43
44
  check_no_results=check_no_results,
45
+ enable_web_search=enable_web_search,
44
46
  ).content
45
47
  )
46
48
  )
@@ -67,6 +69,7 @@ def llm_transform_str(options: LLMOptions, input_str: str, check_no_results: boo
67
69
  input_str,
68
70
  options.windowing,
69
71
  diff_filter=options.diff_filter,
72
+ enable_web_search=options.enable_web_search,
70
73
  ).reassemble()
71
74
  else:
72
75
  log.info(
@@ -81,6 +84,7 @@ def llm_transform_str(options: LLMOptions, input_str: str, check_no_results: boo
81
84
  body_template=options.body_template,
82
85
  input=input_str,
83
86
  check_no_results=check_no_results,
87
+ enable_web_search=options.enable_web_search,
84
88
  ).content
85
89
 
86
90
  return result_str
@@ -485,6 +485,9 @@ class FileStore(Workspace):
485
485
  If `with_sidematter` is true, will copy any sidematter files (metadata/assets) to
486
486
  the destination.
487
487
  """
488
+ # TODO: Make sure importing a text item that already has
489
+ # frontmatter doesn't accidentally duplicate the frontmatter
490
+
488
491
  from kash.file_storage.item_file_format import read_item
489
492
  from kash.web_content.canon_url import canonicalize_url
490
493
 
@@ -531,6 +534,7 @@ class FileStore(Workspace):
531
534
  # This will read the file with or without frontmatter.
532
535
  # We are importing so we want to drop the external path so we save the body.
533
536
  item = read_item(path, self.base_dir)
537
+ log.info("Imported text item: %s", item)
534
538
  item.external_path = None
535
539
 
536
540
  if item.type and as_type and item.type != as_type:
@@ -2,7 +2,7 @@ from __future__ import annotations
2
2
 
3
3
  import time
4
4
  from dataclasses import dataclass
5
- from typing import TYPE_CHECKING, cast
5
+ from typing import TYPE_CHECKING, Any, cast
6
6
 
7
7
  from flowmark import Wrap, fill_text
8
8
  from funlog import format_duration, log_calls
@@ -51,6 +51,7 @@ class LLMCompletionResult:
51
51
  message: LiteLLMMessage
52
52
  content: str
53
53
  citations: CitationList | None
54
+ tool_calls: list[dict[str, Any]] | None = None
54
55
 
55
56
  @property
56
57
  def content_with_citations(self) -> str:
@@ -59,40 +60,111 @@ class LLMCompletionResult:
59
60
  content = content + "\n\n" + self.citations.as_markdown_footnotes()
60
61
  return content
61
62
 
63
+ @property
64
+ def has_tool_calls(self) -> bool:
65
+ """Check if the response contains tool calls."""
66
+ return bool(self.tool_calls)
67
+
68
+ @property
69
+ def tool_call_names(self) -> list[str]:
70
+ """Get list of tool names that were called."""
71
+ if not self.tool_calls:
72
+ return []
73
+ names = []
74
+ for call in self.tool_calls:
75
+ # Handle both LiteLLM objects and dict representations
76
+ if hasattr(call, "function") and hasattr(getattr(call, "function", None), "name"):
77
+ # LiteLLM object format
78
+ names.append(f"{call.function.name}()") # pyright: ignore[reportAttributeAccessIssue]
79
+ elif isinstance(call, dict) and call.get("function", {}).get("name"):
80
+ # Dict format
81
+ names.append(f"{call['function']['name']}()")
82
+ else:
83
+ names.append(str(call))
84
+ return names
85
+
62
86
 
63
87
  @log_calls(level="info")
64
88
  def llm_completion(
65
89
  model: LLMName,
66
90
  messages: list[dict[str, str]],
67
91
  save_objects: bool = True,
68
- response_format: dict | type[BaseModel] | None = None,
92
+ response_format: dict[str, Any] | type[BaseModel] | None = None,
93
+ tools: list[dict[str, Any]] | None = None,
94
+ enable_web_search: bool = False,
69
95
  **kwargs,
70
96
  ) -> LLMCompletionResult:
71
97
  """
72
98
  Perform an LLM completion with LiteLLM.
99
+
100
+ Args:
101
+ model: The LLM model to use
102
+ messages: Chat messages
103
+ save_objects: Whether to save chat history
104
+ response_format: Response format specification
105
+ tools: List of tools available for function calling (e.g., web_search)
106
+ enable_web_search: If True, automatically add web search tools for the model
107
+ **kwargs: Additional LiteLLM parameters
73
108
  """
74
- import litellm
75
109
  from litellm.types.utils import Choices, ModelResponse
76
110
 
77
111
  init_litellm()
78
112
 
113
+ # Prepare completion parameters
114
+ completion_params = {
115
+ "model": model.litellm_name,
116
+ "messages": messages,
117
+ **kwargs,
118
+ }
119
+
120
+ # Auto-enable web search if requested
121
+ if enable_web_search:
122
+ import litellm
123
+
124
+ if litellm.supports_web_search(model=model.litellm_name):
125
+ log.message("Enabling web search for model %s", model.litellm_name)
126
+ completion_params["web_search_options"] = {"search_context_size": "medium"}
127
+ else:
128
+ log.warning("Web search requested but not supported by model %s", model.litellm_name)
129
+
79
130
  chat_history = ChatHistory.from_dicts(messages)
131
+
132
+ # Enhanced logging to detect tool use
133
+ tools_info = f", {len(tools)} tools" if tools else ", no tools"
80
134
  log.info(
81
- "Calling LLM completion from %s on %s, response_format=%s",
135
+ "Calling LLM completion from %s on %s, response_format=%s%s",
82
136
  model.litellm_name,
83
137
  chat_history.size_summary(),
84
138
  response_format,
139
+ tools_info,
85
140
  )
86
141
 
142
+ if tools:
143
+ tool_names = []
144
+ for tool in tools:
145
+ if tool.get("type") == "function":
146
+ tool_names.append(tool.get("function", {}).get("name", "unknown"))
147
+ elif tool.get("type") == "native_web_search":
148
+ tool_names.append("native_web_search")
149
+ else:
150
+ tool_names.append(tool.get("type", "unknown"))
151
+
152
+ log.message("Tools enabled: %s", tool_names)
153
+
87
154
  start_time = time.time()
155
+
156
+ if response_format:
157
+ completion_params["response_format"] = response_format
158
+
159
+ if tools:
160
+ completion_params["tools"] = tools
161
+ log.info("Enabling function calling with %d tools", len(tools))
162
+
163
+ import litellm
164
+
88
165
  llm_output = cast(
89
166
  ModelResponse,
90
- litellm.completion(
91
- model.litellm_name,
92
- messages=messages,
93
- response_format=response_format,
94
- **kwargs,
95
- ), # pyright: ignore
167
+ litellm.completion(**completion_params), # pyright: ignore
96
168
  )
97
169
  elapsed = time.time() - start_time
98
170
 
@@ -100,23 +172,47 @@ def llm_completion(
100
172
 
101
173
  message = choices.message
102
174
 
175
+ # Extract tool calls from the response
176
+ tool_calls = getattr(message, "tool_calls", None)
177
+ tool_calls_list = list(tool_calls) if tool_calls else None
178
+
103
179
  # Just sanity checking and logging.
104
180
  content = choices.message.content
105
181
  if not content or not isinstance(content, str):
106
182
  raise ApiResultError(f"LLM completion failed: {model.litellm_name}: {llm_output}")
107
183
 
184
+ # Create the result object with tool calls
185
+ citations = llm_output.get("citations", None)
186
+ result = LLMCompletionResult(
187
+ message=message,
188
+ content=content,
189
+ citations=CitationList(citations=citations) if citations else None,
190
+ tool_calls=tool_calls_list,
191
+ )
192
+
193
+ # Log tool calls if present
194
+ if result.has_tool_calls:
195
+ tool_count = len(result.tool_calls or [])
196
+ log.message("LLM executed %d function calls: %s", tool_count, result.tool_call_names)
197
+ log.message(
198
+ "⚠️ Function calls require implementation - LLM requested tools but no handlers are implemented"
199
+ )
200
+
201
+ # Performance logging
108
202
  total_input_len = sum(len(m["content"]) for m in messages)
109
203
  speed = len(content) / elapsed
204
+ tool_count = len(result.tool_calls or []) if result.has_tool_calls else 0
205
+ tool_info = f", {tool_count} tool calls" if result.has_tool_calls else ""
110
206
  log.info(
111
207
  f"{EMOJI_TIMING} LLM completion from {model.litellm_name} in {format_duration(elapsed)}: "
112
208
  f"input {total_input_len} chars in {len(messages)} messages, output {len(content)} chars "
113
- f"({speed:.0f} char/s)"
209
+ f"({speed:.0f} char/s){tool_info}"
114
210
  )
115
211
 
116
- citations = llm_output.get("citations", None)
117
-
118
212
  if save_objects:
119
213
  metadata = {"citations": citations} if citations else {}
214
+ if result.has_tool_calls:
215
+ metadata["tool_calls"] = len(result.tool_calls or [])
120
216
  chat_history.messages.append(
121
217
  ChatMessage(role=ChatRole.assistant, content=content, metadata=metadata)
122
218
  )
@@ -128,11 +224,7 @@ def llm_completion(
128
224
  file_ext="yml",
129
225
  )
130
226
 
131
- return LLMCompletionResult(
132
- message=message,
133
- content=content,
134
- citations=CitationList(citations=citations) if citations else None,
135
- )
227
+ return result
136
228
 
137
229
 
138
230
  def llm_template_completion(
@@ -143,7 +235,9 @@ def llm_template_completion(
143
235
  previous_messages: list[dict[str, str]] | None = None,
144
236
  save_objects: bool = True,
145
237
  check_no_results: bool = True,
146
- response_format: dict | type[BaseModel] | None = None,
238
+ response_format: dict[str, Any] | type[BaseModel] | None = None,
239
+ tools: list[dict[str, Any]] | None = None,
240
+ enable_web_search: bool = False,
147
241
  **kwargs,
148
242
  ) -> LLMCompletionResult:
149
243
  """
@@ -169,6 +263,8 @@ def llm_template_completion(
169
263
  ],
170
264
  save_objects=save_objects,
171
265
  response_format=response_format,
266
+ tools=tools,
267
+ enable_web_search=enable_web_search,
172
268
  **kwargs,
173
269
  )
174
270
 
kash/llm_utils/llms.py CHANGED
@@ -28,22 +28,23 @@ class LLM(LLMName, Enum):
28
28
  gpt_4_1 = LLMName("gpt-4.1")
29
29
  gpt_4o = LLMName("gpt-4o")
30
30
  gpt_4o_mini = LLMName("gpt-4o-mini")
31
+ gpt_4o_search_preview = LLMName("gpt-4o-search-preview")
31
32
  gpt_4 = LLMName("gpt-4")
32
33
  gpt_4_1_mini = LLMName("gpt-4.1-mini")
33
34
  gpt_4_1_nano = LLMName("gpt-4.1-nano")
34
35
 
35
- # https://docs.anthropic.com/en/docs/about-claude/models/all-models
36
+ # https://docs.claude.com/en/docs/about-claude/models
36
37
 
37
- claude_4_1_opus = LLMName("claude-opus-4-1")
38
- claude_4_opus = LLMName("claude-opus-4-20250514")
39
- claude_4_sonnet = LLMName("claude-sonnet-4-20250514")
40
- claude_3_7_sonnet = LLMName("claude-3-7-sonnet-latest")
41
- claude_3_5_haiku = LLMName("claude-3-5-haiku-latest")
38
+ claude_sonnet_4_5 = LLMName("claude-sonnet-4-5-20250929")
39
+ claude_haiku_4_5 = LLMName("claude-haiku-4-5-20251001")
40
+ claude_opus_4_1 = LLMName("claude-opus-4-1-20250805")
41
+ claude_sonnet_4 = LLMName("claude-sonnet-4-20250514")
42
+ claude_opus_4 = LLMName("claude-opus-4-20250514")
42
43
 
43
44
  # https://ai.google.dev/gemini-api/docs/models
44
45
  gemini_2_5_pro = LLMName("gemini/gemini-2.5-pro")
45
46
  gemini_2_5_flash = LLMName("gemini/gemini-2.5-flash")
46
- gemini_2_5_flash_lite = LLMName("gemini-2.5-flash-lite-preview-06-17")
47
+ gemini_2_5_flash_lite = LLMName("gemini/gemini-2.5-flash-lite")
47
48
 
48
49
  # https://docs.x.ai/docs/models
49
50
  xai_grok_3 = LLMName("xai/grok-3")
kash/mcp/mcp_cli.py CHANGED
@@ -11,8 +11,14 @@ from pathlib import Path
11
11
 
12
12
  from clideps.utils.readable_argparse import ReadableColorFormatter
13
13
 
14
- from kash.config.settings import DEFAULT_MCP_SERVER_PORT, LogLevel, global_settings
14
+ from kash.config.settings import (
15
+ DEFAULT_MCP_SERVER_PORT,
16
+ LogLevel,
17
+ atomic_global_settings,
18
+ global_settings,
19
+ )
15
20
  from kash.config.setup import kash_setup
21
+ from kash.config.warm_slow_imports import warm_slow_imports
16
22
  from kash.shell.version import get_version
17
23
 
18
24
  __version__ = get_version()
@@ -26,8 +32,6 @@ log = logging.getLogger()
26
32
 
27
33
 
28
34
  def build_parser():
29
- from kash.workspaces.workspaces import global_ws_dir
30
-
31
35
  parser = argparse.ArgumentParser(description=__doc__, formatter_class=ReadableColorFormatter)
32
36
  parser.add_argument(
33
37
  "--version",
@@ -36,8 +40,8 @@ def build_parser():
36
40
  )
37
41
  parser.add_argument(
38
42
  "--workspace",
39
- default=global_ws_dir(),
40
- help=f"Set workspace directory. Defaults to kash global workspace directory: {global_ws_dir()}",
43
+ default=global_settings().global_ws_dir,
44
+ help=f"Set workspace directory. Defaults to kash global workspace directory: {global_settings().global_ws_dir}",
41
45
  )
42
46
  parser.add_argument(
43
47
  "--proxy",
@@ -95,6 +99,14 @@ def run_server(args: argparse.Namespace):
95
99
  log.warning("kash MCP CLI started, logging to: %s", MCP_CLI_LOG_PATH)
96
100
  log.warning("Current working directory: %s", Path(".").resolve())
97
101
 
102
+ # Eagerly import so the server is warmed up.
103
+ # This is important to save init time on fresh sandboxes like E2B!
104
+ warm_slow_imports(include_extras=True)
105
+
106
+ if args.workspace and args.workspace != global_settings().global_ws_dir:
107
+ with atomic_global_settings().updates() as settings:
108
+ settings.global_ws_dir = Path(args.workspace).absolute()
109
+
98
110
  ws: Workspace = get_ws(name_or_path=Path(args.workspace), auto_init=True)
99
111
  os.chdir(ws.base_dir)
100
112
  log.warning("Running in workspace: %s", ws.base_dir)
@@ -3,7 +3,9 @@ from __future__ import annotations
3
3
  import asyncio
4
4
  import pprint
5
5
  from dataclasses import dataclass
6
+ from pathlib import Path
6
7
 
8
+ from clideps.env_vars.dotenv_utils import load_dotenv_paths
7
9
  from funlog import log_calls
8
10
  from mcp.server.lowlevel import Server
9
11
  from mcp.server.lowlevel.server import StructuredContent, UnstructuredContent
@@ -237,10 +239,10 @@ def run_mcp_tool(
237
239
  """
238
240
  try:
239
241
  with captured_output() as capture:
240
- # XXX For now, unless the user has overridden the MCP workspace, we use the
241
- # current workspace, which could be changed by the user by changing working
242
- # directories. Maybe confusing?
243
- explicit_mcp_ws = global_settings().mcp_ws_dir
242
+ dotenv_paths = load_dotenv_paths(True, True, Path("."))
243
+ log.warning("Loaded .env files: %s", dotenv_paths)
244
+ # Use the global workspace default
245
+ explicit_mcp_ws = global_settings().global_ws_dir
244
246
 
245
247
  with kash_runtime(
246
248
  workspace_dir=explicit_mcp_ws,
@@ -171,6 +171,7 @@ class LLMOptions:
171
171
  body_template: MessageTemplate = MessageTemplate("{body}")
172
172
  windowing: WindowSettings = WINDOW_NONE
173
173
  diff_filter: DiffFilter | None = None
174
+ enable_web_search: bool = False
174
175
 
175
176
  def updated_with(self, param_name: str, value: Any) -> LLMOptions:
176
177
  """Update option from an action parameter."""
@@ -244,11 +245,12 @@ class Action(ABC):
244
245
  be ONE_ARG.
245
246
  """
246
247
 
247
- output_type: ItemType = ItemType.doc
248
+ output_type: ItemType | None = None
248
249
  """
249
250
  The type of the output item(s). If an action returns multiple output types,
250
251
  this will be the output type of the first output.
251
252
  This is mainly used for preassembly for the cache check if an output already exists.
253
+ None means to use the input type.
252
254
  """
253
255
 
254
256
  output_format: Format | None = None
@@ -451,7 +453,7 @@ class Action(ABC):
451
453
  return changed_params
452
454
 
453
455
  def param_value_summary_str(self) -> str:
454
- return fmt_lines(
456
+ return ", ".join(
455
457
  [format_key_value(name, value) for name, value in self.param_value_summary().items()]
456
458
  )
457
459
 
@@ -542,8 +544,8 @@ class Action(ABC):
542
544
  def preassemble_result(self, context: ActionContext) -> ActionResult | None:
543
545
  """
544
546
  Actions can have a separate preliminary step to pre-assemble outputs. This allows
545
- us to determine the title and types for the output items and check if they were
546
- already generated before running slow or expensive actions.
547
+ us to determine the expected shape of the output and check if it already
548
+ exists.
547
549
 
548
550
  For now, this only applies to actions with a single output, when `self.cacheable`
549
551
  is True.
@@ -560,7 +562,17 @@ class Action(ABC):
560
562
  # Using first input to determine the output title.
561
563
  primary_input = context.action_input.items[0]
562
564
  # In this case we only expect one output, of the type specified by the action.
563
- primary_output = primary_input.derived_copy(context, 0, type=context.action.output_type)
565
+ output_type = context.action.output_type or primary_input.type
566
+ if not output_type:
567
+ log.warning(
568
+ "No output type specified for action `%s`, using `doc` for preassembly",
569
+ self.name,
570
+ )
571
+ output_type = ItemType.doc
572
+ output_format = context.action.output_format or primary_input.format
573
+ primary_output = primary_input.derived_copy(
574
+ context, 0, type=output_type, format=output_format
575
+ )
564
576
  log.info("Preassembled output: source %s, %s", primary_output.source, primary_output)
565
577
  return ActionResult([primary_output])
566
578
  else: