amd-gaia 0.15.1__py3-none-any.whl → 0.15.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/METADATA +2 -2
  2. {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/RECORD +38 -32
  3. {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/WHEEL +1 -1
  4. gaia/agents/base/agent.py +317 -113
  5. gaia/agents/base/api_agent.py +0 -1
  6. gaia/agents/base/console.py +334 -9
  7. gaia/agents/base/tools.py +7 -2
  8. gaia/agents/blender/__init__.py +7 -0
  9. gaia/agents/blender/agent.py +7 -10
  10. gaia/agents/blender/core/view.py +2 -2
  11. gaia/agents/chat/agent.py +22 -48
  12. gaia/agents/chat/app.py +7 -0
  13. gaia/agents/chat/tools/rag_tools.py +23 -8
  14. gaia/agents/chat/tools/shell_tools.py +1 -0
  15. gaia/agents/code/prompts/code_patterns.py +2 -4
  16. gaia/agents/docker/agent.py +1 -0
  17. gaia/agents/emr/agent.py +3 -5
  18. gaia/agents/emr/cli.py +1 -1
  19. gaia/agents/emr/dashboard/server.py +2 -4
  20. gaia/agents/tools/__init__.py +11 -0
  21. gaia/agents/tools/file_tools.py +715 -0
  22. gaia/apps/llm/app.py +14 -3
  23. gaia/chat/app.py +2 -4
  24. gaia/cli.py +751 -333
  25. gaia/installer/__init__.py +23 -0
  26. gaia/installer/init_command.py +1605 -0
  27. gaia/installer/lemonade_installer.py +678 -0
  28. gaia/llm/__init__.py +2 -1
  29. gaia/llm/lemonade_client.py +427 -99
  30. gaia/llm/lemonade_manager.py +55 -11
  31. gaia/llm/providers/lemonade.py +21 -14
  32. gaia/rag/sdk.py +1 -1
  33. gaia/security.py +24 -4
  34. gaia/talk/app.py +2 -4
  35. gaia/version.py +2 -2
  36. {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/entry_points.txt +0 -0
  37. {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/licenses/LICENSE.md +0 -0
  38. {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/top_level.txt +0 -0
@@ -203,16 +203,45 @@ class LemonadeManager:
                 )
                 return True
             else:
-                # Context size insufficient - warn and continue
-                cls._log.warning(
-                    f"Lemonade running with {cls._context_size} tokens, "
-                    f"but {min_context_size} requested. "
-                    f"Restart with: lemonade-server serve --ctx-size {min_context_size}"
-                )
-                if not quiet:
-                    cls.print_context_message(
-                        cls._context_size, min_context_size, MessageType.WARNING
+                # Context size may be cached from before models were loaded
+                # Re-check current status to see if models are loaded now
+                try:
+                    client = LemonadeClient(
+                        host=host,
+                        port=port,
+                        keep_alive=True,
+                        verbose=not quiet,
                     )
+                    status = client.get_status()
+                    # Update cached context size
+                    cls._context_size = status.context_size or 0
+
+                    # Only warn if LLM models are loaded AND context is insufficient
+                    # SD models don't have context size, only LLM models do
+                    llm_models_loaded = any(
+                        "image" not in model.get("labels", [])
+                        for model in status.loaded_models
+                    )
+
+                    # Only warn if context_size is non-zero (0 means no model loaded or still loading)
+                    if (
+                        cls._context_size > 0
+                        and cls._context_size < min_context_size
+                        and llm_models_loaded
+                    ):
+                        cls._log.warning(
+                            f"Lemonade running with {cls._context_size} tokens, "
+                            f"but {min_context_size} requested. "
+                            f"Restart with: lemonade-server serve --ctx-size {min_context_size}"
+                        )
+                        if not quiet:
+                            cls.print_context_message(
+                                cls._context_size,
+                                min_context_size,
+                                MessageType.WARNING,
+                            )
+                except Exception as e:
+                    cls._log.debug(f"Failed to re-check status: {e}")
                 return True

         cls._log.debug(f"Initializing Lemonade (min context: {min_context_size})")
@@ -246,8 +275,23 @@
                 f"(context: {cls._context_size} tokens)"
             )

-            # Verify context size - warn if insufficient
-            if cls._context_size < min_context_size:
+            # Verify context size - only warn if insufficient AND LLM models are loaded
+            # SD models don't have context size, only LLM models do
+            # Check if any loaded models are LLMs (not SD models with "image" label)
+            llm_models_loaded = any(
+                "image" not in model.get("labels", [])
+                for model in status.loaded_models
+            )
+
+            # Only warn if:
+            # 1. Context size is non-zero (0 means no model loaded or model still loading)
+            # 2. Context size is less than required
+            # 3. LLM models are loaded (SD models don't have context size)
+            if (
+                cls._context_size > 0
+                and cls._context_size < min_context_size
+                and llm_models_loaded
+            ):
                 cls._log.warning(
                     f"Context size {cls._context_size} is less than "
                     f"requested {min_context_size}. Some features may not work correctly."
gaia/llm/providers/lemonade.py CHANGED
@@ -47,18 +47,13 @@ class LemonadeProvider(LLMClient):
         stream: bool = False,
         **kwargs,
     ) -> Union[str, Iterator[str]]:
-        # Use provided model, instance model, or default CPU model
-        effective_model = model or self._model or DEFAULT_MODEL_NAME
-
-        # Default to low temperature for deterministic responses (matches old LLMClient behavior)
-        kwargs.setdefault("temperature", 0.1)
-
-        response = self._backend.completions(
-            model=effective_model, prompt=prompt, stream=stream, **kwargs
+        # Use chat endpoint (completions endpoint not available in Lemonade v9.1+)
+        return self.chat(
+            [{"role": "user", "content": prompt}],
+            model=model,
+            stream=stream,
+            **kwargs,
         )
-        if stream:
-            return self._handle_stream(response)
-        return self._extract_text(response)

     def chat(
         self,
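
Reading note: `generate()` is now a thin wrapper over `chat()`. A minimal, self-contained sketch of the equivalent call flow, using a stand-in chat function instead of a real `LemonadeProvider` instance (the prompt and echoed response are made up for illustration):

```python
# Sketch of the new generate() behavior: wrap the prompt as one user message
# and forward everything to the chat endpoint.
def generate_via_chat(chat_fn, prompt: str, **kwargs):
    return chat_fn([{"role": "user", "content": prompt}], **kwargs)

# Stand-in for LemonadeProvider.chat(), used only so this sketch runs on its own.
def fake_chat(messages, **kwargs):
    return f"echo: {messages[0]['content']}"

print(generate_via_chat(fake_chat, "What is GAIA?"))  # -> "echo: What is GAIA?"
```
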
@@ -84,6 +79,15 @@ class LemonadeProvider(LLMClient):
         )
         if stream:
             return self._handle_stream(response)
+
+        # Handle error responses gracefully
+        if not isinstance(response, dict) or "choices" not in response:
+            error_msg = f"Unexpected response format from Lemonade Server: {response}"
+            raise ValueError(error_msg)
+
+        if not response["choices"] or len(response["choices"]) == 0:
+            raise ValueError("Empty choices in response from Lemonade Server")
+
         return response["choices"][0]["message"]["content"]

     def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
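
For context on what the new guard rejects, a small self-contained sketch with fabricated responses (these are not captured from a real Lemonade Server):

```python
# Fabricated responses illustrating the three failure shapes the new checks catch.
bad_responses = [
    "Internal Server Error",         # not a dict at all
    {"error": {"message": "boom"}},  # dict without a "choices" key
    {"choices": []},                 # "choices" present but empty
]

for response in bad_responses:
    try:
        if not isinstance(response, dict) or "choices" not in response:
            raise ValueError(f"Unexpected response format from Lemonade Server: {response}")
        if not response["choices"]:
            raise ValueError("Empty choices in response from Lemonade Server")
    except ValueError as exc:
        print(f"rejected: {exc}")
```
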
@@ -114,7 +118,10 @@
         for chunk in response:
             if "choices" in chunk and chunk["choices"]:
                 delta = chunk["choices"][0].get("delta", {})
-                if "content" in delta:
-                    yield delta["content"]
+                content = delta.get("content")
+                if content:
+                    yield content
                 elif "text" in chunk["choices"][0]:
-                    yield chunk["choices"][0]["text"]
+                    text = chunk["choices"][0]["text"]
+                    if text:
+                        yield text
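
The stream-handling change above skips empty deltas instead of yielding empty strings. A self-contained sketch with a fabricated chunk sequence (not real server output) showing the effect:

```python
# Fabricated SSE-style chunks: role-only and empty-text chunks should be skipped.
fake_stream = [
    {"choices": [{"delta": {"role": "assistant"}}]},  # no content -> skipped
    {"choices": [{"delta": {"content": "Hel"}}]},
    {"choices": [{"delta": {"content": "lo"}}]},
    {"choices": [{"text": ""}]},                       # empty text -> skipped
]

def handle_stream(response):
    # Mirrors the updated _handle_stream logic from the diff above.
    for chunk in response:
        if "choices" in chunk and chunk["choices"]:
            delta = chunk["choices"][0].get("delta", {})
            content = delta.get("content")
            if content:
                yield content
            elif "text" in chunk["choices"][0]:
                text = chunk["choices"][0]["text"]
                if text:
                    yield text

print("".join(handle_stream(fake_stream)))  # -> "Hello"
```
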
gaia/rag/sdk.py CHANGED
@@ -432,7 +432,7 @@ class RAGSDK:
         vlm = None
         vlm_available = False
         try:
-            from gaia.llm.vlm_client import VLMClient
+            from gaia.llm import VLMClient
             from gaia.rag.pdf_utils import (
                 count_images_in_page,
                 extract_images_from_page_pymupdf,
gaia/security.py CHANGED
@@ -109,14 +109,34 @@ class PathValidator:
         # Resolve path using os.path.realpath to follow symlinks
         # This prevents TOCTOU attacks by resolving at check time
         real_path = Path(os.path.realpath(path)).resolve()
+        real_path_str = str(real_path)
+
+        # macOS /var symlink handling: normalize by removing /private prefix
+        def normalize_macos(p: str) -> str:
+            if p.startswith("/private/"):
+                return p[len("/private") :]
+            return p
+
+        norm_real_path = normalize_macos(real_path_str)

         # Check if real path is within any allowed directory
-        for allowed_path in self.allowed_paths:
+        for allowed_path in list(self.allowed_paths):
             try:
-                # is_relative_to requires Python 3.9+, use alternative for compatibility
-                real_path.relative_to(allowed_path)
+                # Ensure allowed_path is also resolved to handle symlinks correctly
+                # IMPORTANT: Use str(allowed_path) as allowed_path might already be a Path object
+                allowed_path_str_raw = str(allowed_path)
+                res_allowed = Path(os.path.realpath(allowed_path_str_raw)).resolve()
+                allowed_path_str = str(res_allowed)
+                norm_allowed_path = normalize_macos(allowed_path_str)
+
+                # Robust check using string prefix on normalized paths
+                if norm_real_path.startswith(norm_allowed_path):
+                    return True
+
+                # Fallback to relative_to for safety
+                real_path.relative_to(res_allowed)
                 return True
-            except ValueError:
+            except (ValueError, RuntimeError):
                 continue

         # If we get here, path is not allowed. Prompt user?
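
Background on the macOS normalization above: on macOS, `/var`, `/tmp`, and `/etc` are symlinks into `/private`, so `os.path.realpath()` on a path under `/var` returns a `/private/var/...` string that a naive `relative_to()` check against an unresolved allowed path would reject. A small sketch of the idea; the directory below is hypothetical and need not exist.

```python
import os

def normalize_macos(p: str) -> str:
    # Same normalization as the diff: treat /private/var/... and /var/... as equal.
    if p.startswith("/private/"):
        return p[len("/private"):]
    return p

allowed = "/var/folders/ab/session"  # hypothetical allowed directory
real = os.path.realpath(allowed)     # macOS: "/private/var/..."; Linux: unchanged
print(normalize_macos(real) == normalize_macos(allowed))  # True on both platforms
```
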
gaia/talk/app.py CHANGED
@@ -126,8 +126,7 @@ def print_integration_examples():
     print("INTEGRATION EXAMPLES")
     print("=" * 60)

-    print(
-        """
+    print("""
 Basic Integration:
 ```python
 from gaia.talk.sdk import TalkSDK, TalkConfig
@@ -174,8 +173,7 @@ from gaia.talk.sdk import quick_chat
 response = await quick_chat("Hello!")
 print(response)
 ```
-    """
-    )
+    """)


 async def main():
gaia/version.py CHANGED
@@ -6,10 +6,10 @@ import os
 import subprocess
 from importlib.metadata import version as get_package_version_metadata

-__version__ = "0.15.1"
+__version__ = "0.15.3"

 # Lemonade version used across CI and installer
-LEMONADE_VERSION = "9.1.0"
+LEMONADE_VERSION = "9.2.0"


 def get_package_version() -> str: