amd-gaia 0.14.3__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (181)
  1. {amd_gaia-0.14.3.dist-info → amd_gaia-0.15.1.dist-info}/METADATA +223 -223
  2. amd_gaia-0.15.1.dist-info/RECORD +178 -0
  3. {amd_gaia-0.14.3.dist-info → amd_gaia-0.15.1.dist-info}/entry_points.txt +1 -0
  4. {amd_gaia-0.14.3.dist-info → amd_gaia-0.15.1.dist-info}/licenses/LICENSE.md +20 -20
  5. gaia/__init__.py +29 -29
  6. gaia/agents/__init__.py +19 -19
  7. gaia/agents/base/__init__.py +9 -9
  8. gaia/agents/base/agent.py +2177 -2177
  9. gaia/agents/base/api_agent.py +120 -120
  10. gaia/agents/base/console.py +1841 -1841
  11. gaia/agents/base/errors.py +237 -237
  12. gaia/agents/base/mcp_agent.py +86 -86
  13. gaia/agents/base/tools.py +83 -83
  14. gaia/agents/blender/agent.py +556 -556
  15. gaia/agents/blender/agent_simple.py +133 -135
  16. gaia/agents/blender/app.py +211 -211
  17. gaia/agents/blender/app_simple.py +41 -41
  18. gaia/agents/blender/core/__init__.py +16 -16
  19. gaia/agents/blender/core/materials.py +506 -506
  20. gaia/agents/blender/core/objects.py +316 -316
  21. gaia/agents/blender/core/rendering.py +225 -225
  22. gaia/agents/blender/core/scene.py +220 -220
  23. gaia/agents/blender/core/view.py +146 -146
  24. gaia/agents/chat/__init__.py +9 -9
  25. gaia/agents/chat/agent.py +835 -835
  26. gaia/agents/chat/app.py +1058 -1058
  27. gaia/agents/chat/session.py +508 -508
  28. gaia/agents/chat/tools/__init__.py +15 -15
  29. gaia/agents/chat/tools/file_tools.py +96 -96
  30. gaia/agents/chat/tools/rag_tools.py +1729 -1729
  31. gaia/agents/chat/tools/shell_tools.py +436 -436
  32. gaia/agents/code/__init__.py +7 -7
  33. gaia/agents/code/agent.py +549 -549
  34. gaia/agents/code/cli.py +377 -0
  35. gaia/agents/code/models.py +135 -135
  36. gaia/agents/code/orchestration/__init__.py +24 -24
  37. gaia/agents/code/orchestration/checklist_executor.py +1763 -1763
  38. gaia/agents/code/orchestration/checklist_generator.py +713 -713
  39. gaia/agents/code/orchestration/factories/__init__.py +9 -9
  40. gaia/agents/code/orchestration/factories/base.py +63 -63
  41. gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -118
  42. gaia/agents/code/orchestration/factories/python_factory.py +106 -106
  43. gaia/agents/code/orchestration/orchestrator.py +841 -841
  44. gaia/agents/code/orchestration/project_analyzer.py +391 -391
  45. gaia/agents/code/orchestration/steps/__init__.py +67 -67
  46. gaia/agents/code/orchestration/steps/base.py +188 -188
  47. gaia/agents/code/orchestration/steps/error_handler.py +314 -314
  48. gaia/agents/code/orchestration/steps/nextjs.py +828 -828
  49. gaia/agents/code/orchestration/steps/python.py +307 -307
  50. gaia/agents/code/orchestration/template_catalog.py +469 -469
  51. gaia/agents/code/orchestration/workflows/__init__.py +14 -14
  52. gaia/agents/code/orchestration/workflows/base.py +80 -80
  53. gaia/agents/code/orchestration/workflows/nextjs.py +186 -186
  54. gaia/agents/code/orchestration/workflows/python.py +94 -94
  55. gaia/agents/code/prompts/__init__.py +11 -11
  56. gaia/agents/code/prompts/base_prompt.py +77 -77
  57. gaia/agents/code/prompts/code_patterns.py +2036 -2036
  58. gaia/agents/code/prompts/nextjs_prompt.py +40 -40
  59. gaia/agents/code/prompts/python_prompt.py +109 -109
  60. gaia/agents/code/schema_inference.py +365 -365
  61. gaia/agents/code/system_prompt.py +41 -41
  62. gaia/agents/code/tools/__init__.py +42 -42
  63. gaia/agents/code/tools/cli_tools.py +1138 -1138
  64. gaia/agents/code/tools/code_formatting.py +319 -319
  65. gaia/agents/code/tools/code_tools.py +769 -769
  66. gaia/agents/code/tools/error_fixing.py +1347 -1347
  67. gaia/agents/code/tools/external_tools.py +180 -180
  68. gaia/agents/code/tools/file_io.py +845 -845
  69. gaia/agents/code/tools/prisma_tools.py +190 -190
  70. gaia/agents/code/tools/project_management.py +1016 -1016
  71. gaia/agents/code/tools/testing.py +321 -321
  72. gaia/agents/code/tools/typescript_tools.py +122 -122
  73. gaia/agents/code/tools/validation_parsing.py +461 -461
  74. gaia/agents/code/tools/validation_tools.py +806 -806
  75. gaia/agents/code/tools/web_dev_tools.py +1758 -1758
  76. gaia/agents/code/validators/__init__.py +16 -16
  77. gaia/agents/code/validators/antipattern_checker.py +241 -241
  78. gaia/agents/code/validators/ast_analyzer.py +197 -197
  79. gaia/agents/code/validators/requirements_validator.py +145 -145
  80. gaia/agents/code/validators/syntax_validator.py +171 -171
  81. gaia/agents/docker/__init__.py +7 -7
  82. gaia/agents/docker/agent.py +642 -642
  83. gaia/agents/emr/__init__.py +8 -8
  84. gaia/agents/emr/agent.py +1506 -1506
  85. gaia/agents/emr/cli.py +1322 -1322
  86. gaia/agents/emr/constants.py +475 -475
  87. gaia/agents/emr/dashboard/__init__.py +4 -4
  88. gaia/agents/emr/dashboard/server.py +1974 -1974
  89. gaia/agents/jira/__init__.py +11 -11
  90. gaia/agents/jira/agent.py +894 -894
  91. gaia/agents/jira/jql_templates.py +299 -299
  92. gaia/agents/routing/__init__.py +7 -7
  93. gaia/agents/routing/agent.py +567 -570
  94. gaia/agents/routing/system_prompt.py +75 -75
  95. gaia/agents/summarize/__init__.py +11 -0
  96. gaia/agents/summarize/agent.py +885 -0
  97. gaia/agents/summarize/prompts.py +129 -0
  98. gaia/api/__init__.py +23 -23
  99. gaia/api/agent_registry.py +238 -238
  100. gaia/api/app.py +305 -305
  101. gaia/api/openai_server.py +575 -575
  102. gaia/api/schemas.py +186 -186
  103. gaia/api/sse_handler.py +373 -373
  104. gaia/apps/__init__.py +4 -4
  105. gaia/apps/llm/__init__.py +6 -6
  106. gaia/apps/llm/app.py +173 -169
  107. gaia/apps/summarize/app.py +116 -633
  108. gaia/apps/summarize/html_viewer.py +133 -133
  109. gaia/apps/summarize/pdf_formatter.py +284 -284
  110. gaia/audio/__init__.py +2 -2
  111. gaia/audio/audio_client.py +439 -439
  112. gaia/audio/audio_recorder.py +269 -269
  113. gaia/audio/kokoro_tts.py +599 -599
  114. gaia/audio/whisper_asr.py +432 -432
  115. gaia/chat/__init__.py +16 -16
  116. gaia/chat/app.py +430 -430
  117. gaia/chat/prompts.py +522 -522
  118. gaia/chat/sdk.py +1228 -1225
  119. gaia/cli.py +5481 -5621
  120. gaia/database/__init__.py +10 -10
  121. gaia/database/agent.py +176 -176
  122. gaia/database/mixin.py +290 -290
  123. gaia/database/testing.py +64 -64
  124. gaia/eval/batch_experiment.py +2332 -2332
  125. gaia/eval/claude.py +542 -542
  126. gaia/eval/config.py +37 -37
  127. gaia/eval/email_generator.py +512 -512
  128. gaia/eval/eval.py +3179 -3179
  129. gaia/eval/groundtruth.py +1130 -1130
  130. gaia/eval/transcript_generator.py +582 -582
  131. gaia/eval/webapp/README.md +167 -167
  132. gaia/eval/webapp/package-lock.json +875 -875
  133. gaia/eval/webapp/package.json +20 -20
  134. gaia/eval/webapp/public/app.js +3402 -3402
  135. gaia/eval/webapp/public/index.html +87 -87
  136. gaia/eval/webapp/public/styles.css +3661 -3661
  137. gaia/eval/webapp/server.js +415 -415
  138. gaia/eval/webapp/test-setup.js +72 -72
  139. gaia/llm/__init__.py +9 -2
  140. gaia/llm/base_client.py +60 -0
  141. gaia/llm/exceptions.py +12 -0
  142. gaia/llm/factory.py +70 -0
  143. gaia/llm/lemonade_client.py +3236 -3221
  144. gaia/llm/lemonade_manager.py +294 -294
  145. gaia/llm/providers/__init__.py +9 -0
  146. gaia/llm/providers/claude.py +108 -0
  147. gaia/llm/providers/lemonade.py +120 -0
  148. gaia/llm/providers/openai_provider.py +79 -0
  149. gaia/llm/vlm_client.py +382 -382
  150. gaia/logger.py +189 -189
  151. gaia/mcp/agent_mcp_server.py +245 -245
  152. gaia/mcp/blender_mcp_client.py +138 -138
  153. gaia/mcp/blender_mcp_server.py +648 -648
  154. gaia/mcp/context7_cache.py +332 -332
  155. gaia/mcp/external_services.py +518 -518
  156. gaia/mcp/mcp_bridge.py +811 -550
  157. gaia/mcp/servers/__init__.py +6 -6
  158. gaia/mcp/servers/docker_mcp.py +83 -83
  159. gaia/perf_analysis.py +361 -0
  160. gaia/rag/__init__.py +10 -10
  161. gaia/rag/app.py +293 -293
  162. gaia/rag/demo.py +304 -304
  163. gaia/rag/pdf_utils.py +235 -235
  164. gaia/rag/sdk.py +2194 -2194
  165. gaia/security.py +163 -163
  166. gaia/talk/app.py +289 -289
  167. gaia/talk/sdk.py +538 -538
  168. gaia/testing/__init__.py +87 -87
  169. gaia/testing/assertions.py +330 -330
  170. gaia/testing/fixtures.py +333 -333
  171. gaia/testing/mocks.py +493 -493
  172. gaia/util.py +46 -46
  173. gaia/utils/__init__.py +33 -33
  174. gaia/utils/file_watcher.py +675 -675
  175. gaia/utils/parsing.py +223 -223
  176. gaia/version.py +100 -100
  177. amd_gaia-0.14.3.dist-info/RECORD +0 -168
  178. gaia/agents/code/app.py +0 -266
  179. gaia/llm/llm_client.py +0 -729
  180. {amd_gaia-0.14.3.dist-info → amd_gaia-0.15.1.dist-info}/WHEEL +0 -0
  181. {amd_gaia-0.14.3.dist-info → amd_gaia-0.15.1.dist-info}/top_level.txt +0 -0
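
The headline change in this release is visible at the bottom of the list: the monolithic gaia/llm/llm_client.py (729 lines, deleted) has been split into an abstract base_client.py, a factory.py, an exceptions.py, and per-backend modules under gaia/llm/providers/. The three new provider modules are shown in full below. base_client.py itself (+60 lines) is not included in this diff, but the provider code pins down most of its shape: each provider imports LLMClient from ..base_client, and comments in all three note that unimplemented methods are "inherited from ABC - raises NotSupportedError". What follows is a minimal sketch of what that base class plausibly looks like; the method signatures are copied from the providers, while the default-raising bodies and the NotSupportedError definition (presumably the new exceptions.py, +12 lines) are assumptions.

    # Hypothetical sketch of gaia/llm/base_client.py, inferred from the three
    # provider modules shown below; the real file is not part of this diff.
    from abc import ABC, abstractmethod
    from typing import Iterator, Union


    class NotSupportedError(RuntimeError):
        """Assumed to live in the new gaia/llm/exceptions.py (+12 lines, not shown)."""


    class LLMClient(ABC):
        """Common interface subclassed by Claude, Lemonade, and OpenAI providers."""

        @property
        @abstractmethod
        def provider_name(self) -> str: ...

        @abstractmethod
        def generate(
            self, prompt: str, model: str | None = None, stream: bool = False, **kwargs
        ) -> Union[str, Iterator[str]]: ...

        @abstractmethod
        def chat(
            self, messages: list[dict], model: str | None = None, stream: bool = False, **kwargs
        ) -> Union[str, Iterator[str]]: ...

        # Optional capabilities: providers override what they support; the rest
        # raise NotSupportedError (e.g., embed() on Claude, vision() on OpenAI).
        def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
            raise NotSupportedError(f"{self.provider_name} does not support embeddings")

        def vision(self, images: list[bytes], prompt: str, **kwargs) -> str:
            raise NotSupportedError(f"{self.provider_name} does not support vision")

        def get_performance_stats(self) -> dict:
            raise NotSupportedError(f"{self.provider_name} does not expose performance stats")

        def load_model(self, model_name: str, **kwargs) -> None:
            raise NotSupportedError(f"{self.provider_name} does not support model loading")

        def unload_model(self) -> None:
            raise NotSupportedError(f"{self.provider_name} does not support model unloading")

If the base class does work this way, callers can probe optional capabilities (embeddings, vision, model management) by catching a single exception type instead of doing per-provider hasattr checks.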
gaia/llm/providers/claude.py (new file)
@@ -0,0 +1,108 @@
+# Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+# SPDX-License-Identifier: MIT
+"""Claude provider - no embeddings support."""
+
+from typing import Iterator, Optional, Union
+
+try:
+    import anthropic
+except ImportError:
+    anthropic = None  # type: ignore
+
+from ..base_client import LLMClient
+
+
+class ClaudeProvider(LLMClient):
+    """Claude (Anthropic) provider."""
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        model: str = "claude-3-5-sonnet-20241022",
+        system_prompt: Optional[str] = None,
+        **_kwargs,
+    ):
+        if anthropic is None:
+            raise ImportError(
+                "anthropic package is required for ClaudeProvider. "
+                "Install it with: pip install anthropic"
+            )
+
+        self._client = anthropic.Anthropic(api_key=api_key)
+        self._model = model
+        self._system_prompt = system_prompt
+
+    @property
+    def provider_name(self) -> str:
+        return "Claude"
+
+    def generate(
+        self,
+        prompt: str,
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        return self.chat(
+            [{"role": "user", "content": prompt}],
+            model=model,
+            stream=stream,
+            **kwargs,
+        )
+
+    def chat(
+        self,
+        messages: list[dict],
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # Build parameters for Anthropic messages.create
+        params = {
+            "model": model or self._model,
+            "messages": messages,
+            "stream": stream,
+            **kwargs,
+        }
+        # Claude API requires system prompt as separate parameter, not in messages
+        if self._system_prompt:
+            params["system"] = self._system_prompt
+
+        response = self._client.messages.create(**params)
+        if stream:
+            return self._handle_stream(response)
+        return response.content[0].text
+
+    # embed() inherited from ABC - raises NotSupportedError
+
+    def vision(self, images: list[bytes], prompt: str, **kwargs) -> str:
+        import base64
+
+        # Claude supports vision via messages
+        image_b64 = base64.b64encode(images[0]).decode()
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "image",
+                        "source": {
+                            "type": "base64",
+                            "media_type": "image/jpeg",
+                            "data": image_b64,
+                        },
+                    },
+                    {"type": "text", "text": prompt},
+                ],
+            }
+        ]
+        return self.chat(messages, **kwargs)
+
+    # get_performance_stats() inherited from ABC - raises NotSupportedError
+    # load_model() inherited from ABC - raises NotSupportedError
+    # unload_model() inherited from ABC - raises NotSupportedError
+
+    def _handle_stream(self, response) -> Iterator[str]:
+        for chunk in response:
+            if hasattr(chunk, "delta") and hasattr(chunk.delta, "text"):
+                yield chunk.delta.text
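
One caller-facing caveat in ClaudeProvider as shipped: Anthropic's Messages API requires max_tokens on every messages.create call, and the provider sets no default, so the value evidently has to arrive through **kwargs. A minimal usage sketch, assuming the anthropic SDK's standard fallback to the ANTHROPIC_API_KEY environment variable:

    # Minimal usage sketch for ClaudeProvider; max_tokens is forwarded via
    # **kwargs because the Anthropic Messages API requires it and the provider
    # does not set a default.
    from gaia.llm.providers.claude import ClaudeProvider

    provider = ClaudeProvider()  # SDK reads ANTHROPIC_API_KEY from the environment
    text = provider.generate("Summarize GAIA in one sentence.", max_tokens=256)
    print(text)

    # With stream=True, chat() returns an iterator of text deltas instead of a string.
    for piece in provider.chat(
        [{"role": "user", "content": "Write a haiku about NPUs."}],
        stream=True,
        max_tokens=128,
    ):
        print(piece, end="", flush=True)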
gaia/llm/providers/lemonade.py (new file)
@@ -0,0 +1,120 @@
+# Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+# SPDX-License-Identifier: MIT
+"""Lemonade provider - supports ALL methods."""
+
+from typing import Iterator, Optional, Union
+
+from ..base_client import LLMClient
+from ..lemonade_client import DEFAULT_MODEL_NAME, LemonadeClient
+
+
+class LemonadeProvider(LLMClient):
+    """Lemonade provider - local AMD-optimized inference."""
+
+    def __init__(
+        self,
+        model: Optional[str] = None,
+        base_url: Optional[str] = None,
+        host: Optional[str] = None,
+        port: Optional[int] = None,
+        system_prompt: Optional[str] = None,
+        **kwargs,
+    ):
+        # Build kwargs for LemonadeClient, only including non-None values
+        backend_kwargs = {}
+        if model is not None:
+            backend_kwargs["model"] = model
+        if base_url is not None:
+            backend_kwargs["base_url"] = base_url
+        if host is not None:
+            backend_kwargs["host"] = host
+        if port is not None:
+            backend_kwargs["port"] = port
+        backend_kwargs.update(kwargs)
+
+        self._backend = LemonadeClient(**backend_kwargs)
+        self._model = model
+        self._system_prompt = system_prompt
+
+    @property
+    def provider_name(self) -> str:
+        return "Lemonade"
+
+    def generate(
+        self,
+        prompt: str,
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # Use provided model, instance model, or default CPU model
+        effective_model = model or self._model or DEFAULT_MODEL_NAME
+
+        # Default to low temperature for deterministic responses (matches old LLMClient behavior)
+        kwargs.setdefault("temperature", 0.1)
+
+        response = self._backend.completions(
+            model=effective_model, prompt=prompt, stream=stream, **kwargs
+        )
+        if stream:
+            return self._handle_stream(response)
+        return self._extract_text(response)
+
+    def chat(
+        self,
+        messages: list[dict],
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # Use provided model, instance model, or default CPU model
+        effective_model = model or self._model or DEFAULT_MODEL_NAME
+
+        # Prepend system prompt if set
+        if self._system_prompt:
+            messages = [{"role": "system", "content": self._system_prompt}] + list(
+                messages
+            )
+
+        # Default to low temperature for deterministic responses (matches old LLMClient behavior)
+        kwargs.setdefault("temperature", 0.1)
+
+        response = self._backend.chat_completions(
+            model=effective_model, messages=messages, stream=stream, **kwargs
+        )
+        if stream:
+            return self._handle_stream(response)
+        return response["choices"][0]["message"]["content"]
+
+    def embed(self, texts: list[str], **kwargs) -> list[list[float]]:
+        response = self._backend.embeddings(texts, **kwargs)
+        return [item["embedding"] for item in response["data"]]
+
+    def vision(self, images: list[bytes], prompt: str, **kwargs) -> str:
+        # Delegate to VLMClient
+        from ..vlm_client import VLMClient
+
+        vlm = VLMClient(base_url=self._backend.base_url)
+        return vlm.extract_from_image(images[0], prompt=prompt)
+
+    def get_performance_stats(self) -> dict:
+        return self._backend.get_stats() or {}
+
+    def load_model(self, model_name: str, **kwargs) -> None:
+        self._backend.load_model(model_name, **kwargs)
+        self._model = model_name
+
+    def unload_model(self) -> None:
+        self._backend.unload_model()
+
+    def _extract_text(self, response: dict) -> str:
+        return response["choices"][0]["text"]
+
+    def _handle_stream(self, response) -> Iterator[str]:
+        for chunk in response:
+            if "choices" in chunk and chunk["choices"]:
+                delta = chunk["choices"][0].get("delta", {})
+                if "content" in delta:
+                    yield delta["content"]
+                elif "text" in chunk["choices"][0]:
+                    yield chunk["choices"][0]["text"]
gaia/llm/providers/openai_provider.py (new file)
@@ -0,0 +1,79 @@
+# Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+# SPDX-License-Identifier: MIT
+"""OpenAI provider - no vision support."""
+
+from typing import Iterator, Optional, Union
+
+from ..base_client import LLMClient
+
+
+class OpenAIProvider(LLMClient):
+    """OpenAI (OpenAI API) provider."""
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        model: str = "gpt-4o",
+        system_prompt: Optional[str] = None,
+        **_kwargs,
+    ):
+        import openai
+
+        self._client = openai.OpenAI(api_key=api_key)
+        self._model = model
+        self._system_prompt = system_prompt
+
+    @property
+    def provider_name(self) -> str:
+        return "OpenAI"
+
+    def generate(
+        self,
+        prompt: str,
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # OpenAI doesn't have a separate completions endpoint for chat models
+        return self.chat(
+            [{"role": "user", "content": prompt}],
+            model=model,
+            stream=stream,
+            **kwargs,
+        )
+
+    def chat(
+        self,
+        messages: list[dict],
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, Iterator[str]]:
+        # Prepend system prompt if set
+        if self._system_prompt:
+            messages = [{"role": "system", "content": self._system_prompt}] + list(
+                messages
+            )
+
+        response = self._client.chat.completions.create(
+            model=model or self._model, messages=messages, stream=stream, **kwargs
+        )
+        if stream:
+            return self._handle_stream(response)
+        return response.choices[0].message.content
+
+    def embed(
+        self, texts: list[str], model: str = "text-embedding-3-small", **kwargs
+    ) -> list[list[float]]:
+        response = self._client.embeddings.create(model=model, input=texts, **kwargs)
+        return [item.embedding for item in response.data]
+
+    # vision() inherited from ABC - raises NotSupportedError
+    # get_performance_stats() inherited from ABC - raises NotSupportedError
+    # load_model() inherited from ABC - raises NotSupportedError
+    # unload_model() inherited from ABC - raises NotSupportedError
+
+    def _handle_stream(self, response) -> Iterator[str]:
+        for chunk in response:
+            if chunk.choices and chunk.choices[0].delta.content:
+                yield chunk.choices[0].delta.content
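
For completeness, the same pattern on the OpenAI side; generate() is a thin wrapper over chat(), since the provider targets chat models only. A usage sketch, assuming the openai SDK's standard fallback to the OPENAI_API_KEY environment variable:

    # Usage sketch for OpenAIProvider.
    from gaia.llm.providers.openai_provider import OpenAIProvider

    provider = OpenAIProvider(system_prompt="Answer in one sentence.")

    # Non-streaming chat returns a plain string
    print(provider.chat([{"role": "user", "content": "What is GAIA?"}]))

    # Streaming yields content deltas; empty deltas are filtered by _handle_stream
    for piece in provider.generate("Count to five.", stream=True):
        print(piece, end="", flush=True)

    # Embeddings default to text-embedding-3-small unless another model is passed
    vectors = provider.embed(["alpha", "beta"])
    print(len(vectors), len(vectors[0]))

How a caller obtains one of these providers in the first place is presumably the job of the new gaia/llm/factory.py (+70 lines), which this diff does not show.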