amd-gaia 0.15.0__py3-none-any.whl → 0.15.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (185)
  1. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/METADATA +222 -223
  2. amd_gaia-0.15.2.dist-info/RECORD +182 -0
  3. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/WHEEL +1 -1
  4. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/entry_points.txt +1 -0
  5. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/licenses/LICENSE.md +20 -20
  6. gaia/__init__.py +29 -29
  7. gaia/agents/__init__.py +19 -19
  8. gaia/agents/base/__init__.py +9 -9
  9. gaia/agents/base/agent.py +2132 -2177
  10. gaia/agents/base/api_agent.py +119 -120
  11. gaia/agents/base/console.py +1967 -1841
  12. gaia/agents/base/errors.py +237 -237
  13. gaia/agents/base/mcp_agent.py +86 -86
  14. gaia/agents/base/tools.py +88 -83
  15. gaia/agents/blender/__init__.py +7 -0
  16. gaia/agents/blender/agent.py +553 -556
  17. gaia/agents/blender/agent_simple.py +133 -135
  18. gaia/agents/blender/app.py +211 -211
  19. gaia/agents/blender/app_simple.py +41 -41
  20. gaia/agents/blender/core/__init__.py +16 -16
  21. gaia/agents/blender/core/materials.py +506 -506
  22. gaia/agents/blender/core/objects.py +316 -316
  23. gaia/agents/blender/core/rendering.py +225 -225
  24. gaia/agents/blender/core/scene.py +220 -220
  25. gaia/agents/blender/core/view.py +146 -146
  26. gaia/agents/chat/__init__.py +9 -9
  27. gaia/agents/chat/agent.py +809 -835
  28. gaia/agents/chat/app.py +1065 -1058
  29. gaia/agents/chat/session.py +508 -508
  30. gaia/agents/chat/tools/__init__.py +15 -15
  31. gaia/agents/chat/tools/file_tools.py +96 -96
  32. gaia/agents/chat/tools/rag_tools.py +1744 -1729
  33. gaia/agents/chat/tools/shell_tools.py +437 -436
  34. gaia/agents/code/__init__.py +7 -7
  35. gaia/agents/code/agent.py +549 -549
  36. gaia/agents/code/cli.py +377 -0
  37. gaia/agents/code/models.py +135 -135
  38. gaia/agents/code/orchestration/__init__.py +24 -24
  39. gaia/agents/code/orchestration/checklist_executor.py +1763 -1763
  40. gaia/agents/code/orchestration/checklist_generator.py +713 -713
  41. gaia/agents/code/orchestration/factories/__init__.py +9 -9
  42. gaia/agents/code/orchestration/factories/base.py +63 -63
  43. gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -118
  44. gaia/agents/code/orchestration/factories/python_factory.py +106 -106
  45. gaia/agents/code/orchestration/orchestrator.py +841 -841
  46. gaia/agents/code/orchestration/project_analyzer.py +391 -391
  47. gaia/agents/code/orchestration/steps/__init__.py +67 -67
  48. gaia/agents/code/orchestration/steps/base.py +188 -188
  49. gaia/agents/code/orchestration/steps/error_handler.py +314 -314
  50. gaia/agents/code/orchestration/steps/nextjs.py +828 -828
  51. gaia/agents/code/orchestration/steps/python.py +307 -307
  52. gaia/agents/code/orchestration/template_catalog.py +469 -469
  53. gaia/agents/code/orchestration/workflows/__init__.py +14 -14
  54. gaia/agents/code/orchestration/workflows/base.py +80 -80
  55. gaia/agents/code/orchestration/workflows/nextjs.py +186 -186
  56. gaia/agents/code/orchestration/workflows/python.py +94 -94
  57. gaia/agents/code/prompts/__init__.py +11 -11
  58. gaia/agents/code/prompts/base_prompt.py +77 -77
  59. gaia/agents/code/prompts/code_patterns.py +2034 -2036
  60. gaia/agents/code/prompts/nextjs_prompt.py +40 -40
  61. gaia/agents/code/prompts/python_prompt.py +109 -109
  62. gaia/agents/code/schema_inference.py +365 -365
  63. gaia/agents/code/system_prompt.py +41 -41
  64. gaia/agents/code/tools/__init__.py +42 -42
  65. gaia/agents/code/tools/cli_tools.py +1138 -1138
  66. gaia/agents/code/tools/code_formatting.py +319 -319
  67. gaia/agents/code/tools/code_tools.py +769 -769
  68. gaia/agents/code/tools/error_fixing.py +1347 -1347
  69. gaia/agents/code/tools/external_tools.py +180 -180
  70. gaia/agents/code/tools/file_io.py +845 -845
  71. gaia/agents/code/tools/prisma_tools.py +190 -190
  72. gaia/agents/code/tools/project_management.py +1016 -1016
  73. gaia/agents/code/tools/testing.py +321 -321
  74. gaia/agents/code/tools/typescript_tools.py +122 -122
  75. gaia/agents/code/tools/validation_parsing.py +461 -461
  76. gaia/agents/code/tools/validation_tools.py +806 -806
  77. gaia/agents/code/tools/web_dev_tools.py +1758 -1758
  78. gaia/agents/code/validators/__init__.py +16 -16
  79. gaia/agents/code/validators/antipattern_checker.py +241 -241
  80. gaia/agents/code/validators/ast_analyzer.py +197 -197
  81. gaia/agents/code/validators/requirements_validator.py +145 -145
  82. gaia/agents/code/validators/syntax_validator.py +171 -171
  83. gaia/agents/docker/__init__.py +7 -7
  84. gaia/agents/docker/agent.py +643 -642
  85. gaia/agents/emr/__init__.py +8 -8
  86. gaia/agents/emr/agent.py +1504 -1506
  87. gaia/agents/emr/cli.py +1322 -1322
  88. gaia/agents/emr/constants.py +475 -475
  89. gaia/agents/emr/dashboard/__init__.py +4 -4
  90. gaia/agents/emr/dashboard/server.py +1972 -1974
  91. gaia/agents/jira/__init__.py +11 -11
  92. gaia/agents/jira/agent.py +894 -894
  93. gaia/agents/jira/jql_templates.py +299 -299
  94. gaia/agents/routing/__init__.py +7 -7
  95. gaia/agents/routing/agent.py +567 -570
  96. gaia/agents/routing/system_prompt.py +75 -75
  97. gaia/agents/summarize/__init__.py +11 -0
  98. gaia/agents/summarize/agent.py +885 -0
  99. gaia/agents/summarize/prompts.py +129 -0
  100. gaia/api/__init__.py +23 -23
  101. gaia/api/agent_registry.py +238 -238
  102. gaia/api/app.py +305 -305
  103. gaia/api/openai_server.py +575 -575
  104. gaia/api/schemas.py +186 -186
  105. gaia/api/sse_handler.py +373 -373
  106. gaia/apps/__init__.py +4 -4
  107. gaia/apps/llm/__init__.py +6 -6
  108. gaia/apps/llm/app.py +184 -169
  109. gaia/apps/summarize/app.py +116 -633
  110. gaia/apps/summarize/html_viewer.py +133 -133
  111. gaia/apps/summarize/pdf_formatter.py +284 -284
  112. gaia/audio/__init__.py +2 -2
  113. gaia/audio/audio_client.py +439 -439
  114. gaia/audio/audio_recorder.py +269 -269
  115. gaia/audio/kokoro_tts.py +599 -599
  116. gaia/audio/whisper_asr.py +432 -432
  117. gaia/chat/__init__.py +16 -16
  118. gaia/chat/app.py +428 -430
  119. gaia/chat/prompts.py +522 -522
  120. gaia/chat/sdk.py +1228 -1225
  121. gaia/cli.py +5659 -5632
  122. gaia/database/__init__.py +10 -10
  123. gaia/database/agent.py +176 -176
  124. gaia/database/mixin.py +290 -290
  125. gaia/database/testing.py +64 -64
  126. gaia/eval/batch_experiment.py +2332 -2332
  127. gaia/eval/claude.py +542 -542
  128. gaia/eval/config.py +37 -37
  129. gaia/eval/email_generator.py +512 -512
  130. gaia/eval/eval.py +3179 -3179
  131. gaia/eval/groundtruth.py +1130 -1130
  132. gaia/eval/transcript_generator.py +582 -582
  133. gaia/eval/webapp/README.md +167 -167
  134. gaia/eval/webapp/package-lock.json +875 -875
  135. gaia/eval/webapp/package.json +20 -20
  136. gaia/eval/webapp/public/app.js +3402 -3402
  137. gaia/eval/webapp/public/index.html +87 -87
  138. gaia/eval/webapp/public/styles.css +3661 -3661
  139. gaia/eval/webapp/server.js +415 -415
  140. gaia/eval/webapp/test-setup.js +72 -72
  141. gaia/installer/__init__.py +23 -0
  142. gaia/installer/init_command.py +1275 -0
  143. gaia/installer/lemonade_installer.py +619 -0
  144. gaia/llm/__init__.py +10 -2
  145. gaia/llm/base_client.py +60 -0
  146. gaia/llm/exceptions.py +12 -0
  147. gaia/llm/factory.py +70 -0
  148. gaia/llm/lemonade_client.py +3421 -3221
  149. gaia/llm/lemonade_manager.py +294 -294
  150. gaia/llm/providers/__init__.py +9 -0
  151. gaia/llm/providers/claude.py +108 -0
  152. gaia/llm/providers/lemonade.py +118 -0
  153. gaia/llm/providers/openai_provider.py +79 -0
  154. gaia/llm/vlm_client.py +382 -382
  155. gaia/logger.py +189 -189
  156. gaia/mcp/agent_mcp_server.py +245 -245
  157. gaia/mcp/blender_mcp_client.py +138 -138
  158. gaia/mcp/blender_mcp_server.py +648 -648
  159. gaia/mcp/context7_cache.py +332 -332
  160. gaia/mcp/external_services.py +518 -518
  161. gaia/mcp/mcp_bridge.py +811 -550
  162. gaia/mcp/servers/__init__.py +6 -6
  163. gaia/mcp/servers/docker_mcp.py +83 -83
  164. gaia/perf_analysis.py +361 -0
  165. gaia/rag/__init__.py +10 -10
  166. gaia/rag/app.py +293 -293
  167. gaia/rag/demo.py +304 -304
  168. gaia/rag/pdf_utils.py +235 -235
  169. gaia/rag/sdk.py +2194 -2194
  170. gaia/security.py +183 -163
  171. gaia/talk/app.py +287 -289
  172. gaia/talk/sdk.py +538 -538
  173. gaia/testing/__init__.py +87 -87
  174. gaia/testing/assertions.py +330 -330
  175. gaia/testing/fixtures.py +333 -333
  176. gaia/testing/mocks.py +493 -493
  177. gaia/util.py +46 -46
  178. gaia/utils/__init__.py +33 -33
  179. gaia/utils/file_watcher.py +675 -675
  180. gaia/utils/parsing.py +223 -223
  181. gaia/version.py +100 -100
  182. amd_gaia-0.15.0.dist-info/RECORD +0 -168
  183. gaia/agents/code/app.py +0 -266
  184. gaia/llm/llm_client.py +0 -723
  185. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/top_level.txt +0 -0
gaia/apps/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
2
- # SPDX-License-Identifier: MIT
3
-
4
- """Gaia Applications"""
1
+ # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
2
+ # SPDX-License-Identifier: MIT
3
+
4
+ """Gaia Applications"""
gaia/apps/llm/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
- # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
2
- # SPDX-License-Identifier: MIT
3
-
4
- """
5
- LLM agent module for simple LLM queries.
6
- """
1
+ # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
2
+ # SPDX-License-Identifier: MIT
3
+
4
+ """
5
+ LLM agent module for simple LLM queries.
6
+ """
gaia/apps/llm/app.py CHANGED
@@ -1,169 +1,184 @@
1
- #!/usr/bin/env python3
2
- #
3
- # Copyright(C) 2024-2025 Advanced Micro Devices, Inc. All rights reserved.
4
- # SPDX-License-Identifier: MIT
5
-
6
- """
7
- Simple LLM App using the existing LLMClient wrapper to call Lemonade localhost backend.
8
- """
9
-
10
- import argparse
11
- import sys
12
- from typing import Iterator, Optional, Union
13
-
14
- from gaia.llm.llm_client import LLMClient
15
- from gaia.logger import get_logger
16
-
17
-
18
- class LlmApp:
19
- """Simple LLM application wrapper using LLMClient."""
20
-
21
- def __init__(
22
- self, system_prompt: Optional[str] = None, base_url: Optional[str] = None
23
- ):
24
- """Initialize the LLM app.
25
-
26
- Args:
27
- system_prompt: Optional system prompt for the LLM
28
- base_url: Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)
29
- """
30
- self.log = get_logger(__name__)
31
- self.client = LLMClient(system_prompt=system_prompt, base_url=base_url)
32
- self.log.debug("LLM app initialized")
33
-
34
- def query(
35
- self,
36
- prompt: str,
37
- model: Optional[str] = None,
38
- max_tokens: int = 512,
39
- stream: bool = False,
40
- **kwargs,
41
- ) -> Union[str, Iterator[str]]:
42
- """Send a query to the LLM and get a response."""
43
- if not prompt.strip():
44
- raise ValueError("Prompt cannot be empty")
45
-
46
- self.log.debug(f"Processing query with model: {model or 'default'}")
47
-
48
- # Prepare arguments
49
- generate_kwargs = dict(kwargs)
50
- if max_tokens:
51
- generate_kwargs["max_tokens"] = max_tokens
52
-
53
- # Generate response
54
- return self.client.generate(
55
- prompt=prompt.strip(), model=model, stream=stream, **generate_kwargs
56
- )
57
-
58
- def get_stats(self):
59
- """Get performance statistics."""
60
- return self.client.get_performance_stats() or {}
61
-
62
-
63
- def main(
64
- query: Optional[str] = None,
65
- model: Optional[str] = None,
66
- max_tokens: int = 512,
67
- system_prompt: Optional[str] = None,
68
- stream: bool = True,
69
- base_url: Optional[str] = None,
70
- ) -> str:
71
- """Main function to run the LLM app.
72
-
73
- Args:
74
- query: Query to send to the LLM
75
- model: Model name to use
76
- max_tokens: Maximum tokens to generate
77
- system_prompt: Optional system prompt
78
- stream: Whether to stream the response
79
- base_url: Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)
80
- """
81
- if not query:
82
- raise ValueError("Query is required")
83
-
84
- app = LlmApp(system_prompt=system_prompt, base_url=base_url)
85
- response = app.query(
86
- prompt=query, model=model, max_tokens=max_tokens, stream=stream
87
- )
88
-
89
- if stream:
90
- # Handle streaming response
91
- full_response = ""
92
- for chunk in response:
93
- print(chunk, end="", flush=True)
94
- full_response += chunk
95
- print() # Add newline
96
- return full_response
97
- else:
98
- return response
99
-
100
-
101
- def cli_main():
102
- """Command line interface."""
103
- parser = argparse.ArgumentParser(description="Simple LLM App")
104
-
105
- parser.add_argument("query", help="Query to send to the LLM")
106
- parser.add_argument("--model", help="Model name to use")
107
- parser.add_argument(
108
- "--max-tokens", type=int, default=512, help="Max tokens (default: 512)"
109
- )
110
- parser.add_argument("--system-prompt", help="System prompt")
111
- parser.add_argument("--stream", action="store_true", help="Stream response")
112
- parser.add_argument("--stats", action="store_true", help="Show stats")
113
- parser.add_argument(
114
- "--base-url",
115
- help="Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)",
116
- )
117
- parser.add_argument(
118
- "--logging-level",
119
- default="INFO",
120
- choices=["DEBUG", "INFO", "WARNING", "ERROR"],
121
- help="Logging level",
122
- )
123
-
124
- args = parser.parse_args()
125
-
126
- # Setup logging
127
- import logging
128
-
129
- from gaia.logger import log_manager
130
-
131
- log_manager.set_level("gaia", getattr(logging, args.logging_level))
132
-
133
- try:
134
- app = LlmApp(system_prompt=args.system_prompt, base_url=args.base_url)
135
-
136
- response = app.query(
137
- prompt=args.query,
138
- model=args.model,
139
- max_tokens=args.max_tokens,
140
- stream=args.stream,
141
- )
142
-
143
- if args.stream:
144
- # Already printed during streaming
145
- pass
146
- else:
147
- print(f"\n{'='*50}")
148
- print("LLM Response:")
149
- print("=" * 50)
150
- print(response)
151
- print("=" * 50)
152
-
153
- if args.stats:
154
- stats = app.get_stats()
155
- if stats:
156
- print(f"\n{'='*50}")
157
- print("Performance Statistics:")
158
- print("=" * 50)
159
- for key, value in stats.items():
160
- print(f"{key}: {value}")
161
- print("=" * 50)
162
-
163
- except Exception as e:
164
- print(f"Error: {e}")
165
- sys.exit(1)
166
-
167
-
168
- if __name__ == "__main__":
169
- cli_main()
1
+ #!/usr/bin/env python3
2
+ #
3
+ # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ """
7
+ Simple LLM App using the existing LLMClient wrapper to call Lemonade localhost backend.
8
+ """
9
+
10
+ import argparse
11
+ import sys
12
+ from typing import Iterator, Optional, Union
13
+
14
+ from gaia.llm import create_client
15
+ from gaia.logger import get_logger
16
+
17
+
18
+ class LlmApp:
19
+ """Simple LLM application wrapper using LLMClient."""
20
+
21
+ def __init__(
22
+ self, system_prompt: Optional[str] = None, base_url: Optional[str] = None
23
+ ):
24
+ """Initialize the LLM app.
25
+
26
+ Args:
27
+ system_prompt: Optional system prompt for the LLM
28
+ base_url: Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)
29
+ """
30
+ self.log = get_logger(__name__)
31
+ self.client = create_client(
32
+ "lemonade",
33
+ base_url=base_url,
34
+ system_prompt=system_prompt,
35
+ )
36
+ self.log.debug("LLM app initialized")
37
+
38
+ def query(
39
+ self,
40
+ prompt: str,
41
+ model: Optional[str] = None,
42
+ max_tokens: int = 512,
43
+ stream: bool = False,
44
+ **kwargs,
45
+ ) -> Union[str, Iterator[str]]:
46
+ """Send a query to the LLM and get a response."""
47
+ if not prompt.strip():
48
+ raise ValueError("Prompt cannot be empty")
49
+
50
+ self.log.debug(f"Processing query with model: {model or 'default'}")
51
+
52
+ # Prepare arguments
53
+ generate_kwargs = dict(kwargs)
54
+ if max_tokens:
55
+ generate_kwargs["max_tokens"] = max_tokens
56
+
57
+ # Generate response
58
+ return self.client.generate(
59
+ prompt=prompt.strip(), model=model, stream=stream, **generate_kwargs
60
+ )
61
+
62
+ def get_stats(self):
63
+ """Get performance statistics."""
64
+ return self.client.get_performance_stats() or {}
65
+
66
+
67
+ def main(
68
+ query: Optional[str] = None,
69
+ model: Optional[str] = None,
70
+ max_tokens: int = 512,
71
+ system_prompt: Optional[str] = None,
72
+ stream: bool = True,
73
+ base_url: Optional[str] = None,
74
+ ) -> str:
75
+ """Main function to run the LLM app.
76
+
77
+ Args:
78
+ query: Query to send to the LLM
79
+ model: Model name to use
80
+ max_tokens: Maximum tokens to generate
81
+ system_prompt: Optional system prompt
82
+ stream: Whether to stream the response
83
+ base_url: Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)
84
+ """
85
+ from rich.console import Console
86
+
87
+ console = Console()
88
+
89
+ if not query:
90
+ raise ValueError("Query is required")
91
+
92
+ app = LlmApp(system_prompt=system_prompt, base_url=base_url)
93
+ response = app.query(
94
+ prompt=query, model=model, max_tokens=max_tokens, stream=stream
95
+ )
96
+
97
+ if stream:
98
+ # Handle streaming response with Rich formatting
99
+ console.print()
100
+ console.print("[bold cyan]🤖 gaia:[/bold cyan] ", end="")
101
+ full_response = ""
102
+ for chunk in response:
103
+ if chunk: # Skip None chunks
104
+ print(chunk, end="", flush=True)
105
+ full_response += chunk
106
+ print() # Add newline
107
+ console.print()
108
+ return full_response
109
+ else:
110
+ console.print()
111
+ console.print(f"[bold cyan]🤖 gaia:[/bold cyan] {response}")
112
+ console.print()
113
+ return response
114
+
115
+
116
+ def cli_main():
117
+ """Command line interface."""
118
+ parser = argparse.ArgumentParser(description="Simple LLM App")
119
+
120
+ parser.add_argument("query", help="Query to send to the LLM")
121
+ parser.add_argument("--model", help="Model name to use")
122
+ parser.add_argument(
123
+ "--max-tokens", type=int, default=512, help="Max tokens (default: 512)"
124
+ )
125
+ parser.add_argument("--system-prompt", help="System prompt")
126
+ parser.add_argument("--stream", action="store_true", help="Stream response")
127
+ parser.add_argument("--stats", action="store_true", help="Show stats")
128
+ parser.add_argument(
129
+ "--base-url",
130
+ help="Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)",
131
+ )
132
+ parser.add_argument(
133
+ "--logging-level",
134
+ default="INFO",
135
+ choices=["DEBUG", "INFO", "WARNING", "ERROR"],
136
+ help="Logging level",
137
+ )
138
+
139
+ args = parser.parse_args()
140
+
141
+ # Setup logging
142
+ import logging
143
+
144
+ from gaia.logger import log_manager
145
+
146
+ log_manager.set_level("gaia", getattr(logging, args.logging_level))
147
+
148
+ try:
149
+ app = LlmApp(system_prompt=args.system_prompt, base_url=args.base_url)
150
+
151
+ response = app.query(
152
+ prompt=args.query,
153
+ model=args.model,
154
+ max_tokens=args.max_tokens,
155
+ stream=args.stream,
156
+ )
157
+
158
+ if args.stream:
159
+ # Already printed during streaming
160
+ pass
161
+ else:
162
+ print(f"\n{'='*50}")
163
+ print("LLM Response:")
164
+ print("=" * 50)
165
+ print(response)
166
+ print("=" * 50)
167
+
168
+ if args.stats:
169
+ stats = app.get_stats()
170
+ if stats:
171
+ print(f"\n{'='*50}")
172
+ print("Performance Statistics:")
173
+ print("=" * 50)
174
+ for key, value in stats.items():
175
+ print(f"{key}: {value}")
176
+ print("=" * 50)
177
+
178
+ except Exception as e:
179
+ print(f"Error: {e}")
180
+ sys.exit(1)
181
+
182
+
183
+ if __name__ == "__main__":
184
+ cli_main()