amd-gaia 0.15.0__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (181)
  1. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/METADATA +223 -223
  2. amd_gaia-0.15.1.dist-info/RECORD +178 -0
  3. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/entry_points.txt +1 -0
  4. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/licenses/LICENSE.md +20 -20
  5. gaia/__init__.py +29 -29
  6. gaia/agents/__init__.py +19 -19
  7. gaia/agents/base/__init__.py +9 -9
  8. gaia/agents/base/agent.py +2177 -2177
  9. gaia/agents/base/api_agent.py +120 -120
  10. gaia/agents/base/console.py +1841 -1841
  11. gaia/agents/base/errors.py +237 -237
  12. gaia/agents/base/mcp_agent.py +86 -86
  13. gaia/agents/base/tools.py +83 -83
  14. gaia/agents/blender/agent.py +556 -556
  15. gaia/agents/blender/agent_simple.py +133 -135
  16. gaia/agents/blender/app.py +211 -211
  17. gaia/agents/blender/app_simple.py +41 -41
  18. gaia/agents/blender/core/__init__.py +16 -16
  19. gaia/agents/blender/core/materials.py +506 -506
  20. gaia/agents/blender/core/objects.py +316 -316
  21. gaia/agents/blender/core/rendering.py +225 -225
  22. gaia/agents/blender/core/scene.py +220 -220
  23. gaia/agents/blender/core/view.py +146 -146
  24. gaia/agents/chat/__init__.py +9 -9
  25. gaia/agents/chat/agent.py +835 -835
  26. gaia/agents/chat/app.py +1058 -1058
  27. gaia/agents/chat/session.py +508 -508
  28. gaia/agents/chat/tools/__init__.py +15 -15
  29. gaia/agents/chat/tools/file_tools.py +96 -96
  30. gaia/agents/chat/tools/rag_tools.py +1729 -1729
  31. gaia/agents/chat/tools/shell_tools.py +436 -436
  32. gaia/agents/code/__init__.py +7 -7
  33. gaia/agents/code/agent.py +549 -549
  34. gaia/agents/code/cli.py +377 -0
  35. gaia/agents/code/models.py +135 -135
  36. gaia/agents/code/orchestration/__init__.py +24 -24
  37. gaia/agents/code/orchestration/checklist_executor.py +1763 -1763
  38. gaia/agents/code/orchestration/checklist_generator.py +713 -713
  39. gaia/agents/code/orchestration/factories/__init__.py +9 -9
  40. gaia/agents/code/orchestration/factories/base.py +63 -63
  41. gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -118
  42. gaia/agents/code/orchestration/factories/python_factory.py +106 -106
  43. gaia/agents/code/orchestration/orchestrator.py +841 -841
  44. gaia/agents/code/orchestration/project_analyzer.py +391 -391
  45. gaia/agents/code/orchestration/steps/__init__.py +67 -67
  46. gaia/agents/code/orchestration/steps/base.py +188 -188
  47. gaia/agents/code/orchestration/steps/error_handler.py +314 -314
  48. gaia/agents/code/orchestration/steps/nextjs.py +828 -828
  49. gaia/agents/code/orchestration/steps/python.py +307 -307
  50. gaia/agents/code/orchestration/template_catalog.py +469 -469
  51. gaia/agents/code/orchestration/workflows/__init__.py +14 -14
  52. gaia/agents/code/orchestration/workflows/base.py +80 -80
  53. gaia/agents/code/orchestration/workflows/nextjs.py +186 -186
  54. gaia/agents/code/orchestration/workflows/python.py +94 -94
  55. gaia/agents/code/prompts/__init__.py +11 -11
  56. gaia/agents/code/prompts/base_prompt.py +77 -77
  57. gaia/agents/code/prompts/code_patterns.py +2036 -2036
  58. gaia/agents/code/prompts/nextjs_prompt.py +40 -40
  59. gaia/agents/code/prompts/python_prompt.py +109 -109
  60. gaia/agents/code/schema_inference.py +365 -365
  61. gaia/agents/code/system_prompt.py +41 -41
  62. gaia/agents/code/tools/__init__.py +42 -42
  63. gaia/agents/code/tools/cli_tools.py +1138 -1138
  64. gaia/agents/code/tools/code_formatting.py +319 -319
  65. gaia/agents/code/tools/code_tools.py +769 -769
  66. gaia/agents/code/tools/error_fixing.py +1347 -1347
  67. gaia/agents/code/tools/external_tools.py +180 -180
  68. gaia/agents/code/tools/file_io.py +845 -845
  69. gaia/agents/code/tools/prisma_tools.py +190 -190
  70. gaia/agents/code/tools/project_management.py +1016 -1016
  71. gaia/agents/code/tools/testing.py +321 -321
  72. gaia/agents/code/tools/typescript_tools.py +122 -122
  73. gaia/agents/code/tools/validation_parsing.py +461 -461
  74. gaia/agents/code/tools/validation_tools.py +806 -806
  75. gaia/agents/code/tools/web_dev_tools.py +1758 -1758
  76. gaia/agents/code/validators/__init__.py +16 -16
  77. gaia/agents/code/validators/antipattern_checker.py +241 -241
  78. gaia/agents/code/validators/ast_analyzer.py +197 -197
  79. gaia/agents/code/validators/requirements_validator.py +145 -145
  80. gaia/agents/code/validators/syntax_validator.py +171 -171
  81. gaia/agents/docker/__init__.py +7 -7
  82. gaia/agents/docker/agent.py +642 -642
  83. gaia/agents/emr/__init__.py +8 -8
  84. gaia/agents/emr/agent.py +1506 -1506
  85. gaia/agents/emr/cli.py +1322 -1322
  86. gaia/agents/emr/constants.py +475 -475
  87. gaia/agents/emr/dashboard/__init__.py +4 -4
  88. gaia/agents/emr/dashboard/server.py +1974 -1974
  89. gaia/agents/jira/__init__.py +11 -11
  90. gaia/agents/jira/agent.py +894 -894
  91. gaia/agents/jira/jql_templates.py +299 -299
  92. gaia/agents/routing/__init__.py +7 -7
  93. gaia/agents/routing/agent.py +567 -570
  94. gaia/agents/routing/system_prompt.py +75 -75
  95. gaia/agents/summarize/__init__.py +11 -0
  96. gaia/agents/summarize/agent.py +885 -0
  97. gaia/agents/summarize/prompts.py +129 -0
  98. gaia/api/__init__.py +23 -23
  99. gaia/api/agent_registry.py +238 -238
  100. gaia/api/app.py +305 -305
  101. gaia/api/openai_server.py +575 -575
  102. gaia/api/schemas.py +186 -186
  103. gaia/api/sse_handler.py +373 -373
  104. gaia/apps/__init__.py +4 -4
  105. gaia/apps/llm/__init__.py +6 -6
  106. gaia/apps/llm/app.py +173 -169
  107. gaia/apps/summarize/app.py +116 -633
  108. gaia/apps/summarize/html_viewer.py +133 -133
  109. gaia/apps/summarize/pdf_formatter.py +284 -284
  110. gaia/audio/__init__.py +2 -2
  111. gaia/audio/audio_client.py +439 -439
  112. gaia/audio/audio_recorder.py +269 -269
  113. gaia/audio/kokoro_tts.py +599 -599
  114. gaia/audio/whisper_asr.py +432 -432
  115. gaia/chat/__init__.py +16 -16
  116. gaia/chat/app.py +430 -430
  117. gaia/chat/prompts.py +522 -522
  118. gaia/chat/sdk.py +1228 -1225
  119. gaia/cli.py +5481 -5632
  120. gaia/database/__init__.py +10 -10
  121. gaia/database/agent.py +176 -176
  122. gaia/database/mixin.py +290 -290
  123. gaia/database/testing.py +64 -64
  124. gaia/eval/batch_experiment.py +2332 -2332
  125. gaia/eval/claude.py +542 -542
  126. gaia/eval/config.py +37 -37
  127. gaia/eval/email_generator.py +512 -512
  128. gaia/eval/eval.py +3179 -3179
  129. gaia/eval/groundtruth.py +1130 -1130
  130. gaia/eval/transcript_generator.py +582 -582
  131. gaia/eval/webapp/README.md +167 -167
  132. gaia/eval/webapp/package-lock.json +875 -875
  133. gaia/eval/webapp/package.json +20 -20
  134. gaia/eval/webapp/public/app.js +3402 -3402
  135. gaia/eval/webapp/public/index.html +87 -87
  136. gaia/eval/webapp/public/styles.css +3661 -3661
  137. gaia/eval/webapp/server.js +415 -415
  138. gaia/eval/webapp/test-setup.js +72 -72
  139. gaia/llm/__init__.py +9 -2
  140. gaia/llm/base_client.py +60 -0
  141. gaia/llm/exceptions.py +12 -0
  142. gaia/llm/factory.py +70 -0
  143. gaia/llm/lemonade_client.py +3236 -3221
  144. gaia/llm/lemonade_manager.py +294 -294
  145. gaia/llm/providers/__init__.py +9 -0
  146. gaia/llm/providers/claude.py +108 -0
  147. gaia/llm/providers/lemonade.py +120 -0
  148. gaia/llm/providers/openai_provider.py +79 -0
  149. gaia/llm/vlm_client.py +382 -382
  150. gaia/logger.py +189 -189
  151. gaia/mcp/agent_mcp_server.py +245 -245
  152. gaia/mcp/blender_mcp_client.py +138 -138
  153. gaia/mcp/blender_mcp_server.py +648 -648
  154. gaia/mcp/context7_cache.py +332 -332
  155. gaia/mcp/external_services.py +518 -518
  156. gaia/mcp/mcp_bridge.py +811 -550
  157. gaia/mcp/servers/__init__.py +6 -6
  158. gaia/mcp/servers/docker_mcp.py +83 -83
  159. gaia/perf_analysis.py +361 -0
  160. gaia/rag/__init__.py +10 -10
  161. gaia/rag/app.py +293 -293
  162. gaia/rag/demo.py +304 -304
  163. gaia/rag/pdf_utils.py +235 -235
  164. gaia/rag/sdk.py +2194 -2194
  165. gaia/security.py +163 -163
  166. gaia/talk/app.py +289 -289
  167. gaia/talk/sdk.py +538 -538
  168. gaia/testing/__init__.py +87 -87
  169. gaia/testing/assertions.py +330 -330
  170. gaia/testing/fixtures.py +333 -333
  171. gaia/testing/mocks.py +493 -493
  172. gaia/util.py +46 -46
  173. gaia/utils/__init__.py +33 -33
  174. gaia/utils/file_watcher.py +675 -675
  175. gaia/utils/parsing.py +223 -223
  176. gaia/version.py +100 -100
  177. amd_gaia-0.15.0.dist-info/RECORD +0 -168
  178. gaia/agents/code/app.py +0 -266
  179. gaia/llm/llm_client.py +0 -723
  180. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/WHEEL +0 -0
  181. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/top_level.txt +0 -0
gaia/apps/__init__.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
- # SPDX-License-Identifier: MIT
-
- """Gaia Applications"""
+ # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+ # SPDX-License-Identifier: MIT
+
+ """Gaia Applications"""
gaia/apps/llm/__init__.py CHANGED
@@ -1,6 +1,6 @@
- # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
- # SPDX-License-Identifier: MIT
-
- """
- LLM agent module for simple LLM queries.
- """
+ # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+ # SPDX-License-Identifier: MIT
+
+ """
+ LLM agent module for simple LLM queries.
+ """
gaia/apps/llm/app.py CHANGED
@@ -1,169 +1,173 @@
- #!/usr/bin/env python3
- #
- # Copyright(C) 2024-2025 Advanced Micro Devices, Inc. All rights reserved.
- # SPDX-License-Identifier: MIT
-
- """
- Simple LLM App using the existing LLMClient wrapper to call Lemonade localhost backend.
- """
-
- import argparse
- import sys
- from typing import Iterator, Optional, Union
-
- from gaia.llm.llm_client import LLMClient
- from gaia.logger import get_logger
-
-
- class LlmApp:
-     """Simple LLM application wrapper using LLMClient."""
-
-     def __init__(
-         self, system_prompt: Optional[str] = None, base_url: Optional[str] = None
-     ):
-         """Initialize the LLM app.
-
-         Args:
-             system_prompt: Optional system prompt for the LLM
-             base_url: Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)
-         """
-         self.log = get_logger(__name__)
-         self.client = LLMClient(system_prompt=system_prompt, base_url=base_url)
-         self.log.debug("LLM app initialized")
-
-     def query(
-         self,
-         prompt: str,
-         model: Optional[str] = None,
-         max_tokens: int = 512,
-         stream: bool = False,
-         **kwargs,
-     ) -> Union[str, Iterator[str]]:
-         """Send a query to the LLM and get a response."""
-         if not prompt.strip():
-             raise ValueError("Prompt cannot be empty")
-
-         self.log.debug(f"Processing query with model: {model or 'default'}")
-
-         # Prepare arguments
-         generate_kwargs = dict(kwargs)
-         if max_tokens:
-             generate_kwargs["max_tokens"] = max_tokens
-
-         # Generate response
-         return self.client.generate(
-             prompt=prompt.strip(), model=model, stream=stream, **generate_kwargs
-         )
-
-     def get_stats(self):
-         """Get performance statistics."""
-         return self.client.get_performance_stats() or {}
-
-
- def main(
-     query: Optional[str] = None,
-     model: Optional[str] = None,
-     max_tokens: int = 512,
-     system_prompt: Optional[str] = None,
-     stream: bool = True,
-     base_url: Optional[str] = None,
- ) -> str:
-     """Main function to run the LLM app.
-
-     Args:
-         query: Query to send to the LLM
-         model: Model name to use
-         max_tokens: Maximum tokens to generate
-         system_prompt: Optional system prompt
-         stream: Whether to stream the response
-         base_url: Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)
-     """
-     if not query:
-         raise ValueError("Query is required")
-
-     app = LlmApp(system_prompt=system_prompt, base_url=base_url)
-     response = app.query(
-         prompt=query, model=model, max_tokens=max_tokens, stream=stream
-     )
-
-     if stream:
-         # Handle streaming response
-         full_response = ""
-         for chunk in response:
-             print(chunk, end="", flush=True)
-             full_response += chunk
-         print()  # Add newline
-         return full_response
-     else:
-         return response
-
-
- def cli_main():
-     """Command line interface."""
-     parser = argparse.ArgumentParser(description="Simple LLM App")
-
-     parser.add_argument("query", help="Query to send to the LLM")
-     parser.add_argument("--model", help="Model name to use")
-     parser.add_argument(
-         "--max-tokens", type=int, default=512, help="Max tokens (default: 512)"
-     )
-     parser.add_argument("--system-prompt", help="System prompt")
-     parser.add_argument("--stream", action="store_true", help="Stream response")
-     parser.add_argument("--stats", action="store_true", help="Show stats")
-     parser.add_argument(
-         "--base-url",
-         help="Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)",
-     )
-     parser.add_argument(
-         "--logging-level",
-         default="INFO",
-         choices=["DEBUG", "INFO", "WARNING", "ERROR"],
-         help="Logging level",
-     )
-
-     args = parser.parse_args()
-
-     # Setup logging
-     import logging
-
-     from gaia.logger import log_manager
-
-     log_manager.set_level("gaia", getattr(logging, args.logging_level))
-
-     try:
-         app = LlmApp(system_prompt=args.system_prompt, base_url=args.base_url)
-
-         response = app.query(
-             prompt=args.query,
-             model=args.model,
-             max_tokens=args.max_tokens,
-             stream=args.stream,
-         )
-
-         if args.stream:
-             # Already printed during streaming
-             pass
-         else:
-             print(f"\n{'='*50}")
-             print("LLM Response:")
-             print("=" * 50)
-             print(response)
-             print("=" * 50)
-
-         if args.stats:
-             stats = app.get_stats()
-             if stats:
-                 print(f"\n{'='*50}")
-                 print("Performance Statistics:")
-                 print("=" * 50)
-                 for key, value in stats.items():
-                     print(f"{key}: {value}")
-                 print("=" * 50)
-
-     except Exception as e:
-         print(f"Error: {e}")
-         sys.exit(1)
-
-
- if __name__ == "__main__":
-     cli_main()
+ #!/usr/bin/env python3
+ #
+ # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+ # SPDX-License-Identifier: MIT
+
+ """
+ Simple LLM App using the existing LLMClient wrapper to call Lemonade localhost backend.
+ """
+
+ import argparse
+ import sys
+ from typing import Iterator, Optional, Union
+
+ from gaia.llm import create_client
+ from gaia.logger import get_logger
+
+
+ class LlmApp:
+     """Simple LLM application wrapper using LLMClient."""
+
+     def __init__(
+         self, system_prompt: Optional[str] = None, base_url: Optional[str] = None
+     ):
+         """Initialize the LLM app.
+
+         Args:
+             system_prompt: Optional system prompt for the LLM
+             base_url: Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)
+         """
+         self.log = get_logger(__name__)
+         self.client = create_client(
+             "lemonade",
+             base_url=base_url,
+             system_prompt=system_prompt,
+         )
+         self.log.debug("LLM app initialized")
+
+     def query(
+         self,
+         prompt: str,
+         model: Optional[str] = None,
+         max_tokens: int = 512,
+         stream: bool = False,
+         **kwargs,
+     ) -> Union[str, Iterator[str]]:
+         """Send a query to the LLM and get a response."""
+         if not prompt.strip():
+             raise ValueError("Prompt cannot be empty")
+
+         self.log.debug(f"Processing query with model: {model or 'default'}")
+
+         # Prepare arguments
+         generate_kwargs = dict(kwargs)
+         if max_tokens:
+             generate_kwargs["max_tokens"] = max_tokens
+
+         # Generate response
+         return self.client.generate(
+             prompt=prompt.strip(), model=model, stream=stream, **generate_kwargs
+         )
+
+     def get_stats(self):
+         """Get performance statistics."""
+         return self.client.get_performance_stats() or {}
+
+
+ def main(
+     query: Optional[str] = None,
+     model: Optional[str] = None,
+     max_tokens: int = 512,
+     system_prompt: Optional[str] = None,
+     stream: bool = True,
+     base_url: Optional[str] = None,
+ ) -> str:
+     """Main function to run the LLM app.
+
+     Args:
+         query: Query to send to the LLM
+         model: Model name to use
+         max_tokens: Maximum tokens to generate
+         system_prompt: Optional system prompt
+         stream: Whether to stream the response
+         base_url: Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)
+     """
+     if not query:
+         raise ValueError("Query is required")
+
+     app = LlmApp(system_prompt=system_prompt, base_url=base_url)
+     response = app.query(
+         prompt=query, model=model, max_tokens=max_tokens, stream=stream
+     )
+
+     if stream:
+         # Handle streaming response
+         full_response = ""
+         for chunk in response:
+             print(chunk, end="", flush=True)
+             full_response += chunk
+         print()  # Add newline
+         return full_response
+     else:
+         return response
+
+
+ def cli_main():
+     """Command line interface."""
+     parser = argparse.ArgumentParser(description="Simple LLM App")
+
+     parser.add_argument("query", help="Query to send to the LLM")
+     parser.add_argument("--model", help="Model name to use")
+     parser.add_argument(
+         "--max-tokens", type=int, default=512, help="Max tokens (default: 512)"
+     )
+     parser.add_argument("--system-prompt", help="System prompt")
+     parser.add_argument("--stream", action="store_true", help="Stream response")
+     parser.add_argument("--stats", action="store_true", help="Show stats")
+     parser.add_argument(
+         "--base-url",
+         help="Base URL for local LLM server (defaults to LEMONADE_BASE_URL env var)",
+     )
+     parser.add_argument(
+         "--logging-level",
+         default="INFO",
+         choices=["DEBUG", "INFO", "WARNING", "ERROR"],
+         help="Logging level",
+     )
+
+     args = parser.parse_args()
+
+     # Setup logging
+     import logging
+
+     from gaia.logger import log_manager
+
+     log_manager.set_level("gaia", getattr(logging, args.logging_level))
+
+     try:
+         app = LlmApp(system_prompt=args.system_prompt, base_url=args.base_url)
+
+         response = app.query(
+             prompt=args.query,
+             model=args.model,
+             max_tokens=args.max_tokens,
+             stream=args.stream,
+         )
+
+         if args.stream:
+             # Already printed during streaming
+             pass
+         else:
+             print(f"\n{'='*50}")
+             print("LLM Response:")
+             print("=" * 50)
+             print(response)
+             print("=" * 50)
+
+         if args.stats:
+             stats = app.get_stats()
+             if stats:
+                 print(f"\n{'='*50}")
+                 print("Performance Statistics:")
+                 print("=" * 50)
+                 for key, value in stats.items():
+                     print(f"{key}: {value}")
+                 print("=" * 50)
+
+     except Exception as e:
+         print(f"Error: {e}")
+         sys.exit(1)
+
+
+ if __name__ == "__main__":
+     cli_main()
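The substantive change in this file is the switch from the removed gaia.llm.llm_client.LLMClient to the provider factory exposed by gaia.llm (factory.py and the providers/ modules added in this release). A minimal usage sketch of the new path, inferred only from the calls visible in the diff above; any provider name other than "lemonade" or any argument not shown there is an assumption:

# Sketch of the new factory-based client usage, based solely on the calls in this diff;
# other provider names and extra keyword arguments are assumptions.
from gaia.llm import create_client

client = create_client(
    "lemonade",                       # provider name, as passed by LlmApp.__init__
    base_url=None,                    # None defers to the LEMONADE_BASE_URL env var
    system_prompt="You are a concise assistant.",
)

# Non-streaming generation returns a string; stream=True yields chunks instead.
reply = client.generate(prompt="Hello!", model=None, stream=False, max_tokens=128)
print(reply)

# Performance statistics, as surfaced by LlmApp.get_stats().
print(client.get_performance_stats() or {})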