amd-gaia 0.15.0__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff compares the content of two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (181)
  1. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/METADATA +223 -223
  2. amd_gaia-0.15.1.dist-info/RECORD +178 -0
  3. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/entry_points.txt +1 -0
  4. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/licenses/LICENSE.md +20 -20
  5. gaia/__init__.py +29 -29
  6. gaia/agents/__init__.py +19 -19
  7. gaia/agents/base/__init__.py +9 -9
  8. gaia/agents/base/agent.py +2177 -2177
  9. gaia/agents/base/api_agent.py +120 -120
  10. gaia/agents/base/console.py +1841 -1841
  11. gaia/agents/base/errors.py +237 -237
  12. gaia/agents/base/mcp_agent.py +86 -86
  13. gaia/agents/base/tools.py +83 -83
  14. gaia/agents/blender/agent.py +556 -556
  15. gaia/agents/blender/agent_simple.py +133 -135
  16. gaia/agents/blender/app.py +211 -211
  17. gaia/agents/blender/app_simple.py +41 -41
  18. gaia/agents/blender/core/__init__.py +16 -16
  19. gaia/agents/blender/core/materials.py +506 -506
  20. gaia/agents/blender/core/objects.py +316 -316
  21. gaia/agents/blender/core/rendering.py +225 -225
  22. gaia/agents/blender/core/scene.py +220 -220
  23. gaia/agents/blender/core/view.py +146 -146
  24. gaia/agents/chat/__init__.py +9 -9
  25. gaia/agents/chat/agent.py +835 -835
  26. gaia/agents/chat/app.py +1058 -1058
  27. gaia/agents/chat/session.py +508 -508
  28. gaia/agents/chat/tools/__init__.py +15 -15
  29. gaia/agents/chat/tools/file_tools.py +96 -96
  30. gaia/agents/chat/tools/rag_tools.py +1729 -1729
  31. gaia/agents/chat/tools/shell_tools.py +436 -436
  32. gaia/agents/code/__init__.py +7 -7
  33. gaia/agents/code/agent.py +549 -549
  34. gaia/agents/code/cli.py +377 -0
  35. gaia/agents/code/models.py +135 -135
  36. gaia/agents/code/orchestration/__init__.py +24 -24
  37. gaia/agents/code/orchestration/checklist_executor.py +1763 -1763
  38. gaia/agents/code/orchestration/checklist_generator.py +713 -713
  39. gaia/agents/code/orchestration/factories/__init__.py +9 -9
  40. gaia/agents/code/orchestration/factories/base.py +63 -63
  41. gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -118
  42. gaia/agents/code/orchestration/factories/python_factory.py +106 -106
  43. gaia/agents/code/orchestration/orchestrator.py +841 -841
  44. gaia/agents/code/orchestration/project_analyzer.py +391 -391
  45. gaia/agents/code/orchestration/steps/__init__.py +67 -67
  46. gaia/agents/code/orchestration/steps/base.py +188 -188
  47. gaia/agents/code/orchestration/steps/error_handler.py +314 -314
  48. gaia/agents/code/orchestration/steps/nextjs.py +828 -828
  49. gaia/agents/code/orchestration/steps/python.py +307 -307
  50. gaia/agents/code/orchestration/template_catalog.py +469 -469
  51. gaia/agents/code/orchestration/workflows/__init__.py +14 -14
  52. gaia/agents/code/orchestration/workflows/base.py +80 -80
  53. gaia/agents/code/orchestration/workflows/nextjs.py +186 -186
  54. gaia/agents/code/orchestration/workflows/python.py +94 -94
  55. gaia/agents/code/prompts/__init__.py +11 -11
  56. gaia/agents/code/prompts/base_prompt.py +77 -77
  57. gaia/agents/code/prompts/code_patterns.py +2036 -2036
  58. gaia/agents/code/prompts/nextjs_prompt.py +40 -40
  59. gaia/agents/code/prompts/python_prompt.py +109 -109
  60. gaia/agents/code/schema_inference.py +365 -365
  61. gaia/agents/code/system_prompt.py +41 -41
  62. gaia/agents/code/tools/__init__.py +42 -42
  63. gaia/agents/code/tools/cli_tools.py +1138 -1138
  64. gaia/agents/code/tools/code_formatting.py +319 -319
  65. gaia/agents/code/tools/code_tools.py +769 -769
  66. gaia/agents/code/tools/error_fixing.py +1347 -1347
  67. gaia/agents/code/tools/external_tools.py +180 -180
  68. gaia/agents/code/tools/file_io.py +845 -845
  69. gaia/agents/code/tools/prisma_tools.py +190 -190
  70. gaia/agents/code/tools/project_management.py +1016 -1016
  71. gaia/agents/code/tools/testing.py +321 -321
  72. gaia/agents/code/tools/typescript_tools.py +122 -122
  73. gaia/agents/code/tools/validation_parsing.py +461 -461
  74. gaia/agents/code/tools/validation_tools.py +806 -806
  75. gaia/agents/code/tools/web_dev_tools.py +1758 -1758
  76. gaia/agents/code/validators/__init__.py +16 -16
  77. gaia/agents/code/validators/antipattern_checker.py +241 -241
  78. gaia/agents/code/validators/ast_analyzer.py +197 -197
  79. gaia/agents/code/validators/requirements_validator.py +145 -145
  80. gaia/agents/code/validators/syntax_validator.py +171 -171
  81. gaia/agents/docker/__init__.py +7 -7
  82. gaia/agents/docker/agent.py +642 -642
  83. gaia/agents/emr/__init__.py +8 -8
  84. gaia/agents/emr/agent.py +1506 -1506
  85. gaia/agents/emr/cli.py +1322 -1322
  86. gaia/agents/emr/constants.py +475 -475
  87. gaia/agents/emr/dashboard/__init__.py +4 -4
  88. gaia/agents/emr/dashboard/server.py +1974 -1974
  89. gaia/agents/jira/__init__.py +11 -11
  90. gaia/agents/jira/agent.py +894 -894
  91. gaia/agents/jira/jql_templates.py +299 -299
  92. gaia/agents/routing/__init__.py +7 -7
  93. gaia/agents/routing/agent.py +567 -570
  94. gaia/agents/routing/system_prompt.py +75 -75
  95. gaia/agents/summarize/__init__.py +11 -0
  96. gaia/agents/summarize/agent.py +885 -0
  97. gaia/agents/summarize/prompts.py +129 -0
  98. gaia/api/__init__.py +23 -23
  99. gaia/api/agent_registry.py +238 -238
  100. gaia/api/app.py +305 -305
  101. gaia/api/openai_server.py +575 -575
  102. gaia/api/schemas.py +186 -186
  103. gaia/api/sse_handler.py +373 -373
  104. gaia/apps/__init__.py +4 -4
  105. gaia/apps/llm/__init__.py +6 -6
  106. gaia/apps/llm/app.py +173 -169
  107. gaia/apps/summarize/app.py +116 -633
  108. gaia/apps/summarize/html_viewer.py +133 -133
  109. gaia/apps/summarize/pdf_formatter.py +284 -284
  110. gaia/audio/__init__.py +2 -2
  111. gaia/audio/audio_client.py +439 -439
  112. gaia/audio/audio_recorder.py +269 -269
  113. gaia/audio/kokoro_tts.py +599 -599
  114. gaia/audio/whisper_asr.py +432 -432
  115. gaia/chat/__init__.py +16 -16
  116. gaia/chat/app.py +430 -430
  117. gaia/chat/prompts.py +522 -522
  118. gaia/chat/sdk.py +1228 -1225
  119. gaia/cli.py +5481 -5632
  120. gaia/database/__init__.py +10 -10
  121. gaia/database/agent.py +176 -176
  122. gaia/database/mixin.py +290 -290
  123. gaia/database/testing.py +64 -64
  124. gaia/eval/batch_experiment.py +2332 -2332
  125. gaia/eval/claude.py +542 -542
  126. gaia/eval/config.py +37 -37
  127. gaia/eval/email_generator.py +512 -512
  128. gaia/eval/eval.py +3179 -3179
  129. gaia/eval/groundtruth.py +1130 -1130
  130. gaia/eval/transcript_generator.py +582 -582
  131. gaia/eval/webapp/README.md +167 -167
  132. gaia/eval/webapp/package-lock.json +875 -875
  133. gaia/eval/webapp/package.json +20 -20
  134. gaia/eval/webapp/public/app.js +3402 -3402
  135. gaia/eval/webapp/public/index.html +87 -87
  136. gaia/eval/webapp/public/styles.css +3661 -3661
  137. gaia/eval/webapp/server.js +415 -415
  138. gaia/eval/webapp/test-setup.js +72 -72
  139. gaia/llm/__init__.py +9 -2
  140. gaia/llm/base_client.py +60 -0
  141. gaia/llm/exceptions.py +12 -0
  142. gaia/llm/factory.py +70 -0
  143. gaia/llm/lemonade_client.py +3236 -3221
  144. gaia/llm/lemonade_manager.py +294 -294
  145. gaia/llm/providers/__init__.py +9 -0
  146. gaia/llm/providers/claude.py +108 -0
  147. gaia/llm/providers/lemonade.py +120 -0
  148. gaia/llm/providers/openai_provider.py +79 -0
  149. gaia/llm/vlm_client.py +382 -382
  150. gaia/logger.py +189 -189
  151. gaia/mcp/agent_mcp_server.py +245 -245
  152. gaia/mcp/blender_mcp_client.py +138 -138
  153. gaia/mcp/blender_mcp_server.py +648 -648
  154. gaia/mcp/context7_cache.py +332 -332
  155. gaia/mcp/external_services.py +518 -518
  156. gaia/mcp/mcp_bridge.py +811 -550
  157. gaia/mcp/servers/__init__.py +6 -6
  158. gaia/mcp/servers/docker_mcp.py +83 -83
  159. gaia/perf_analysis.py +361 -0
  160. gaia/rag/__init__.py +10 -10
  161. gaia/rag/app.py +293 -293
  162. gaia/rag/demo.py +304 -304
  163. gaia/rag/pdf_utils.py +235 -235
  164. gaia/rag/sdk.py +2194 -2194
  165. gaia/security.py +163 -163
  166. gaia/talk/app.py +289 -289
  167. gaia/talk/sdk.py +538 -538
  168. gaia/testing/__init__.py +87 -87
  169. gaia/testing/assertions.py +330 -330
  170. gaia/testing/fixtures.py +333 -333
  171. gaia/testing/mocks.py +493 -493
  172. gaia/util.py +46 -46
  173. gaia/utils/__init__.py +33 -33
  174. gaia/utils/file_watcher.py +675 -675
  175. gaia/utils/parsing.py +223 -223
  176. gaia/version.py +100 -100
  177. amd_gaia-0.15.0.dist-info/RECORD +0 -168
  178. gaia/agents/code/app.py +0 -266
  179. gaia/llm/llm_client.py +0 -723
  180. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/WHEEL +0 -0
  181. {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.1.dist-info}/top_level.txt +0 -0
gaia/chat/prompts.py CHANGED
@@ -1,522 +1,522 @@
+ # Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
+ # SPDX-License-Identifier: MIT
+
+ from gaia.logger import get_logger
+
+
+ class Prompts:
+     log = get_logger(__name__)
+
+     # Define model-specific formatting templates
+     prompt_formats = {
+         "llama3": {
+             "system": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n{system_message}<|eot_id|>",
+             "user": "<|start_header_id|>user<|end_header_id|>\n{content}<|eot_id|>",
+             "assistant": "<|start_header_id|>assistant<|end_header_id|>\n{content}",
+         },
+         "mistral": {
+             "system": "<s>[INST] {system_message}\n\n",
+             "user": "{content}",
+             "assistant": " [/INST] {content}</s>",
+         },
+         "qwen": {
+             "system": "<|im_start|>system\n{system_message}<|im_end|>",
+             "user": "<|im_start|>user\n{content}<|im_end|>",
+             "assistant": "<|im_start|>assistant\n{content}<|im_end|>",
+         },
+         "phi3": {
+             "system": "<|user|>{system_message}\n",
+             "user": "<|user|>{content}<|end|>",
+             "assistant": "<|assistant|>{content}<|end|>",
+             "assistant_prefix": "<|assistant|>",
+         },
+         "llama2": {
+             "system": "<s>[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
+             "chat_entry": "{content}",  # Content will include [/INST] and </s><s>[INST] formatting
+             "assistant_prefix": " [/INST] ",
+         },
+         "chatglm": {
+             "system": "<|system|>\n{system_message}\n",
+             "user": "<|user|>\n{content}\n",
+             "assistant": "<|assistant|>\n{content}\n",
+             "observation": "<|observation|>\n{content}\n",  # For external return results
+         },
+         "gemma": {
+             "system": "<start_of_turn>system\n{system_message}<end_of_turn>\n",
+             "user": "<start_of_turn>user\n{content}<end_of_turn>\n",
+             "assistant": "<start_of_turn>assistant\n{content}<end_of_turn>\n",
+         },
+         "deepseek": {
+             "system": "{system_message}\n",
+             "user": "<|User|>{content}\n",
+             "assistant": "<|Assistant|>{content}\n",
+         },
+         "gpt-oss": {
+             "system": "<|start|>system<|channel|>main<|message|>{system_message}<|end|>",
+             "user": "<|start|>user<|channel|>main<|message|>{content}<|end|>",
+             "assistant": "<|start|>assistant<|channel|>final<|message|>{content}<|end|>",
+             "assistant_prefix": "<|start|>assistant<|channel|>final<|message|>",
+         },
+         "lfm2": {
+             "system": "<|startoftext|><|im_start|>system\n{system_message}<|im_end|>",
+             "user": "<|im_start|>user\n{content}<|im_end|>",
+             "assistant": "<|im_start|>assistant\n{content}<|im_end|>",
+             "assistant_prefix": "<|im_start|>assistant\n",
+         },
+         "default": {
+             "system": "{system_message}\n",
+             "user": "User: {content}\n",
+             "assistant": "Assistant: {content}\n",
+             "chat_entry": "{role}: {content}\n",
+             "assistant_prefix": "Assistant: ",
+         },
+         # Add other model formats here...
+     }
+
+     system_messages = {
+         "llama3": "You are a helpful AI assistant. You provide clear, accurate, and technically-sound responses while maintaining a friendly demeanor.",
+         "phi3": "You are a helpful AI assistant. You provide clear, accurate, and technically-sound responses while maintaining a friendly demeanor.",
+         "chatglm": "You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown.",
+         "gemma": "You are Gemma, a helpful AI assistant. You provide clear, accurate, and technically-sound responses while maintaining a friendly demeanor.",
+         "deepseek": "You are DeepSeek R1, a large language model trained by DeepSeek. You provide clear, accurate, and technically-sound responses while maintaining a friendly demeanor.",
+         "qwen": "You are Qwen, a helpful AI assistant. You provide clear, accurate, and technically-sound responses while maintaining a friendly demeanor.",
+         "lfm2": "You are a helpful assistant trained by Liquid AI.",
+         "default": "You are a helpful AI assistant. You provide clear, accurate, and technically-sound responses while maintaining a friendly demeanor.",
+         # Add other system messages here...
+     }
+
+     @staticmethod
+     def format_chat_history(
+         model: str,
+         chat_history: list,
+         assistant_name: str = "assistant",
+         system_prompt: str = None,
+     ) -> str:
+         """Format the chat history according to the model's requirements."""
+         matched_model = Prompts.match_model_name(model)
+         format_template = Prompts.prompt_formats.get(matched_model)
+         Prompts.log.debug(
+             f"model:{model}, matched_model: {matched_model}, format_template:\n{format_template}"
+         )
+
+         if not format_template:
+             raise ValueError(f"No format template found for model {matched_model}")
+
+         # Start with the system message - use custom system_prompt if provided, otherwise use default with assistant_name
+         if system_prompt:
+             system_msg = system_prompt
+         else:
+             base_msg = Prompts.system_messages.get(
+                 matched_model, "You are a helpful assistant."
+             )
+             # Incorporate assistant_name into the default system message if it's not "assistant"
+             if assistant_name != "assistant":
+                 system_msg = base_msg.replace(
+                     "helpful AI assistant",
+                     f"helpful AI assistant named {assistant_name}",
+                 )
+                 system_msg = system_msg.replace(
+                     "You are a helpful assistant",
+                     f"You are {assistant_name}, a helpful assistant",
+                 )
+                 # Handle specific model names
+                 if matched_model == "chatglm":
+                     system_msg = system_msg.replace(
+                         "You are ChatGLM3", f"You are {assistant_name} (ChatGLM3)"
+                     )
+                 elif matched_model == "gemma":
+                     system_msg = system_msg.replace(
+                         "You are Gemma", f"You are {assistant_name} (Gemma)"
+                     )
+                 elif matched_model == "deepseek":
+                     system_msg = system_msg.replace(
+                         "You are DeepSeek R1", f"You are {assistant_name} (DeepSeek R1)"
+                     )
+                 elif matched_model == "qwen":
+                     system_msg = system_msg.replace(
+                         "You are Qwen", f"You are {assistant_name} (Qwen)"
+                     )
+                 elif matched_model == "lfm2":
+                     system_msg = system_msg.replace(
+                         "You are a helpful assistant",
+                         f"You are {assistant_name}, a helpful assistant",
+                     )
+             else:
+                 system_msg = base_msg
+
+         formatted_prompt = format_template["system"].format(system_message=system_msg)
+
+         # Create dynamic prefixes
+         user_prefix = "user: "
+         assistant_prefix = f"{assistant_name}: "
+
+         if matched_model == "gemma":
+             for entry in chat_history:
+                 if entry.startswith(user_prefix):
+                     content = entry[len(user_prefix) :]
+                     formatted_prompt += format_template["user"].format(content=content)
+                 elif entry.startswith(assistant_prefix):
+                     content = entry[len(assistant_prefix) :]
+                     formatted_prompt += format_template["assistant"].format(
+                         content=content
+                     )
+                     formatted_prompt += (
+                         "<end_of_turn>\n"  # Add end token after assistant responses
+                     )
+
+             # Add the assistant prefix if the last message was from user
+             if chat_history and chat_history[-1].startswith(user_prefix):
+                 formatted_prompt += format_template["assistant"].format(content="")
+
+             return formatted_prompt
+
+         elif matched_model == "llama3":
+             for i, entry in enumerate(chat_history):
+                 if entry.startswith(user_prefix):
+                     content = entry[len(user_prefix) :]
+                     formatted_prompt += format_template["user"].format(content=content)
+                 elif entry.startswith(assistant_prefix):
+                     content = entry[len(assistant_prefix) :]
+                     formatted_prompt += (
+                         format_template["assistant"].format(content=content)
+                         + "<|eot_id|>"
+                     )
+
+             if chat_history and chat_history[-1].startswith(user_prefix):
+                 formatted_prompt += format_template["assistant"].format(content="")
+
+             return formatted_prompt
+
+         elif matched_model == "mistral":
+             for i, entry in enumerate(chat_history):
+                 if entry.startswith(user_prefix):
+                     content = entry[len(user_prefix) :]
+                     if i > 0:  # Add new instruction block for all but first message
+                         formatted_prompt += "<s>[INST] "
+                     formatted_prompt += format_template["user"].format(content=content)
+                 elif entry.startswith(assistant_prefix):
+                     content = entry[len(assistant_prefix) :]
+                     formatted_prompt += format_template["assistant"].format(
+                         content=content
+                     )
+
+             # Add final [INST] block if last message was from user
+             if chat_history and chat_history[-1].startswith(user_prefix):
+                 formatted_prompt += " [/INST]"
+
+             return formatted_prompt
+
+         elif matched_model == "qwen":
+             for entry in chat_history:
+                 if entry.startswith(user_prefix):
+                     content = entry[len(user_prefix) :]
+                     formatted_prompt += format_template["user"].format(content=content)
+                 elif entry.startswith(assistant_prefix):
+                     content = entry[len(assistant_prefix) :]
+                     formatted_prompt += format_template["assistant"].format(
+                         content=content
+                     )
+
+             # Add the final assistant token for the next response
+             if chat_history and chat_history[-1].startswith(user_prefix):
+                 formatted_prompt += "<|im_start|>assistant\n"
+
+             return formatted_prompt
+
+         elif matched_model == "llama2":
+             # Start with system message
+             formatted_prompt = format_template["system"].format(
+                 system_message=system_msg
+             )
+
+             for i, entry in enumerate(chat_history):
+                 if entry.startswith(user_prefix):
+                     content = entry[len(user_prefix) :]
+                     if i > 0:  # Not the first message
+                         formatted_prompt += "</s><s>[INST] "
+                     formatted_prompt += content
+                 elif entry.startswith(assistant_prefix):
+                     content = entry[len(assistant_prefix) :]
+                     formatted_prompt += " [/INST] " + content
+
+             # Add final [/INST] if last message was from user
+             if chat_history and chat_history[-1].startswith(user_prefix):
+                 formatted_prompt += " [/INST]"
+
+             return formatted_prompt
+
+         elif matched_model == "chatglm":
+             # Start with system message
+             formatted_prompt = format_template["system"].format(
+                 system_message=system_msg
+             )
+
+             for entry in chat_history:
+                 if entry.startswith(user_prefix):
+                     content = entry[len(user_prefix) :]
+                     formatted_prompt += format_template["user"].format(content=content)
+                 elif entry.startswith(assistant_prefix):
+                     content = entry[len(assistant_prefix) :]
+                     formatted_prompt += format_template["assistant"].format(
+                         content=content
+                     )
+                 elif entry.startswith(
+                     "observation: "
+                 ):  # Add support for observation messages
+                     content = entry[len("observation: ") :]
+                     formatted_prompt += format_template["observation"].format(
+                         content=content
+                     )
+
+             # Add the assistant prefix if the last message was from user
+             if chat_history and chat_history[-1].startswith(user_prefix):
+                 formatted_prompt += "<|assistant|>\n"
+
+             return formatted_prompt
+
+         elif matched_model == "lfm2":
+             # LFM2 format - similar to qwen but with different tags and startoftext
+             for entry in chat_history:
+                 if entry.startswith(user_prefix):
+                     content = entry[len(user_prefix) :]
+                     formatted_prompt += format_template["user"].format(content=content)
+                 elif entry.startswith(assistant_prefix):
+                     content = entry[len(assistant_prefix) :]
+                     formatted_prompt += format_template["assistant"].format(
+                         content=content
+                     )
+
+             # Add the assistant prefix if the last message was from user
+             if chat_history and chat_history[-1].startswith(user_prefix):
+                 formatted_prompt += format_template["assistant_prefix"]
+
+             return formatted_prompt
+
+         # Standard handling for other models
+         for entry in chat_history:
+             if entry.startswith(user_prefix):
+                 role, content = "user", entry[len(user_prefix) :]
+             elif entry.startswith(assistant_prefix):
+                 role, content = "assistant", entry[len(assistant_prefix) :]
+             else:
+                 continue
+
+             # Use the role-specific format template for all models
+             formatted_prompt += format_template[role].format(content=content)
+
+         # Add the assistant prefix for the next response if it exists
+         if (
+             "assistant_prefix" in format_template
+             and chat_history
+             and chat_history[-1].startswith(user_prefix)
+         ):
+             formatted_prompt += format_template["assistant_prefix"]
+         # If no assistant_prefix but we need to add assistant marker
+         elif chat_history and chat_history[-1].startswith(user_prefix):
+             if "assistant" in format_template:
+                 formatted_prompt += format_template["assistant"].format(content="")
+
+         return formatted_prompt
+
+     @staticmethod
+     def match_model_name(model: str) -> str:
+         """Match a model path/name to its corresponding prompt type."""
+         Prompts.log.debug(f"Matching model name: {model}")
+         model = model.lower()  # Convert to lowercase for case-insensitive matching
+
+         if any(x in model for x in ["phi-3", "phi3"]):
+             return "phi3"
+         elif "gemma" in model:
+             return "gemma"
+         elif any(x in model for x in ["llama3", "llama-3", "llama3.2", "llama-3.2"]):
+             return "llama3"
+         elif "mistral" in model:
+             return "mistral"
+         elif "qwen" in model:
+             return "qwen"
+         elif "chatglm" in model:
+             return "chatglm"
+         elif "deepseek" in model:
+             return "deepseek"
+         elif "gpt-oss" in model or "gptoss" in model:
+             return "gpt-oss"
+         elif any(x in model for x in ["lfm2", "lfm-2", "lfm_2", "liquid", "liquidai"]):
+             return "lfm2"
+         else:
+             Prompts.log.warning(
+                 f"No specific format found for model {model}, using default format"
+             )
+             return "default"
+
+     @classmethod
+     def get_system_prompt(
+         cls,
+         model: str,
+         chat_history: list[str],
+         assistant_name: str = "assistant",
+         system_prompt: str = None,
+     ) -> str:
+         """Get the formatted system prompt for the given model and chat history."""
+         return cls.format_chat_history(
+             model, chat_history, assistant_name, system_prompt
+         )
+
+
+ def main():
+     """Test different prompt formats with sample conversations."""
+     # Sample conversation
+     chat_history = [
+         "user: Hello, how are you?",
+         "assistant: I'm doing well, thank you! How can I help you today?",
+         "user: What's the weather like?",
+     ]
+
+     # Test cases for different models
+     test_models = [
+         "amd/Phi-3-mini-4k-instruct-awq-g128-int4-asym-fp32-onnx-ryzen-strix",
+         "amd/Llama-2-7b-hf-awq-g128-int4-asym-fp32-onnx-ryzen-strix",
+         "meta-llama/Meta-Llama-3-8B",
+         "amd/Mistral-7B-Instruct-v0.3-awq-g128-int4-asym-fp32-onnx-ryzen-strix",
+     ]
+
+     for model in test_models:
+         print(f"\n{'='*80}")
+         print(f"Testing model: {model}")
+         matched_model = Prompts.match_model_name(model)
+         print(f"Matched as: {matched_model}")
+         print(f"{'='*80}\n")
+
+         try:
+             formatted_prompt = Prompts.get_system_prompt(model, chat_history)
+             print("Formatted prompt:")
+             print("-" * 40)
+             print(formatted_prompt)
+             print("-" * 40)
+         except ValueError as e:
+             print(f"Error: {e}")
+
+
+ def test_llama2_format():
+     """Specific test for Llama 2 format."""
+     model = "meta-llama/Llama-2-7b-chat-hf"
+     chat_history = [
+         "user: What is Python?",
+         "assistant: Python is a high-level programming language known for its simplicity and readability.",
+         "user: Can you show me a simple example?",
+     ]
+
+     print("\nTesting Llama 2 Format:")
+     print("=" * 60)
+     formatted = Prompts.get_system_prompt(model, chat_history)
+     print(formatted)
+
+
+ def test_llama3_format():
+     """Specific test for Llama 3 format."""
+     model = "meta-llama/Meta-Llama-3-8B"
+     chat_history = [
+         "user: Explain what an API is.",
+         "assistant: An API (Application Programming Interface) is a set of rules and protocols that allows different software applications to communicate with each other.",
+         "user: Give me an example.",
+     ]
+
+     print("\nTesting Llama 3 Format:")
+     print("=" * 60)
+     formatted = Prompts.get_system_prompt(model, chat_history)
+     print(formatted)
+
+
+ def test_qwen_format():
+     """Specific test for Qwen format."""
+     model = "amd/Qwen1.5-7B-Chat-awq-g128-int4-asym-fp32-onnx-ryzen-strix"
+     chat_history = [
+         "user: What is Python?",
+         "assistant: Python is a high-level programming language known for its simplicity and readability.",
+         "user: Show me an example.",
+     ]
+
+     print("\nTesting Qwen Format:")
+     print("=" * 60)
+     formatted = Prompts.get_system_prompt(model, chat_history)
+     print(formatted)
+
+
+ def test_chatglm_format():
+     """Specific test for ChatGLM format."""
+     model = "THUDM/chatglm3-6b"
+     chat_history = [
+         "user: What's the weather like?",
+         "assistant: Let me check the weather for you.",
+         "observation: Current temperature is 72°F, sunny with light clouds",
+         "assistant: Based on the current data, it's a pleasant day with 72°F temperature and partly cloudy skies.",
+         "user: Thank you!",
+     ]
+
+     print("\nTesting ChatGLM Format:")
+     print("=" * 60)
+     formatted = Prompts.get_system_prompt(model, chat_history)
+     print(formatted)
+
+
+ def test_llama32_format():
+     """Specific test for Llama 3.2 format."""
+     model = "Llama-3.2-3B-Instruct-Hybrid"
+     chat_history = [
+         "user: Hello, how are you?",
+         "assistant: I'm doing well, thank you! How can I help you today?",
+         "user: What's the weather like?",
+     ]
+
+     print("\nTesting Llama 3.2 Format:")
+     print("=" * 60)
+     matched_model = Prompts.match_model_name(model)
+     print(f"Model: {model}")
+     print(f"Matched as: {matched_model}")
+     print("-" * 60)
+     formatted = Prompts.get_system_prompt(model, chat_history)
+     print("Formatted prompt:")
+     print(formatted)
+
+
+ def test_lfm2_format():
+     """Specific test for LFM2 (Liquid Foundation Model 2) format."""
+     model = "liquid/lfm2-1b"
+     chat_history = [
+         "user: What is C. elegans?",
+         "assistant: It's a tiny nematode that lives in temperate soil environments.",
+         "user: Tell me more about it.",
+     ]
+
+     print("\nTesting LFM2 Format:")
+     print("=" * 60)
+     matched_model = Prompts.match_model_name(model)
+     print(f"Model: {model}")
+     print(f"Matched as: {matched_model}")
+     print("-" * 60)
+     formatted = Prompts.get_system_prompt(model, chat_history)
+     print("Formatted prompt:")
+     print(formatted)
+     print("-" * 60)
+
+     # Also test with empty history (just system prompt)
+     print("\nLFM2 with empty history (system prompt only):")
+     formatted_empty = Prompts.get_system_prompt(model, [])
+     print(formatted_empty)
+     print("-" * 60)
+
+     # Test with single user message
+     print("\nLFM2 with single user message:")
+     single_msg = ["user: Hello!"]
+     formatted_single = Prompts.get_system_prompt(model, single_msg)
+     print(formatted_single)
+
+
+ if __name__ == "__main__":
+     # Run all tests
+     main()
+     test_llama2_format()
+     test_llama3_format()
+     test_qwen_format()
+     test_chatglm_format()
+     test_llama32_format()
+     test_lfm2_format()
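
For orientation, here is a minimal usage sketch for the module shown above. It assumes amd-gaia 0.15.1 is installed so that gaia.chat.prompts imports cleanly; the model string and history entries are illustrative only, reusing the "role: content" string convention from the module's own test functions.

from gaia.chat.prompts import Prompts

# Chat history entries are plain strings in "role: content" form.
history = [
    "user: Hello, how are you?",
    "assistant: I'm doing well, thank you!",
    "user: What's the weather like?",
]

# match_model_name maps a model path/name to a prompt-template key.
print(Prompts.match_model_name("meta-llama/Meta-Llama-3-8B"))  # -> llama3

# get_system_prompt returns the fully formatted prompt string; because the
# last entry is from the user, it ends with an open assistant turn.
print(Prompts.get_system_prompt("meta-llama/Meta-Llama-3-8B", history))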