solace-agent-mesh 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of solace-agent-mesh has been flagged as potentially problematic.

Files changed (51)
  1. solace_agent_mesh/agents/global/actions/plantuml_diagram.py +94 -36
  2. solace_agent_mesh/agents/global/actions/plotly_graph.py +48 -22
  3. solace_agent_mesh/cli/__init__.py +1 -1
  4. solace_agent_mesh/cli/commands/add/agent.py +1 -1
  5. solace_agent_mesh/cli/commands/add/copy_from_plugin.py +9 -7
  6. solace_agent_mesh/cli/commands/add/gateway.py +2 -2
  7. solace_agent_mesh/cli/commands/build.py +15 -0
  8. solace_agent_mesh/cli/commands/init/ai_provider_step.py +45 -28
  9. solace_agent_mesh/cli/commands/init/broker_step.py +1 -4
  10. solace_agent_mesh/cli/commands/init/check_if_already_done.py +1 -1
  11. solace_agent_mesh/cli/commands/init/create_config_file_step.py +8 -0
  12. solace_agent_mesh/cli/commands/init/init.py +20 -38
  13. solace_agent_mesh/cli/commands/init/web_init_step.py +32 -0
  14. solace_agent_mesh/cli/commands/plugin/build.py +52 -10
  15. solace_agent_mesh/cli/commands/plugin/create.py +3 -3
  16. solace_agent_mesh/cli/commands/run.py +2 -2
  17. solace_agent_mesh/cli/main.py +20 -8
  18. solace_agent_mesh/common/prompt_templates.py +1 -3
  19. solace_agent_mesh/common/utils.py +88 -19
  20. solace_agent_mesh/config_portal/__init__.py +0 -0
  21. solace_agent_mesh/config_portal/backend/__init__.py +0 -0
  22. solace_agent_mesh/config_portal/backend/common.py +35 -0
  23. solace_agent_mesh/config_portal/backend/server.py +233 -0
  24. solace_agent_mesh/config_portal/frontend/static/client/Solace_community_logo.png +0 -0
  25. solace_agent_mesh/config_portal/frontend/static/client/assets/_index-b13CSm84.js +42 -0
  26. solace_agent_mesh/config_portal/frontend/static/client/assets/components-ZIfdTbrV.js +191 -0
  27. solace_agent_mesh/config_portal/frontend/static/client/assets/entry.client-DX1misIU.js +19 -0
  28. solace_agent_mesh/config_portal/frontend/static/client/assets/index-BJHAE5s4.js +17 -0
  29. solace_agent_mesh/config_portal/frontend/static/client/assets/manifest-c92a7808.js +1 -0
  30. solace_agent_mesh/config_portal/frontend/static/client/assets/root-BApq5dPK.js +10 -0
  31. solace_agent_mesh/config_portal/frontend/static/client/assets/root-DX4gQ516.css +1 -0
  32. solace_agent_mesh/config_portal/frontend/static/client/favicon.ico +0 -0
  33. solace_agent_mesh/config_portal/frontend/static/client/index.html +7 -0
  34. solace_agent_mesh/configs/orchestrator.yaml +1 -1
  35. solace_agent_mesh/orchestrator/components/orchestrator_action_manager_timeout_component.py +4 -0
  36. solace_agent_mesh/orchestrator/components/orchestrator_stimulus_processor_component.py +46 -16
  37. solace_agent_mesh/orchestrator/components/orchestrator_streaming_output_component.py +19 -5
  38. solace_agent_mesh/orchestrator/orchestrator_main.py +11 -5
  39. solace_agent_mesh/orchestrator/orchestrator_prompt.py +78 -74
  40. solace_agent_mesh/services/history_service/history_providers/sql_history_provider.py +1 -1
  41. solace_agent_mesh/services/llm_service/components/llm_request_component.py +54 -31
  42. solace_agent_mesh/templates/rest-api-default-config.yaml +4 -2
  43. solace_agent_mesh/templates/solace-agent-mesh-default.yaml +9 -0
  44. solace_agent_mesh/templates/web-default-config.yaml +4 -2
  45. solace_agent_mesh-0.2.2.dist-info/METADATA +172 -0
  46. {solace_agent_mesh-0.2.0.dist-info → solace_agent_mesh-0.2.2.dist-info}/RECORD +49 -35
  47. solace_agent_mesh/common/prompt_templates_unused_delete.py +0 -161
  48. solace_agent_mesh-0.2.0.dist-info/METADATA +0 -209
  49. {solace_agent_mesh-0.2.0.dist-info → solace_agent_mesh-0.2.2.dist-info}/WHEEL +0 -0
  50. {solace_agent_mesh-0.2.0.dist-info → solace_agent_mesh-0.2.2.dist-info}/entry_points.txt +0 -0
  51. {solace_agent_mesh-0.2.0.dist-info → solace_agent_mesh-0.2.2.dist-info}/licenses/LICENSE +0 -0
solace_agent_mesh/orchestrator/orchestrator_prompt.py

@@ -1,6 +1,5 @@
 from typing import Dict, Any, List
 import yaml
-from langchain_core.messages import HumanMessage
 from ..services.file_service import FS_PROTOCOL, Types, LLM_QUERY_OPTIONS, TRANSFORMERS
 from solace_ai_connector.common.log import log
 
@@ -9,58 +8,53 @@ MAX_SYSTEM_PROMPT_EXAMPLES = 6
 
 # Examples that should always be included in the prompt
 fixed_examples = [
-    {
-        "docstring": "This example shows a stimulus from a chatbot gateway in which a user is asking about the top stories on the website hacker news. The web_request is not yet open, so the change_agent_status action is invoked to open the web_request agent.",
-        "tag_prefix_placeholder": "{tp}",
-        "starting_id": "1",
-        "user_input": "What is the top story on hacker news?",
-        "metadata": [
-            "local_time: 2024-11-06 15:58:04 EST-0500 (Wednesday)"
-        ],
-        "reasoning": [
-            "- User is asking for the top story on Hacker News",
-            "- We need to use the web_request agent to fetch the latest information",
-            "- The web_request agent is currently closed, so we need to open it first",
-            "- After opening the agent, we'll need to make a web request to Hacker News"
-        ],
-        "response_text": "",
-        "status_update": "To get the latest top story from Hacker News, I'll need to access the web. I'm preparing to do that now.",
-        "action": {
-            "agent": "global",
-            "name": "change_agent_status",
-            "parameters": {
-                "agent_name": "web_request",
-                "new_state": "open"
-            }
-        }
-    }
-]
+    {
+        "docstring": "This example shows a stimulus from a chatbot gateway in which a user is asking about the top stories on the website hacker news. The web_request is not yet open, so the change_agent_status action is invoked to open the web_request agent.",
+        "tag_prefix_placeholder": "{tp}",
+        "starting_id": "1",
+        "user_input": "What is the top story on hacker news?",
+        "metadata": ["local_time: 2024-11-06 15:58:04 EST-0500 (Wednesday)"],
+        "reasoning": [
+            "- User is asking for the top story on Hacker News",
+            "- We need to use the web_request agent to fetch the latest information",
+            "- The web_request agent is currently closed, so we need to open it first",
+            "- After opening the agent, we'll need to make a web request to Hacker News",
+        ],
+        "response_text": "",
+        "status_update": "To get the latest top story from Hacker News, I'll need to access the web. I'm preparing to do that now.",
+        "action": {
+            "agent": "global",
+            "name": "change_agent_status",
+            "parameters": {"agent_name": "web_request", "new_state": "open"},
+        },
+    }
+]
 
 
 def get_file_handling_prompt(tp: str) -> str:
-    parameters_desc = ""
-    parameter_examples = ""
+    parameters_desc = ""
+    parameter_examples = ""
 
-    for transformer in TRANSFORMERS:
-        if transformer.description:
-            parameters_desc += "\n" + transformer.description.strip() + "\n"
+    for transformer in TRANSFORMERS:
+        if transformer.description:
+            parameters_desc += "\n" + transformer.description.strip() + "\n"
 
-        if transformer.examples:
-            for example in transformer.examples:
-                parameter_examples += "\n" + example.strip() + "\n"
+        if transformer.examples:
+            for example in transformer.examples:
+                parameter_examples += "\n" + example.strip() + "\n"
 
-    parameters_desc = "\n ".join(parameters_desc.split("\n"))
-    parameter_examples = "\n ".join(parameter_examples.split("\n"))
+    parameters_desc = "\n ".join(parameters_desc.split("\n"))
+    parameter_examples = "\n ".join(parameter_examples.split("\n"))
 
-    parameters_desc = parameters_desc.replace("{tp}", tp)
-    parameter_examples = parameter_examples.replace("{tp}", tp)
+    parameters_desc = parameters_desc.replace("{tp}", tp)
+    parameter_examples = parameter_examples.replace("{tp}", tp)
 
-    if parameter_examples:
-        parameter_examples = f"""
+    if parameter_examples:
+        parameter_examples = f"""
 Here are some examples of how to use the query parameters:
 {parameter_examples}"""
 
-    prompt = f"""
+    prompt = f"""
 XML tags are used to represent files. The assistant will use the <{tp}file> tag to represent a file. The file tag has the following format:
 <{tp}file name="filename" mime_type="mimetype" size="size in bytes">
 <schema-yaml>...JSON schema, yaml format...</schema-yaml> (optional)
@@ -99,7 +93,7 @@ def get_file_handling_prompt(tp: str) -> str:
 For all files, there's `encoding` parameter, this is used to encode the file format. The supported values are `datauri`, `base64`, `zip`, and `gzip`. Use this to convert a file to ZIP or datauri for HTML encoding.
 For example (return a file as zip): `{FS_PROTOCOL}://c27e6908-55d5-4ce0-bc93-a8e28f84be12_annual_report.csv?encoding=zip&resolve=true`
 Example 2 (HTML tag must be datauri encoded): `<img src="{FS_PROTOCOL}://c9183b0f-fd11-48b4-ad8f-df221bff3da9_generated_image.png?encoding=datauri&resolve=true" alt="image">`
-Example 3 (return a regular image directly): `amfs://a1b2c3d4-5e6f-7g8h-9i0j-1k2l3m4n5o6p_photo.png?resolve=true` - When returning images directly in messaging platforms like Slack, don't use any encoding parameter
+Example 3 (return a regular image directly): `{FS_PROTOCOL}://a1b2c3d4-5e6f-7g8h-9i0j-1k2l3m4n5o6p_photo.png?resolve=true` - When returning images directly in messaging platforms like Slack, don't use any encoding parameter
 
 For all files, there's `resolve=true` parameter, this is used to resolve the url to the actual data before processing.
 For example: `{FS_PROTOCOL}://577c928c-c126-42d8-8a48-020b93096110_names.csv?resolve=true`
@@ -138,7 +132,7 @@ def get_file_handling_prompt(tp: str) -> str:
 
 {parameter_examples}
 """
-    return prompt
+    return prompt
 
 
 def create_examples(
@@ -155,7 +149,7 @@ def create_examples(
     """
     examples = (fixed_examples + agent_examples)[:MAX_SYSTEM_PROMPT_EXAMPLES]
    formatted_examples = format_examples_by_llm_type(examples)
-
+
     return "\n".join([example.replace("{tp}", tp) for example in formatted_examples])
 
 
@@ -190,18 +184,18 @@ def SystemPrompt(info: Dict[str, Any], action_examples: List[str]) -> str:
     handling_files = get_file_handling_prompt(tp)
 
     return f"""
-Note to avoid unintended collisions, all tag names in the assistant response will start with the value {tp}
+Note to avoid unintended collisions, all tag names in the assistant response will start with the value `{tp}`
 <orchestrator_info>
 You are an assistant serving as the orchestrator in an AI agentic system. Your primary functions are to:
 1. Receive stimuli from external sources via the system Gateway
 2. Invoke actions within system agents to address these stimuli
 3. Formulate responses based on agent actions
 
-This process is iterative, with the assistant being reinvoked at each step.
+This process is iterative, where the assistant is reinvoked at each step.
 
 The Stimulus represents user or application requests.
 
-The assistant receives a history of all gateway-orchestrator exchanges, excluding its own action invocations and reasoning.
+The assistant receives a history of all gateway-orchestrator exchanges, excluding responses from agents' action invocations and reasoning. Don't use this as a guide for the responses.
 
 The assistant's behavior aligns with the system purpose specified below:
 <system_purpose>
@@ -215,33 +209,39 @@ The assistant's behavior aligns with the system purpose specified below:
   3. After opening agents, the assistant will be reinvoked with an updated list of open agents and their actions.
   4. When opening an agent, provide only a brief status update without detailed explanations.
   5. Do not perform any other actions besides opening the required agents in this step.
+  6. All `{tp}` XML tags must be generated as raw text directly within the response stream, seamlessly integrated with any natural language text, as shown in the positive examples.
+  7. Crucially, `{tp}` directive tags must *never* be wrapped in markdown code fences (``` ```) of any kind, including ```xml.
 - Report generation:
   1. If a report is requested and no format is specified, create the report in an HTML file.
-  2. Generate each section of the report independently and store it in the file service with create_file action. When finishing the report, combine the sections using amfs urls with the resolve=true query parameter to insert the sections into the main document. When inserting amfs HTML URLs into the HTML document, place them directly in the document without any surrounding tags or brackets. Here is an example of the body section of an HTML report combining multiple sections:
+  2. Generate each section of the report independently and store it in the file service with create_file action. When finishing the report, combine the sections using {FS_PROTOCOL} urls with the resolve=true query parameter to insert the sections into the main document. When inserting {FS_PROTOCOL} HTML URLs into the HTML document, place them directly in the document without any surrounding tags or brackets. Here is an example of the body section of an HTML report combining multiple sections:
     <body>
       <!-- Title -->
       <h1>Report Title</h1>
 
      <!-- Section 1 -->
-     amfs://xxxxxx.html?resolve=true
+     {FS_PROTOCOL}://xxxxxx.html?resolve=true
 
      <!-- Section 2 -->
-     amfs://yyyyyy.html?resolve=true
+     {FS_PROTOCOL}://yyyyyy.html?resolve=true
 
      <!-- Section 3 -->
-     amfs://zzzzzz.html?resolve=true
+     {FS_PROTOCOL}://zzzzzz.html?resolve=true
    </body>
   When generating HTML, create the header first with all the necessary CSS and JS links so that it is clear what css the rest of the document will use.
-  3. Images are always very useful in reports, so the assistant will add them when appropriate. If images are embedded in html, they must be resolved and converted to datauri format or they won't render in the final document. This can be done by using the encoding=datauri&resolve=true in the amfs link. For example, <img src="amfs://xxxxxx.png?encoding=datauri&resolve=true". The assistant will take care of the rest. Images can be created in parallel
+  3. Images are always very useful in reports, so the assistant will add them when appropriate. If images are embedded in html, they must be resolved and converted to datauri format or they won't render in the final document. This can be done by using the encoding=datauri&resolve=true in the {FS_PROTOCOL} link. For example, <img src="{FS_PROTOCOL}://xxxxxx.png?encoding=datauri&resolve=true">. The assistant will take care of the rest. Images can be created in parallel
   4. During report generation in interactive sessions, the assistant will send lots of status messages to indicate what is happening.
 - Handling stimuli with open agents:
   1. Use agents' actions to break down the stimulus into smaller, manageable tasks.
-  2. Prioritize using available actions to fulfill the stimulus whenever possible.
-  3. If no suitable agents or actions are available, the assistant will:
+  2. Invoke agents' actions to perform these tasks
+  3. After invoking an action with the invoke action directive, finish the response and wait for the action to complete.
+  4. The action will be run and the results will then be returned on a later step. NEVER guess or fill in the response without waiting for the action's response.
+  5. Prioritize using available actions to fulfill the stimulus whenever possible.
+  6. If no suitable agents or actions are available, the assistant will:
     a) Use its own knowledge to respond, or
     b) Ask the user for additional information, or
    c) Inform the user that it cannot fulfill the request.
-- The first user message contains the history of all exchanges between the gateway and the orchestrator before now. Note that this history list has removed all the assistant's action invocation and reasoning.
+- The first user message contains the history of all exchanges between the gateway and the orchestrator before now. Note that this history list has removed all the agent's action invocation outputs and reasoning.
+- Do not use the history as a guide for how to respond. That history is only present to provide some context to the coversation. The format and data have been modified and does not show all the assistant's directives.
 - The assistant will not guess at an answer. No answer is better than a wrong answer.
 - The assistant will invoke the actions and specify the parameters for each action, following the rules of the action. If there is not sufficient context to fill in an action parameter, the assistant will ask the user for more information.
 - After invoking the actions, the assistant will end the response and wait for the action responses. It will not guess at the answers.
@@ -253,6 +253,7 @@ The assistant's behavior aligns with the system purpose specified below:
   2. Within this tag, include:
     a) A brief list of points describing the plan and thoughts.
     b) A list of potential actions needed to fulfill the stimulus.
+    c) Always include a statement that the assistant will not follow invoke actions with any other output.
   3. Ensure all content is contained within the <{tp}reasoning> tag.
   4. Keep each point concise and focused.
 - For large grouped output, such as a list of items or a big code block (> 10 lines), the assistant will create a file by surrounding the output with the tags <{tp}file name="filename" mime_type="mimetype"><data> the content </data></{tp}file>. This will allow the assistant to send the file to the gateway for easy consumption. This works well for a csv file, a code file or just a big text file.
@@ -262,6 +263,7 @@ The assistant's behavior aligns with the system purpose specified below:
 - When the stimulus asks what the system can do, the assistant will open all the agents to see their details before creating a nicely formatted list describing the actions available and indicating that it can do normal chatbot things as well. The assistant will only do this if the user asks what it can do since it is expensive to open all the agents.
 - The assistant is concise and professional in its responses. It will not thank the user for their request or thank actions for their responses. It will not provide any unnecessary information in its responses.
 - The assistant will not follow invoke_actions with further comments or explanations
+- After outputing the invoke action tags, the assistant will not add any additional text. It will just end the response always.
 - The assistant will distinguish between normal text and status updates. All status updates will be enclosed in <{tp}status_update/> tags.
 - Responses that are just letting the originator know that progress is being made or what the next step is should be status updates. They should be brief and to the point.
 <action_rules>
@@ -350,7 +352,7 @@ def UserStimulusPrompt(
     )
 
     prompt = (
-        "NOTE - this history represents the conversation as seen by the user on the other side of the gateway. It does not include the assistant's invoke actions or reasoning. All of that has been removed, so don't use this history as an example for how the assistant should behave\n"
+        "NOTE - this history represents the conversation as seen by the user on the other side of the gateway. It does not include the responses from invoked actions or reasoning. All of that has been removed, so don't use this history as an example for how the assistant should behave\n"
         f"{gateway_history_str}\n"
         f"<{info['tag_prefix']}stimulus>\n"
         f"{stimulus}\n"
@@ -408,7 +410,7 @@ reputation is on the line.
 """
 
 
-def ContextQueryPrompt(query: str, context: str) -> HumanMessage:
+def ContextQueryPrompt(query: str, context: str) -> str:
     return f"""
 You (orchestrator) are being asked to query, comment on or edit the following text following the originator's request.
 Do your best to give a complete and accurate answer using only the context given below. Ensure that you
@@ -433,11 +435,11 @@ you should ask the originator for more information. Include links to the source
 def format_examples_by_llm_type(examples: list, llm_type: str = "anthropic") -> list:
     """
     Render examples based on llm type
-
+
     Args:
         llm_type (str): The type of LLM to render examples for (default: "anthropic")
         examples (list): List of examples in model-agnostic format
-
+
     Returns:
         list: List of examples formatted for the specified LLM
     """
@@ -452,11 +454,12 @@ def format_examples_by_llm_type(examples: list, llm_type: str = "anthropic") ->
 
     return formatted_examples
 
+
 def format_example_for_anthropic(example: dict) -> str:
     """
     Format an example for the Anthropic's LLMs
     """
-
+
     tag_prefix = example.get("tag_prefix_placeholder", "t123")
     starting_id = example.get("starting_id", "1")
     docstring = example.get("docstring", "")
@@ -464,7 +467,7 @@ def format_example_for_anthropic(example: dict) -> str:
     metadata_lines = example.get("metadata", [])
     reasoning_lines = example.get("reasoning", [])
     response_text = example.get("response_text", "")
-
+
     # Start building the XML structure, add the description and user input
     xml_content = f"""<example>
 <example_docstring>
@@ -476,41 +479,41 @@ def format_example_for_anthropic(example: dict) -> str:
 </{tag_prefix}stimulus>
 <{tag_prefix}stimulus_metadata>
 """
-
+
     # Add metadata lines
     for metadata_line in metadata_lines:
         xml_content += f"{metadata_line}\n"
-
+
     xml_content += f"""</{tag_prefix}stimulus_metadata>
 </example_stimulus>
 <example_response>
 <{tag_prefix}reasoning>
 """
-
+
     # Add reasoning lines
     for reasoning_line in reasoning_lines:
         xml_content += f"{reasoning_line}\n"
-
+
     xml_content += f"""</{tag_prefix}reasoning>
 {response_text}"""
-
+
     # Add action invocation section
     if "action" in example:
         action_data = example.get("action", {})
         status_update = example.get("status_update", "")
         agent_name = action_data.get("agent", "")
         action_name = action_data.get("name", "")
-
+
         xml_content += f"""
 <{tag_prefix}status_update>{status_update}</{tag_prefix}status_update>
 <{tag_prefix}invoke_action agent="{agent_name}" action="{action_name}">"""
-
+
         # Handle parameters as dictionary
         parameter_dict = action_data.get("parameters", {})
         for param_name, param_value in parameter_dict.items():
             xml_content += f"""
 <{tag_prefix}parameter name="{param_name}">"""
-
+
             # Handle parameter names and values (as lists)
             if isinstance(param_value, list):
                 for line in param_value:
@@ -519,11 +522,11 @@ def format_example_for_anthropic(example: dict) -> str:
             else:
                 # For simple string values
                 xml_content += f"{param_value}"
-
+
             xml_content += f"</{tag_prefix}parameter>\n"
-
+
         xml_content += f"</{tag_prefix}invoke_action>"
-
+
     # Close the XML structure
     xml_content += """
 </example_response>
@@ -532,4 +535,5 @@ def format_example_for_anthropic(example: dict) -> str:
 
     return xml_content
 
-LONG_TERM_MEMORY_PROMPT = " - You are capable of remembering things and have long-term memory, this happens automatically."
+
+LONG_TERM_MEMORY_PROMPT = " - You are capable of remembering things and have long-term memory, this happens automatically."
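
One detail worth calling out from the prompt templates above: {tp} is a per-stimulus tag prefix, substituted at render time by the .replace("{tp}", tp) calls visible in the diff. A minimal sketch of that substitution, assuming a hypothetical prefix value ("t123" is the fallback used by format_example_for_anthropic):

# Sketch of the {tp} substitution used throughout orchestrator_prompt.py.
template = '<{tp}status_update>Working on it.</{tp}status_update>'
tp = "t123"  # hypothetical prefix; real values are supplied per stimulus by the orchestrator
print(template.replace("{tp}", tp))
# -> <t123status_update>Working on it.</t123status_update>

Using a fresh prefix per stimulus is what makes the "avoid unintended collisions" note in SystemPrompt work: user-supplied text cannot forge a directive tag it has never seen.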
solace_agent_mesh/services/history_service/history_providers/sql_history_provider.py

@@ -72,7 +72,7 @@ class SQLHistoryProvider(BaseHistoryProvider):
         query = f"SELECT data FROM {self.table_name} WHERE session_id = %s"
         cursor = self.db.execute(query, (session_id,))
         row = cursor.fetchone()
-        if not row.get("data"):
+        if not row or not row.get("data"):
             return {}
         data = row["data"] if isinstance(row["data"], dict) else json.loads(row["data"])
         return data
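
The added guard matters because DB-API fetchone() returns None when no row matches, so the old row.get("data") raised AttributeError for unknown sessions. A minimal sketch of the fixed read path (sqlite3 and the table layout here are illustrative stand-ins for the provider's database):

import json
import sqlite3

def get_session_data(db, table_name, session_id):
    """Return stored session data, or {} when the session is unknown."""
    cursor = db.execute(f"SELECT data FROM {table_name} WHERE session_id = ?", (session_id,))
    row = cursor.fetchone()
    # fetchone() yields None when no row matches; the pre-0.2.2 code called
    # row.get("data") unconditionally and raised AttributeError right here.
    if not row or not row["data"]:
        return {}
    data = row["data"]
    return data if isinstance(data, dict) else json.loads(data)

db = sqlite3.connect(":memory:")
db.row_factory = sqlite3.Row  # dict-style column access, like the provider's cursor
db.execute("CREATE TABLE history (session_id TEXT, data TEXT)")
db.execute("INSERT INTO history VALUES (?, ?)", ("s1", json.dumps({"turns": 3})))

print(get_session_data(db, "history", "s1"))       # {'turns': 3}
print(get_session_data(db, "history", "missing"))  # {} instead of a crash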
solace_agent_mesh/services/llm_service/components/llm_request_component.py

@@ -195,49 +195,72 @@ class LLMRequestComponent(ComponentBase):
         aggregate_result = ""
         current_batch = ""
         first_chunk = True
+        error = False
 
-        for response_message, last_message in self.do_broker_request_response(
-            llm_message,
-            stream=True,
-            streaming_complete_expression="input.payload:last_chunk",
-        ):
-            # Only process if the stimulus UUIDs correlate
-            if not self._correlate_request_and_response(input_message, response_message):
-                log.error("Mismatched request and response stimulus UUIDs: %s %s",
-                          self._get_user_propery(input_message, "stimulus_uuid"),
-                          self._get_user_propery(response_message, "stimulus_uuid"))
-                raise ValueError("Mismatched request and response stimulus UUIDs")
-
-            payload = response_message.get_payload()
-            content = payload.get("chunk", "")
-            aggregate_result += content
-            current_batch += content
-
-            if payload.get("handle_error", False):
-                log.error("Error invoking LLM service: %s", payload.get("content", ""), exc_info=True)
-                aggregate_result = payload.get("content", None)
-                last_message = True
-
-            if len(current_batch.split()) >= self.stream_batch_size or last_message:
+        try:
+            for response_message, last_message in self.do_broker_request_response(
+                llm_message,
+                stream=True,
+                streaming_complete_expression="input.payload:last_chunk",
+            ):
+                # Only process if the stimulus UUIDs correlate
+                if not self._correlate_request_and_response(input_message, response_message):
+                    log.error("Mismatched request and response stimulus UUIDs: %s %s",
+                              self._get_user_propery(input_message, "stimulus_uuid"),
+                              self._get_user_propery(response_message, "stimulus_uuid"))
+                    raise ValueError("Mismatched request and response stimulus UUIDs")
+
+                payload = response_message.get_payload()
+                content = payload.get("chunk", "")
+                aggregate_result += content
+                current_batch += content
+
+                if payload.get("handle_error", False):
+                    log.error("Error invoking LLM service: %s", payload.get("content", ""), exc_info=True)
+                    aggregate_result = payload.get("content", None)
+                    last_message = True
+                    error = True
+
+                if len(current_batch.split()) >= self.stream_batch_size or last_message:
+                    self._send_streaming_chunk(
+                        input_message,
+                        current_batch,
+                        aggregate_result,
+                        response_uuid,
+                        first_chunk,
+                        last_message,
+                    )
+                    current_batch = ""
+                    first_chunk = False
+
+                if last_message:
+                    break
+        except TimeoutError as e:
+            log.error("Timeout error during streaming: %s", e, exc_info=True)
+            aggregate_result = "That request took too long to process. Try again with a more focused request or by breaking it in smaller parts."
+            error = True
+        except Exception as e:
+            log.error("Error during streaming: %s", e, exc_info=True)
+            aggregate_result = f"I apologize, but I encountered an error while processing your request: {str(e)}"
+            error = True
+        finally:
+            # Send a final error chunk if there was an error
+            if error:
                 self._send_streaming_chunk(
                     input_message,
-                    current_batch,
+                    aggregate_result,
                     aggregate_result,
                     response_uuid,
                     first_chunk,
-                    last_message,
+                    True,
                 )
-                current_batch = ""
-                first_chunk = False
-
-            if last_message:
-                break
-
+        # Return the final result
         return {
             "content": aggregate_result,
             "response_uuid": response_uuid,
             "streaming": True,
             "last_chunk": True,
+            "error": error
         }
 
     def _create_llm_message(self, message: Message, messages: list, source_info: dict) -> Message:
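
The control flow introduced above is easier to see in isolation: batch streamed chunks by word count, and guarantee a terminal chunk plus an error flag even when the stream dies partway through. A simplified, self-contained sketch, where the chunk iterable and send_chunk callback are stand-ins for the broker request/response machinery:

def stream_with_error_handling(chunks, send_chunk, batch_size=15):
    """Mirror of the new loop: aggregate chunks into word-count batches and
    always emit a final chunk, even if the stream raises partway through."""
    aggregate, batch, first, error = "", "", True, False
    try:
        for content, last in chunks:
            aggregate += content
            batch += content
            if len(batch.split()) >= batch_size or last:
                send_chunk(batch, first_chunk=first, last_chunk=last)
                batch, first = "", False
            if last:
                break
    except TimeoutError:
        aggregate = "That request took too long to process."
        error = True
    except Exception as exc:
        aggregate = f"I apologize, but I encountered an error while processing your request: {exc}"
        error = True
    finally:
        if error:
            # The error text becomes the terminal chunk, so downstream
            # consumers are never left waiting for a last_chunk that won't arrive.
            send_chunk(aggregate, first_chunk=first, last_chunk=True)
    return {"content": aggregate, "last_chunk": True, "error": error}

def broken_stream():
    yield "partial output ", False
    raise TimeoutError

result = stream_with_error_handling(broken_stream(), lambda text, **kw: print(kw, text))
print(result["error"])  # True

Without the finally block, a timeout left the gateway hanging mid-stream; with it, every request ends in exactly one last_chunk.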
solace_agent_mesh/templates/web-default-config.yaml

@@ -24,6 +24,8 @@
       local_dev : ${WEBUI_LOCAL_DEV}
 
   - response_format_prompt: &response_format_prompt >
-      Return all responses in markdown format. When returning files or images, use the <{{tag_prefix}}file> tags.
-      Do not return files as links or with generic <file> tags.
+      Use markdown formatting for text responses.
+      FILE HANDLING REQUIREMENTS:
+      - ALWAYS use <{{tag_prefix}}file> tags for files/images
+      - NEVER use markdown image/link syntax or generic HTML tags for files
 
solace_agent_mesh/templates/solace-agent-mesh-default.yaml

@@ -16,6 +16,15 @@ solace_agent_mesh:
     # Slack agent, send messages to Slack channels
     - name: slack
       enabled: true
+  # Configuring the built-in services
+  services:
+    # Embedding service for vector embeddings
+    # If enabled, the following environment variables are required:
+    #  - EMBEDDING_SERVICE_MODEL_NAME
+    #  - EMBEDDING_SERVICE_API_KEY
+    #  - EMBEDDING_SERVICE_ENDPOINT
+    - name: embedding
+      enabled: false
 
   # Directory to component yaml config files, this directory would have a sub directory for each component type
   config_directory: configs
solace_agent_mesh/templates/rest-api-default-config.yaml

@@ -4,5 +4,7 @@
   max_file_size: 2000 # 2GB
 
   - response_format_prompt: &response_format_prompt >
-      Return all responses in markdown format. When returning files or images, use the <{{tag_prefix}}file> tags.
-      Do not return files as links or with generic <file> tags.
+      Use markdown formatting for text responses.
+      FILE HANDLING REQUIREMENTS:
+      - ALWAYS use <{{tag_prefix}}file> tags for files/images
+      - NEVER use markdown image/link syntax or generic HTML tags for files
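
A note on the &response_format_prompt syntax touched in both templates: it is a YAML anchor on the folded block, letting other parts of the config reuse the same prompt text via a *response_format_prompt alias. A quick illustration of how the anchor resolves at load time (the surrounding keys here are invented for the demo):

import yaml

doc = """
defaults:
  - response_format_prompt: &response_format_prompt >
      Use markdown formatting for text responses.
gateway:
  prompt: *response_format_prompt
"""
config = yaml.safe_load(doc)
# The alias is expanded at parse time; both paths see the same string.
print(config["gateway"]["prompt"])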