solace-agent-mesh 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of solace-agent-mesh might be problematic.
- solace_agent_mesh/agents/global/actions/plotly_graph.py +48 -22
- solace_agent_mesh/cli/__init__.py +1 -1
- solace_agent_mesh/cli/commands/add/copy_from_plugin.py +8 -6
- solace_agent_mesh/cli/commands/build.py +15 -0
- solace_agent_mesh/cli/commands/init/ai_provider_step.py +45 -28
- solace_agent_mesh/cli/commands/init/broker_step.py +1 -4
- solace_agent_mesh/cli/commands/init/create_config_file_step.py +8 -0
- solace_agent_mesh/cli/commands/init/init.py +50 -37
- solace_agent_mesh/cli/commands/plugin/build.py +52 -10
- solace_agent_mesh/cli/commands/run.py +2 -2
- solace_agent_mesh/cli/main.py +14 -8
- solace_agent_mesh/common/prompt_templates.py +1 -3
- solace_agent_mesh/common/utils.py +88 -19
- solace_agent_mesh/config_portal/__init__.py +0 -0
- solace_agent_mesh/config_portal/backend/__init__.py +0 -0
- solace_agent_mesh/config_portal/backend/common.py +35 -0
- solace_agent_mesh/config_portal/backend/server.py +233 -0
- solace_agent_mesh/config_portal/frontend/static/client/assets/_index-DRPGOzHj.js +42 -0
- solace_agent_mesh/config_portal/frontend/static/client/assets/components-ZIfdTbrV.js +191 -0
- solace_agent_mesh/config_portal/frontend/static/client/assets/entry.client-DX1misIU.js +19 -0
- solace_agent_mesh/config_portal/frontend/static/client/assets/index-BJHAE5s4.js +17 -0
- solace_agent_mesh/config_portal/frontend/static/client/assets/manifest-8147e469.js +1 -0
- solace_agent_mesh/config_portal/frontend/static/client/assets/root-DgMDqKDc.js +10 -0
- solace_agent_mesh/config_portal/frontend/static/client/assets/root-hhS5izs8.css +1 -0
- solace_agent_mesh/config_portal/frontend/static/client/favicon.ico +0 -0
- solace_agent_mesh/config_portal/frontend/static/client/index.html +7 -0
- solace_agent_mesh/configs/orchestrator.yaml +1 -1
- solace_agent_mesh/orchestrator/components/orchestrator_action_manager_timeout_component.py +4 -0
- solace_agent_mesh/orchestrator/components/orchestrator_stimulus_processor_component.py +28 -15
- solace_agent_mesh/orchestrator/components/orchestrator_streaming_output_component.py +19 -5
- solace_agent_mesh/orchestrator/orchestrator_main.py +11 -5
- solace_agent_mesh/orchestrator/orchestrator_prompt.py +78 -74
- solace_agent_mesh/templates/solace-agent-mesh-default.yaml +9 -0
- solace_agent_mesh-0.2.1.dist-info/METADATA +172 -0
- {solace_agent_mesh-0.2.0.dist-info → solace_agent_mesh-0.2.1.dist-info}/RECORD +38 -26
- solace_agent_mesh/common/prompt_templates_unused_delete.py +0 -161
- solace_agent_mesh-0.2.0.dist-info/METADATA +0 -209
- {solace_agent_mesh-0.2.0.dist-info → solace_agent_mesh-0.2.1.dist-info}/WHEEL +0 -0
- {solace_agent_mesh-0.2.0.dist-info → solace_agent_mesh-0.2.1.dist-info}/entry_points.txt +0 -0
- {solace_agent_mesh-0.2.0.dist-info → solace_agent_mesh-0.2.1.dist-info}/licenses/LICENSE +0 -0
solace_agent_mesh/orchestrator/orchestrator_prompt.py

@@ -1,6 +1,5 @@
 from typing import Dict, Any, List
 import yaml
-from langchain_core.messages import HumanMessage
 from ..services.file_service import FS_PROTOCOL, Types, LLM_QUERY_OPTIONS, TRANSFORMERS
 from solace_ai_connector.common.log import log
 
@@ -9,58 +8,53 @@ MAX_SYSTEM_PROMPT_EXAMPLES = 6
 
 # Examples that should always be included in the prompt
 fixed_examples = [
-[…]
-        "new_state": "open"
-    }
-    }
-    }
-]
+    {
+        "docstring": "This example shows a stimulus from a chatbot gateway in which a user is asking about the top stories on the website hacker news. The web_request is not yet open, so the change_agent_status action is invoked to open the web_request agent.",
+        "tag_prefix_placeholder": "{tp}",
+        "starting_id": "1",
+        "user_input": "What is the top story on hacker news?",
+        "metadata": ["local_time: 2024-11-06 15:58:04 EST-0500 (Wednesday)"],
+        "reasoning": [
+            "- User is asking for the top story on Hacker News",
+            "- We need to use the web_request agent to fetch the latest information",
+            "- The web_request agent is currently closed, so we need to open it first",
+            "- After opening the agent, we'll need to make a web request to Hacker News",
+        ],
+        "response_text": "",
+        "status_update": "To get the latest top story from Hacker News, I'll need to access the web. I'm preparing to do that now.",
+        "action": {
+            "agent": "global",
+            "name": "change_agent_status",
+            "parameters": {"agent_name": "web_request", "new_state": "open"},
+        },
+    }
+]
 
 
 def get_file_handling_prompt(tp: str) -> str:
-[…]
+    parameters_desc = ""
+    parameter_examples = ""
 
-[…]
+    for transformer in TRANSFORMERS:
+        if transformer.description:
+            parameters_desc += "\n" + transformer.description.strip() + "\n"
 
-[…]
+        if transformer.examples:
+            for example in transformer.examples:
+                parameter_examples += "\n" + example.strip() + "\n"
 
-[…]
+    parameters_desc = "\n ".join(parameters_desc.split("\n"))
+    parameter_examples = "\n ".join(parameter_examples.split("\n"))
 
-[…]
+    parameters_desc = parameters_desc.replace("{tp}", tp)
+    parameter_examples = parameter_examples.replace("{tp}", tp)
 
-[…]
+    if parameter_examples:
+        parameter_examples = f"""
 Here are some examples of how to use the query parameters:
 {parameter_examples}"""
 
-[…]
+    prompt = f"""
 XML tags are used to represent files. The assistant will use the <{tp}file> tag to represent a file. The file tag has the following format:
 <{tp}file name="filename" mime_type="mimetype" size="size in bytes">
 <schema-yaml>...JSON schema, yaml format...</schema-yaml> (optional)
@@ -99,7 +93,7 @@ def get_file_handling_prompt(tp: str) -> str:
 For all files, there's `encoding` parameter, this is used to encode the file format. The supported values are `datauri`, `base64`, `zip`, and `gzip`. Use this to convert a file to ZIP or datauri for HTML encoding.
 For example (return a file as zip): `{FS_PROTOCOL}://c27e6908-55d5-4ce0-bc93-a8e28f84be12_annual_report.csv?encoding=zip&resolve=true`
 Example 2 (HTML tag must be datauri encoded): `<img src="{FS_PROTOCOL}://c9183b0f-fd11-48b4-ad8f-df221bff3da9_generated_image.png?encoding=datauri&resolve=true" alt="image">`
-Example 3 (return a regular image directly): `[…]
+Example 3 (return a regular image directly): `{FS_PROTOCOL}://a1b2c3d4-5e6f-7g8h-9i0j-1k2l3m4n5o6p_photo.png?resolve=true` - When returning images directly in messaging platforms like Slack, don't use any encoding parameter
 
 For all files, there's `resolve=true` parameter, this is used to resolve the url to the actual data before processing.
 For example: `{FS_PROTOCOL}://577c928c-c126-42d8-8a48-020b93096110_names.csv?resolve=true`
@@ -138,7 +132,7 @@ def get_file_handling_prompt(tp: str) -> str:
 
 {parameter_examples}
 """
-[…]
+    return prompt
 
 
 def create_examples(
@@ -155,7 +149,7 @@ def create_examples(
     """
     examples = (fixed_examples + agent_examples)[:MAX_SYSTEM_PROMPT_EXAMPLES]
     formatted_examples = format_examples_by_llm_type(examples)
-
+
     return "\n".join([example.replace("{tp}", tp) for example in formatted_examples])
 
 
@@ -190,18 +184,18 @@ def SystemPrompt(info: Dict[str, Any], action_examples: List[str]) -> str:
     handling_files = get_file_handling_prompt(tp)
 
     return f"""
-Note to avoid unintended collisions, all tag names in the assistant response will start with the value {tp}
+Note to avoid unintended collisions, all tag names in the assistant response will start with the value `{tp}`
 <orchestrator_info>
 You are an assistant serving as the orchestrator in an AI agentic system. Your primary functions are to:
 1. Receive stimuli from external sources via the system Gateway
 2. Invoke actions within system agents to address these stimuli
 3. Formulate responses based on agent actions
 
-This process is iterative, […]
+This process is iterative, where the assistant is reinvoked at each step.
 
 The Stimulus represents user or application requests.
 
-The assistant receives a history of all gateway-orchestrator exchanges, excluding […]
+The assistant receives a history of all gateway-orchestrator exchanges, excluding responses from agents' action invocations and reasoning. Don't use this as a guide for the responses.
 
 The assistant's behavior aligns with the system purpose specified below:
 <system_purpose>
@@ -215,33 +209,39 @@ The assistant's behavior aligns with the system purpose specified below:
 3. After opening agents, the assistant will be reinvoked with an updated list of open agents and their actions.
 4. When opening an agent, provide only a brief status update without detailed explanations.
 5. Do not perform any other actions besides opening the required agents in this step.
+6. All `{tp}` XML tags must be generated as raw text directly within the response stream, seamlessly integrated with any natural language text, as shown in the positive examples.
+7. Crucially, `{tp}` directive tags must *never* be wrapped in markdown code fences (``` ```) of any kind, including ```xml.
 - Report generation:
 1. If a report is requested and no format is specified, create the report in an HTML file.
-2. Generate each section of the report independently and store it in the file service with create_file action. When finishing the report, combine the sections using […]
+2. Generate each section of the report independently and store it in the file service with create_file action. When finishing the report, combine the sections using {FS_PROTOCOL} urls with the resolve=true query parameter to insert the sections into the main document. When inserting {FS_PROTOCOL} HTML URLs into the HTML document, place them directly in the document without any surrounding tags or brackets. Here is an example of the body section of an HTML report combining multiple sections:
 <body>
 <!-- Title -->
 <h1>Report Title</h1>
 
 <!-- Section 1 -->
-[…]
+{FS_PROTOCOL}://xxxxxx.html?resolve=true
 
 <!-- Section 2 -->
-[…]
+{FS_PROTOCOL}://yyyyyy.html?resolve=true
 
 <!-- Section 3 -->
-[…]
+{FS_PROTOCOL}://zzzzzz.html?resolve=true
 </body>
 When generating HTML, create the header first with all the necessary CSS and JS links so that it is clear what css the rest of the document will use.
-3. Images are always very useful in reports, so the assistant will add them when appropriate. If images are embedded in html, they must be resolved and converted to datauri format or they won't render in the final document. This can be done by using the encoding=datauri&resolve=true in the […]
+3. Images are always very useful in reports, so the assistant will add them when appropriate. If images are embedded in html, they must be resolved and converted to datauri format or they won't render in the final document. This can be done by using the encoding=datauri&resolve=true in the {FS_PROTOCOL} link. For example, <img src="{FS_PROTOCOL}://xxxxxx.png?encoding=datauri&resolve=true">. The assistant will take care of the rest. Images can be created in parallel
 4. During report generation in interactive sessions, the assistant will send lots of status messages to indicate what is happening.
 - Handling stimuli with open agents:
 1. Use agents' actions to break down the stimulus into smaller, manageable tasks.
-2. […]
-3. […]
+2. Invoke agents' actions to perform these tasks
+3. After invoking an action with the invoke action directive, finish the response and wait for the action to complete.
+4. The action will be run and the results will then be returned on a later step. NEVER guess or fill in the response without waiting for the action's response.
+5. Prioritize using available actions to fulfill the stimulus whenever possible.
+6. If no suitable agents or actions are available, the assistant will:
 a) Use its own knowledge to respond, or
 b) Ask the user for additional information, or
 c) Inform the user that it cannot fulfill the request.
-- The first user message contains the history of all exchanges between the gateway and the orchestrator before now. Note that this history list has removed all the […]
+- The first user message contains the history of all exchanges between the gateway and the orchestrator before now. Note that this history list has removed all the agent's action invocation outputs and reasoning.
+- Do not use the history as a guide for how to respond. That history is only present to provide some context to the coversation. The format and data have been modified and does not show all the assistant's directives.
 - The assistant will not guess at an answer. No answer is better than a wrong answer.
 - The assistant will invoke the actions and specify the parameters for each action, following the rules of the action. If there is not sufficient context to fill in an action parameter, the assistant will ask the user for more information.
 - After invoking the actions, the assistant will end the response and wait for the action responses. It will not guess at the answers.
@@ -253,6 +253,7 @@ The assistant's behavior aligns with the system purpose specified below:
 2. Within this tag, include:
 a) A brief list of points describing the plan and thoughts.
 b) A list of potential actions needed to fulfill the stimulus.
+c) Always include a statement that the assistant will not follow invoke actions with any other output.
 3. Ensure all content is contained within the <{tp}reasoning> tag.
 4. Keep each point concise and focused.
 - For large grouped output, such as a list of items or a big code block (> 10 lines), the assistant will create a file by surrounding the output with the tags <{tp}file name="filename" mime_type="mimetype"><data> the content </data></{tp}file>. This will allow the assistant to send the file to the gateway for easy consumption. This works well for a csv file, a code file or just a big text file.
@@ -262,6 +263,7 @@ The assistant's behavior aligns with the system purpose specified below:
 - When the stimulus asks what the system can do, the assistant will open all the agents to see their details before creating a nicely formatted list describing the actions available and indicating that it can do normal chatbot things as well. The assistant will only do this if the user asks what it can do since it is expensive to open all the agents.
 - The assistant is concise and professional in its responses. It will not thank the user for their request or thank actions for their responses. It will not provide any unnecessary information in its responses.
 - The assistant will not follow invoke_actions with further comments or explanations
+- After outputing the invoke action tags, the assistant will not add any additional text. It will just end the response always.
 - The assistant will distinguish between normal text and status updates. All status updates will be enclosed in <{tp}status_update/> tags.
 - Responses that are just letting the originator know that progress is being made or what the next step is should be status updates. They should be brief and to the point.
 <action_rules>
@@ -350,7 +352,7 @@ def UserStimulusPrompt(
     )
 
     prompt = (
-        "NOTE - this history represents the conversation as seen by the user on the other side of the gateway. It does not include the […]
+        "NOTE - this history represents the conversation as seen by the user on the other side of the gateway. It does not include the responses from invoked actions or reasoning. All of that has been removed, so don't use this history as an example for how the assistant should behave\n"
         f"{gateway_history_str}\n"
         f"<{info['tag_prefix']}stimulus>\n"
         f"{stimulus}\n"
@@ -408,7 +410,7 @@ reputation is on the line.
 """
 
 
-def ContextQueryPrompt(query: str, context: str) -> […]
+def ContextQueryPrompt(query: str, context: str) -> str:
     return f"""
 You (orchestrator) are being asked to query, comment on or edit the following text following the originator's request.
 Do your best to give a complete and accurate answer using only the context given below. Ensure that you
@@ -433,11 +435,11 @@ you should ask the originator for more information. Include links to the source
 def format_examples_by_llm_type(examples: list, llm_type: str = "anthropic") -> list:
     """
     Render examples based on llm type
-
+
     Args:
         llm_type (str): The type of LLM to render examples for (default: "anthropic")
         examples (list): List of examples in model-agnostic format
-
+
     Returns:
         list: List of examples formatted for the specified LLM
     """
@@ -452,11 +454,12 @@ def format_examples_by_llm_type(examples: list, llm_type: str = "anthropic") ->
 
     return formatted_examples
 
+
 def format_example_for_anthropic(example: dict) -> str:
     """
     Format an example for the Anthropic's LLMs
     """
-
+
     tag_prefix = example.get("tag_prefix_placeholder", "t123")
     starting_id = example.get("starting_id", "1")
     docstring = example.get("docstring", "")
@@ -464,7 +467,7 @@ def format_example_for_anthropic(example: dict) -> str:
     metadata_lines = example.get("metadata", [])
     reasoning_lines = example.get("reasoning", [])
    response_text = example.get("response_text", "")
-
+
     # Start building the XML structure, add the description and user input
     xml_content = f"""<example>
 <example_docstring>
@@ -476,41 +479,41 @@ def format_example_for_anthropic(example: dict) -> str:
 </{tag_prefix}stimulus>
 <{tag_prefix}stimulus_metadata>
 """
-
+
     # Add metadata lines
     for metadata_line in metadata_lines:
         xml_content += f"{metadata_line}\n"
-
+
     xml_content += f"""</{tag_prefix}stimulus_metadata>
 </example_stimulus>
 <example_response>
 <{tag_prefix}reasoning>
 """
-
+
     # Add reasoning lines
     for reasoning_line in reasoning_lines:
         xml_content += f"{reasoning_line}\n"
-
+
     xml_content += f"""</{tag_prefix}reasoning>
 {response_text}"""
-
+
     # Add action invocation section
     if "action" in example:
         action_data = example.get("action", {})
         status_update = example.get("status_update", "")
         agent_name = action_data.get("agent", "")
         action_name = action_data.get("name", "")
-
+
         xml_content += f"""
 <{tag_prefix}status_update>{status_update}</{tag_prefix}status_update>
 <{tag_prefix}invoke_action agent="{agent_name}" action="{action_name}">"""
-
+
         # Handle parameters as dictionary
         parameter_dict = action_data.get("parameters", {})
         for param_name, param_value in parameter_dict.items():
             xml_content += f"""
 <{tag_prefix}parameter name="{param_name}">"""
-
+
             # Handle parameter names and values (as lists)
             if isinstance(param_value, list):
                 for line in param_value:
@@ -519,11 +522,11 @@ def format_example_for_anthropic(example: dict) -> str:
             else:
                 # For simple string values
                 xml_content += f"{param_value}"
-
+
             xml_content += f"</{tag_prefix}parameter>\n"
-
+
         xml_content += f"</{tag_prefix}invoke_action>"
-
+
     # Close the XML structure
     xml_content += """
 </example_response>
@@ -532,4 +535,5 @@ def format_example_for_anthropic(example: dict) -> str:
 
     return xml_content
 
-
+
+LONG_TERM_MEMORY_PROMPT = " - You are capable of remembering things and have long-term memory, this happens automatically."
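The file-handling section of this prompt documents `encoding` and `resolve` query parameters on file-service URLs. As a rough illustration of the URL shapes the prompt describes (a sketch only: `FS_PROTOCOL` is imported from the package's file service, so the scheme below is a placeholder and the `file_url` helper is hypothetical, not part of the package):

```python
# Hypothetical helper mirroring the URL examples in the prompt above.
# FS_PROTOCOL is a placeholder; the real value comes from the package's
# file service (services/file_service.py).
from urllib.parse import urlencode

FS_PROTOCOL = "fs"  # placeholder scheme


def file_url(file_id: str, **query: str) -> str:
    """Build a file-service URL like the ones shown in the prompt."""
    qs = urlencode(query)
    return f"{FS_PROTOCOL}://{file_id}" + (f"?{qs}" if qs else "")


# Return a file as a zip archive:
print(file_url("c27e6908-55d5-4ce0-bc93-a8e28f84be12_annual_report.csv",
               encoding="zip", resolve="true"))
# Embed an image in HTML (must be datauri encoded to render):
print(file_url("c9183b0f-fd11-48b4-ad8f-df221bff3da9_generated_image.png",
               encoding="datauri", resolve="true"))
```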
solace_agent_mesh/templates/solace-agent-mesh-default.yaml

@@ -16,6 +16,15 @@ solace_agent_mesh:
     # Slack agent, send messages to Slack channels
     - name: slack
       enabled: true
+  # Configuring the built-in services
+  services:
+    # Embedding service for vector embeddings
+    # If enabled, the following environment variables are required:
+    #  - EMBEDDING_SERVICE_MODEL_NAME
+    #  - EMBEDDING_SERVICE_API_KEY
+    #  - EMBEDDING_SERVICE_ENDPOINT
+    - name: embedding
+      enabled: false
 
   # Directory to component yaml config files, this directory would have a sub directory for each component type
   config_directory: configs
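The new `services` block ships with the embedding service disabled. Before flipping `enabled: true`, the three environment variables named in the template comments need to be set; a small illustrative preflight check (not part of the package):

```python
# Illustrative check for the variables the template comments require;
# this script is a convenience sketch, not part of solace-agent-mesh.
import os

REQUIRED = (
    "EMBEDDING_SERVICE_MODEL_NAME",
    "EMBEDDING_SERVICE_API_KEY",
    "EMBEDDING_SERVICE_ENDPOINT",
)

missing = [name for name in REQUIRED if not os.environ.get(name)]
if missing:
    raise SystemExit(
        "Set these before enabling the embedding service: " + ", ".join(missing)
    )
print("Embedding service environment looks complete.")
```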
solace_agent_mesh-0.2.1.dist-info/METADATA

@@ -0,0 +1,172 @@
+Metadata-Version: 2.4
+Name: solace-agent-mesh
+Version: 0.2.1
+Summary: Solace Agent Mesh is an EDA AI-first platform powered by Solace
+Project-URL: homepage, https://github.com/SolaceLabs/solace-agent-mesh
+Project-URL: repository, https://github.com/SolaceLabs/solace-agent-mesh
+Project-URL: documentation, https://github.com/SolaceLabs/solace-agent-mesh/blob/main/docs/docs/index.md
+Author-email: Edward Funnekotter <edward.funnekotter@solace.com>, Greg Meldrum <greg.meldrum@solace.com>, Cyrus Mobini <cyrus.mobini@solace.com>
+License-File: LICENSE
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Python: >=3.10.16
+Requires-Dist: beautifulsoup4~=4.12.3
+Requires-Dist: boto3~=1.34.122
+Requires-Dist: build~=1.2.2.post1
+Requires-Dist: click~=8.1.7
+Requires-Dist: duckduckgo-search~=6.3.7
+Requires-Dist: html2text~=2024.2.26
+Requires-Dist: jq~=1.8.0
+Requires-Dist: kaleido~=0.1.0.post1; sys_platform == 'win32'
+Requires-Dist: kaleido~=0.2.1; sys_platform != 'win32'
+Requires-Dist: markitdown~=0.0.1a3
+Requires-Dist: plotly~=5.24.1
+Requires-Dist: pyperclip~=1.9.0
+Requires-Dist: pytest-cov~=5.0.0
+Requires-Dist: pytest~=8.3.1
+Requires-Dist: python-dateutil==2.9.0.post0
+Requires-Dist: ruamel-yaml~=0.18.6
+Requires-Dist: solace-ai-connector-rest~=0.0.3
+Requires-Dist: solace-ai-connector-slack~=0.0.2
+Requires-Dist: solace-ai-connector-web~=0.2.2
+Requires-Dist: solace-ai-connector[llm]~=1.0.3
+Requires-Dist: solace-ai-connector[websocket]~=1.0.3
+Requires-Dist: solace-ai-connector~=1.0.3
+Description-Content-Type: text/markdown
+
+<p align="center">
+  <img src="./docs/static/img/logo.png" alt="Solace Agent Mesh Logo" width="100"/>
+</p>
+<h2 align="center">
+  Solace Agent Mesh
+</h2>
+<h4 align="center">Open-source framework for building event driven multi-agent AI systems</h3>
+
+<p align="center">
+  <a href="https://github.com/SolaceLabs/solace-agent-mesh/blob/main/LICENSE">
+    <img src="https://img.shields.io/github/license/SolaceLabs/solace-agent-mesh" alt="License">
+  </a>
+  <a href="https://pypi.org/project/solace-agent-mesh">
+    <img src="https://img.shields.io/pypi/v/solace-agent-mesh.svg" alt="PyPI - Version">
+  </a>
+  <a href="https://pypi.org/project/solace-agent-mesh">
+    <img src="https://img.shields.io/pypi/pyversions/solace-agent-mesh.svg" alt="PyPI - Python Version">
+  </a>
+</p>
+<p align="center">
+  <a href="#-key-features">Key Features</a> •
+  <a href="#-quick-start-5-minutes">Quickstart</a> •
+  <a href="#️-next-steps">Next Steps</a>
+</p>
+
+---
+
+Whether you're prototyping an 🤖 AI assistant or deploying a 🌎 production-grade solution, **Solace Agent Mesh (SAM)** provides the infrastructure to:
+- Connect AI agents to real-world data sources and systems.
+- Add gateways to expose capabilities via REST, a browser-based UI, Slack, and many more.
+- Scale from local development to distributed, enterprise deployments.
+
+
+
+---
+
+## ✨ Key Features
+- ⚙️ **[Modular, Event-Driven Architecture](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/getting-started/component-overview)** – All components communicate via events through a central event mesh, enabling loose coupling and high scalability.
+- 🤖 **[Composable Agents](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/concepts/agents)** – Combine specialized AI agents to solve complex, multi-step workflows.
+- 🌐 **[Flexible Interfaces](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/concepts/gateways)** – Interact with SAM via the REST API, browser UI, or [Slack Integration](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/tutorials/slack-integration).
+- 🧠 **[Built-in Orchestration](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/concepts/orchestrator)** – Tasks are automatically broken down and delegated across agents by a built-in orchestrator.
+- 🧩 **[Plugin-Extensible](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/concepts/plugins)** – Add your own agents, gateways, or services with minimal boilerplate.
+- 🏢 **[Production-Ready](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/deployment/deploy)** – Backed by [Solace’s enterprise-grade event broker](https://solace.com/products/event-broker/) for reliability and performance.
+- 🔧 **[Services](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/concepts/services)** – File storage, memory, and embeddings, all extensible, and built-in.
+
+---
+
+## 🚀 Quick Start (5 minutes)
+
+Set up Solace Agent Mesh in just a few steps.
+
+### ⚙️ System Requirements
+
+To run Solace Agent Mesh locally, you’ll need:
+
+- **Python 3.10+**
+- **pip** (comes with Python)
+- **OS**: MacOS, Linux, or Windows (with [WSL](https://learn.microsoft.com/en-us/windows/wsl/))
+- **LLM API key** (any major provider or custom endpoint)
+
+### 💻 Setup Steps
+
+```bash
+# 1. (Optional) Create and activate a Python virtual environment
+python3 -m venv venv
+source venv/bin/activate
+
+# 2. Install the Solace Agent Mesh
+pip install solace-agent-mesh
+
+# 3. Initialize a new project
+mkdir my-agent-mesh && cd my-agent-mesh
+solace-agent-mesh init # Follow the steps in the interactive init
+
+# 4. Build and run the project
+solace-agent-mesh run -b # Shortcut for build + run
+```
+
+#### Once running:
+
+- Open the Web UI at [http://localhost:5001](http://localhost:5001) to talk with a chat interface.
+<details>
+  <summary>Use the REST API directly via curl</summary>
+
+```bash
+curl --location 'http://127.0.0.1:5050/api/v1/request' \
+--form 'prompt="What is the capital of France?"' \
+--form 'stream="false"'
+```
+
+</details>
+
+
+---
+
+## ➡️ Next Steps
+
+Want to go further? Here are some hands-on tutorials to help you get started:
+
+| 🔧 Integration | ⏱️ Est. Time | 📘 Tutorial |
+|----------------|--------------|-------------|
+| 🌤️ **Weather Agent**<br>Build an agent that gives Solace Agent Mesh the ability to access real-time weather information. | **~5 min** | [Weather Agent Plugin](https://github.com/SolaceLabs/solace-agent-mesh-core-plugins/tree/main/sam-geo-information) |
+| 🗃️ **SQL Database Integration**<br>Enable Solace Agent Mesh to answer company-specific questions using a sample coffee company database.| **~10–15 min** | [SQL Database Tutorial](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/tutorials/sql-database) |
+| 🧠 **MCP Integration**<br>Integrating a Model Context Protocol (MCP) Server into Solace Agent Mesh. | **~10–15 min** | [MCP Integration Tutorial](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/tutorials/mcp-integration) |
+| 💬 **Slack Integration**<br>Chat with Solace Agent Mesh directly from Slack. | **~20–30 min** | [Slack Integration Tutorial](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/tutorials/slack-integration) |
+
+
+📚 Want to explore more? Check out the full [Solace Agent Mesh documentation](https://solacelabs.github.io/solace-agent-mesh/docs/documentation/getting-started/introduction/).
+
+---
+
+## 📦 Release Notes
+
+Stay up to date with the latest changes, features, and fixes.
+See [CHANGELOG.md](CHANGELOG.md) for a full history of updates.
+
+---
+
+## 👥 Contributors
+
+Solace Agent Mesh is built with the help of our amazing community.
+Thanks to everyone who has contributed ideas, code, and time to make this project better.
+👀 View the full list of contributors → [GitHub Contributors](https://github.com/SolaceLabs/solace-agent-mesh/graphs/contributors)
+🤝 **Looking to contribute?** Check out [CONTRIBUTING.md](CONTRIBUTING.md) to get started and see how you can help.
+
+---
+
+## 📄 License
+
+This project is licensed under the **Apache 2.0 License**.
+See the full license text in the [LICENSE](LICENSE) file.