mail-swarms 1.3.2 (mail_swarms-1.3.2-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. mail/__init__.py +35 -0
  2. mail/api.py +1964 -0
  3. mail/cli.py +432 -0
  4. mail/client.py +1657 -0
  5. mail/config/__init__.py +8 -0
  6. mail/config/client.py +87 -0
  7. mail/config/server.py +165 -0
  8. mail/core/__init__.py +72 -0
  9. mail/core/actions.py +69 -0
  10. mail/core/agents.py +73 -0
  11. mail/core/message.py +366 -0
  12. mail/core/runtime.py +3537 -0
  13. mail/core/tasks.py +311 -0
  14. mail/core/tools.py +1206 -0
  15. mail/db/__init__.py +0 -0
  16. mail/db/init.py +182 -0
  17. mail/db/types.py +65 -0
  18. mail/db/utils.py +523 -0
  19. mail/examples/__init__.py +27 -0
  20. mail/examples/analyst_dummy/__init__.py +15 -0
  21. mail/examples/analyst_dummy/agent.py +136 -0
  22. mail/examples/analyst_dummy/prompts.py +44 -0
  23. mail/examples/consultant_dummy/__init__.py +15 -0
  24. mail/examples/consultant_dummy/agent.py +136 -0
  25. mail/examples/consultant_dummy/prompts.py +42 -0
  26. mail/examples/data_analysis/__init__.py +40 -0
  27. mail/examples/data_analysis/analyst/__init__.py +9 -0
  28. mail/examples/data_analysis/analyst/agent.py +67 -0
  29. mail/examples/data_analysis/analyst/prompts.py +53 -0
  30. mail/examples/data_analysis/processor/__init__.py +13 -0
  31. mail/examples/data_analysis/processor/actions.py +293 -0
  32. mail/examples/data_analysis/processor/agent.py +67 -0
  33. mail/examples/data_analysis/processor/prompts.py +48 -0
  34. mail/examples/data_analysis/reporter/__init__.py +10 -0
  35. mail/examples/data_analysis/reporter/actions.py +187 -0
  36. mail/examples/data_analysis/reporter/agent.py +67 -0
  37. mail/examples/data_analysis/reporter/prompts.py +49 -0
  38. mail/examples/data_analysis/statistics/__init__.py +18 -0
  39. mail/examples/data_analysis/statistics/actions.py +343 -0
  40. mail/examples/data_analysis/statistics/agent.py +67 -0
  41. mail/examples/data_analysis/statistics/prompts.py +60 -0
  42. mail/examples/mafia/__init__.py +0 -0
  43. mail/examples/mafia/game.py +1537 -0
  44. mail/examples/mafia/narrator_tools.py +396 -0
  45. mail/examples/mafia/personas.py +240 -0
  46. mail/examples/mafia/prompts.py +489 -0
  47. mail/examples/mafia/roles.py +147 -0
  48. mail/examples/mafia/spec.md +350 -0
  49. mail/examples/math_dummy/__init__.py +23 -0
  50. mail/examples/math_dummy/actions.py +252 -0
  51. mail/examples/math_dummy/agent.py +136 -0
  52. mail/examples/math_dummy/prompts.py +46 -0
  53. mail/examples/math_dummy/types.py +5 -0
  54. mail/examples/research/__init__.py +39 -0
  55. mail/examples/research/researcher/__init__.py +9 -0
  56. mail/examples/research/researcher/agent.py +67 -0
  57. mail/examples/research/researcher/prompts.py +54 -0
  58. mail/examples/research/searcher/__init__.py +10 -0
  59. mail/examples/research/searcher/actions.py +324 -0
  60. mail/examples/research/searcher/agent.py +67 -0
  61. mail/examples/research/searcher/prompts.py +53 -0
  62. mail/examples/research/summarizer/__init__.py +18 -0
  63. mail/examples/research/summarizer/actions.py +255 -0
  64. mail/examples/research/summarizer/agent.py +67 -0
  65. mail/examples/research/summarizer/prompts.py +55 -0
  66. mail/examples/research/verifier/__init__.py +10 -0
  67. mail/examples/research/verifier/actions.py +337 -0
  68. mail/examples/research/verifier/agent.py +67 -0
  69. mail/examples/research/verifier/prompts.py +52 -0
  70. mail/examples/supervisor/__init__.py +11 -0
  71. mail/examples/supervisor/agent.py +4 -0
  72. mail/examples/supervisor/prompts.py +93 -0
  73. mail/examples/support/__init__.py +33 -0
  74. mail/examples/support/classifier/__init__.py +10 -0
  75. mail/examples/support/classifier/actions.py +307 -0
  76. mail/examples/support/classifier/agent.py +68 -0
  77. mail/examples/support/classifier/prompts.py +56 -0
  78. mail/examples/support/coordinator/__init__.py +9 -0
  79. mail/examples/support/coordinator/agent.py +67 -0
  80. mail/examples/support/coordinator/prompts.py +48 -0
  81. mail/examples/support/faq/__init__.py +10 -0
  82. mail/examples/support/faq/actions.py +182 -0
  83. mail/examples/support/faq/agent.py +67 -0
  84. mail/examples/support/faq/prompts.py +42 -0
  85. mail/examples/support/sentiment/__init__.py +15 -0
  86. mail/examples/support/sentiment/actions.py +341 -0
  87. mail/examples/support/sentiment/agent.py +67 -0
  88. mail/examples/support/sentiment/prompts.py +54 -0
  89. mail/examples/weather_dummy/__init__.py +23 -0
  90. mail/examples/weather_dummy/actions.py +75 -0
  91. mail/examples/weather_dummy/agent.py +136 -0
  92. mail/examples/weather_dummy/prompts.py +35 -0
  93. mail/examples/weather_dummy/types.py +5 -0
  94. mail/factories/__init__.py +27 -0
  95. mail/factories/action.py +223 -0
  96. mail/factories/base.py +1531 -0
  97. mail/factories/supervisor.py +241 -0
  98. mail/net/__init__.py +7 -0
  99. mail/net/registry.py +712 -0
  100. mail/net/router.py +728 -0
  101. mail/net/server_utils.py +114 -0
  102. mail/net/types.py +247 -0
  103. mail/server.py +1605 -0
  104. mail/stdlib/__init__.py +0 -0
  105. mail/stdlib/anthropic/__init__.py +0 -0
  106. mail/stdlib/fs/__init__.py +15 -0
  107. mail/stdlib/fs/actions.py +209 -0
  108. mail/stdlib/http/__init__.py +19 -0
  109. mail/stdlib/http/actions.py +333 -0
  110. mail/stdlib/interswarm/__init__.py +11 -0
  111. mail/stdlib/interswarm/actions.py +208 -0
  112. mail/stdlib/mcp/__init__.py +19 -0
  113. mail/stdlib/mcp/actions.py +294 -0
  114. mail/stdlib/openai/__init__.py +13 -0
  115. mail/stdlib/openai/agents.py +451 -0
  116. mail/summarizer.py +234 -0
  117. mail/swarms_json/__init__.py +27 -0
  118. mail/swarms_json/types.py +87 -0
  119. mail/swarms_json/utils.py +255 -0
  120. mail/url_scheme.py +51 -0
  121. mail/utils/__init__.py +53 -0
  122. mail/utils/auth.py +194 -0
  123. mail/utils/context.py +17 -0
  124. mail/utils/logger.py +73 -0
  125. mail/utils/openai.py +212 -0
  126. mail/utils/parsing.py +89 -0
  127. mail/utils/serialize.py +292 -0
  128. mail/utils/store.py +49 -0
  129. mail/utils/string_builder.py +119 -0
  130. mail/utils/version.py +20 -0
  131. mail_swarms-1.3.2.dist-info/METADATA +237 -0
  132. mail_swarms-1.3.2.dist-info/RECORD +137 -0
  133. mail_swarms-1.3.2.dist-info/WHEEL +4 -0
  134. mail_swarms-1.3.2.dist-info/entry_points.txt +2 -0
  135. mail_swarms-1.3.2.dist-info/licenses/LICENSE +202 -0
  136. mail_swarms-1.3.2.dist-info/licenses/NOTICE +10 -0
  137. mail_swarms-1.3.2.dist-info/licenses/THIRD_PARTY_NOTICES.md +12334 -0
mail/examples/research/verifier/actions.py
@@ -0,0 +1,337 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2025 Charon Labs
+
+"""Verification actions for the Research Assistant swarm."""
+
+import hashlib
+import json
+from random import Random
+from typing import Any
+
+from mail import action
+
+# Source reliability ratings
+SOURCE_RELIABILITY = {
+    "wikipedia": 0.8,
+    "academic": 0.9,
+    "news": 0.6,
+    "general": 0.5,
+    "unknown": 0.3,
+}
+
+# Keywords that indicate verifiable facts
+VERIFIABLE_INDICATORS = [
+    "percent",
+    "%",
+    "million",
+    "billion",
+    "year",
+    "date",
+    "according to",
+    "research",
+    "study",
+    "report",
+    "data",
+    "survey",
+    "analysis",
+    "measured",
+    "recorded",
+    "documented",
+]
+
+# Keywords that indicate subjective claims
+SUBJECTIVE_INDICATORS = [
+    "best",
+    "worst",
+    "most",
+    "should",
+    "could",
+    "might",
+    "believe",
+    "think",
+    "opinion",
+    "feel",
+    "seems",
+    "appears",
+    "probably",
+    "possibly",
+    "arguably",
+    "supposedly",
+]
+
+
+def _analyze_claim_verifiability(claim: str) -> dict[str, Any]:
+    """Analyze how verifiable a claim is based on its content."""
+    claim_lower = claim.lower()
+
+    verifiable_count = sum(1 for ind in VERIFIABLE_INDICATORS if ind in claim_lower)
+    subjective_count = sum(1 for ind in SUBJECTIVE_INDICATORS if ind in claim_lower)
+
+    if subjective_count > verifiable_count:
+        claim_type = "opinion"
+        verifiability = 0.3
+    elif verifiable_count > 0:
+        claim_type = "factual"
+        verifiability = min(0.9, 0.5 + verifiable_count * 0.1)
+    else:
+        claim_type = "statement"
+        verifiability = 0.5
+
+    return {
+        "claim_type": claim_type,
+        "verifiability_score": verifiability,
+        "verifiable_indicators": verifiable_count,
+        "subjective_indicators": subjective_count,
+    }
+
+
+def _check_source_support(
+    claim: str, sources: list[str], rng: Random
+) -> dict[str, Any]:
+    """Check how well sources support a claim."""
+    if not sources:
+        return {
+            "support_level": "unknown",
+            "supporting_sources": 0,
+            "contradicting_sources": 0,
+            "neutral_sources": 0,
+        }
+
+    supporting = 0
+    contradicting = 0
+    neutral = 0
+
+    for source in sources:
+        # Simulate source checking based on deterministic randomness
+        source_seed = hashlib.md5((claim + source).encode()).hexdigest()
+        source_rng = Random(source_seed)
+
+        support_roll = source_rng.random()
+        if support_roll > 0.7:
+            supporting += 1
+        elif support_roll < 0.2:
+            contradicting += 1
+        else:
+            neutral += 1
+
+    total = len(sources)
+    support_ratio = supporting / total if total > 0 else 0
+
+    if support_ratio >= 0.6:
+        support_level = "supported"
+    elif contradicting > supporting:
+        support_level = "disputed"
+    else:
+        support_level = "inconclusive"
+
+    return {
+        "support_level": support_level,
+        "supporting_sources": supporting,
+        "contradicting_sources": contradicting,
+        "neutral_sources": neutral,
+        "support_ratio": round(support_ratio, 2),
+    }
+
+
+VERIFY_CLAIM_PARAMETERS = {
+    "type": "object",
+    "properties": {
+        "claim": {
+            "type": "string",
+            "description": "The claim or statement to verify",
+        },
+        "sources": {
+            "type": "array",
+            "items": {"type": "string"},
+            "description": "List of source URLs or references to check against",
+        },
+    },
+    "required": ["claim"],
+}
+
+
+@action(
+    name="verify_claim",
+    description="Verify a claim by checking it against provided sources.",
+    parameters=VERIFY_CLAIM_PARAMETERS,
+)
+async def verify_claim(args: dict[str, Any]) -> str:
+    """Verify a claim against sources."""
+    try:
+        claim = args["claim"]
+        sources = args.get("sources", [])
+    except KeyError as e:
+        return f"Error: {e} is required"
+
+    if not claim.strip():
+        return json.dumps({"error": "Claim cannot be empty"})
+
+    # Analyze the claim
+    claim_analysis = _analyze_claim_verifiability(claim)
+
+    # Generate deterministic results
+    seed = hashlib.md5(claim.encode()).hexdigest()
+    rng = Random(seed)
+
+    # Check source support
+    source_check = _check_source_support(claim, sources, rng)
+
+    # Determine verification status
+    if claim_analysis["claim_type"] == "opinion":
+        status = "not_verifiable"
+        explanation = (
+            "This appears to be a subjective opinion rather than a verifiable fact."
+        )
+    elif not sources:
+        status = "unverified"
+        explanation = "No sources provided for verification. Additional sources needed."
+    elif source_check["support_level"] == "supported":
+        status = "verified"
+        explanation = f"Claim is supported by {source_check['supporting_sources']} of {len(sources)} sources."
+    elif source_check["support_level"] == "disputed":
+        status = "disputed"
+        explanation = (
+            f"Claim is contradicted by {source_check['contradicting_sources']} sources."
+        )
+    else:
+        status = "inconclusive"
+        explanation = "Sources provide mixed or insufficient evidence."
+
+    result = {
+        "claim": claim,
+        "status": status,
+        "explanation": explanation,
+        "claim_analysis": claim_analysis,
+        "source_analysis": source_check,
+        "sources_checked": len(sources),
+        "recommendation": _get_recommendation(status, source_check),
+    }
+
+    return json.dumps(result)
+
+
+def _get_recommendation(status: str, source_check: dict) -> str:
+    """Generate a recommendation based on verification results."""
+    if status == "verified":
+        return "Claim can be cited with attribution to supporting sources."
+    elif status == "disputed":
+        return "Present both sides of the evidence when citing this claim."
+    elif status == "not_verifiable":
+        return "Present as opinion or perspective, not as fact."
+    elif status == "unverified":
+        return "Seek additional sources before citing this claim."
+    else:
+        return "Exercise caution; more evidence needed for confident citation."
+
+
+RATE_CONFIDENCE_PARAMETERS = {
+    "type": "object",
+    "properties": {
+        "claim": {
+            "type": "string",
+            "description": "The claim being evaluated",
+        },
+        "evidence": {
+            "type": "array",
+            "items": {
+                "type": "object",
+                "properties": {
+                    "source": {"type": "string"},
+                    "source_type": {"type": "string"},
+                    "supports": {"type": "boolean"},
+                },
+            },
+            "description": "Array of evidence items with source info and support status",
+        },
+    },
+    "required": ["claim", "evidence"],
+}
+
+
+@action(
+    name="rate_confidence",
+    description="Rate confidence level in a claim based on evidence quality.",
+    parameters=RATE_CONFIDENCE_PARAMETERS,
+)
+async def rate_confidence(args: dict[str, Any]) -> str:
+    """Rate confidence in a claim based on evidence."""
+    try:
+        claim = args["claim"]
+        evidence = args["evidence"]
+    except KeyError as e:
+        return f"Error: {e} is required"
+
+    if not claim.strip():
+        return json.dumps({"error": "Claim cannot be empty"})
+
+    if not evidence:
+        return json.dumps(
+            {
+                "claim": claim,
+                "confidence_level": "very_low",
+                "confidence_score": 0.1,
+                "reason": "No evidence provided",
+                "evidence_count": 0,
+            }
+        )
+
+    # Calculate weighted confidence based on evidence
+    total_weight: float = 0.0
+    support_weight: float = 0.0
+
+    for item in evidence:
+        source_type = item.get("source_type", "unknown")
+        reliability = SOURCE_RELIABILITY.get(source_type, SOURCE_RELIABILITY["unknown"])
+        supports = item.get("supports", False)
+
+        total_weight += float(reliability)
+        if supports:
+            support_weight += float(reliability)
+
+    if total_weight == 0:
+        confidence_score = 0.1
+    else:
+        confidence_score = support_weight / total_weight
+
+    # Adjust for evidence quantity
+    evidence_bonus = min(0.2, len(evidence) * 0.05)
+    confidence_score = min(1.0, confidence_score + evidence_bonus)
+
+    # Determine level
+    if confidence_score >= 0.8:
+        level = "high"
+    elif confidence_score >= 0.5:
+        level = "medium"
+    elif confidence_score >= 0.2:
+        level = "low"
+    else:
+        level = "very_low"
+
+    # Count evidence
+    supporting = sum(1 for e in evidence if e.get("supports", False))
+    contradicting = len(evidence) - supporting
+
+    result = {
+        "claim": claim,
+        "confidence_level": level,
+        "confidence_score": round(confidence_score, 2),
+        "evidence_count": len(evidence),
+        "supporting_evidence": supporting,
+        "contradicting_evidence": contradicting,
+        "reason": _get_confidence_reason(level, supporting, contradicting),
+        "reliability_note": "Confidence weighted by source reliability (academic > news > general)",
+    }
+
+    return json.dumps(result)
+
+
+def _get_confidence_reason(level: str, supporting: int, contradicting: int) -> str:
+    """Generate explanation for confidence level."""
+    if level == "high":
+        return f"Strong evidence from {supporting} reliable source(s) with minimal contradiction."
+    elif level == "medium":
+        return f"Moderate evidence with {supporting} supporting and {contradicting} contradicting source(s)."
+    elif level == "low":
+        return f"Limited supporting evidence; {contradicting} source(s) present contradictory information."
+    else:
+        return "Insufficient or unreliable evidence to support this claim."
mail/examples/research/verifier/agent.py
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2025 Charon Labs
+
+"""Verifier agent for the Research Assistant swarm."""
+
+from collections.abc import Awaitable
+from typing import Any, Literal
+
+from mail.core.agents import AgentOutput
+from mail.factories.action import LiteLLMActionAgentFunction
+
+
+class LiteLLMVerifierFunction(LiteLLMActionAgentFunction):
+    """
+    Verifier agent that fact-checks claims against sources.
+
+    This agent cross-references claims and rates confidence
+    levels based on evidence quality.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        comm_targets: list[str],
+        tools: list[dict[str, Any]],
+        llm: str,
+        system: str,
+        user_token: str = "",
+        enable_entrypoint: bool = False,
+        enable_interswarm: bool = False,
+        can_complete_tasks: bool = False,
+        tool_format: Literal["completions", "responses"] = "responses",
+        exclude_tools: list[str] = [],
+        reasoning_effort: Literal["minimal", "low", "medium", "high"] | None = None,
+        thinking_budget: int | None = None,
+        max_tokens: int | None = None,
+        memory: bool = True,
+        use_proxy: bool = True,
+        _debug_include_mail_tools: bool = True,
+    ) -> None:
+        super().__init__(
+            name=name,
+            comm_targets=comm_targets,
+            tools=tools,
+            llm=llm,
+            system=system,
+            user_token=user_token,
+            enable_entrypoint=enable_entrypoint,
+            enable_interswarm=enable_interswarm,
+            can_complete_tasks=can_complete_tasks,
+            tool_format=tool_format,
+            exclude_tools=exclude_tools,
+            reasoning_effort=reasoning_effort,
+            thinking_budget=thinking_budget,
+            max_tokens=max_tokens,
+            memory=memory,
+            use_proxy=use_proxy,
+            _debug_include_mail_tools=_debug_include_mail_tools,
+        )
+
+    def __call__(
+        self,
+        messages: list[dict[str, Any]],
+        tool_choice: str | dict[str, str] = "required",
+    ) -> Awaitable[AgentOutput]:
+        """Execute the verifier agent function."""
+        return super().__call__(messages, tool_choice)
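The class adds no behavior of its own; construction and invocation are delegated entirely to `LiteLLMActionAgentFunction`. A hypothetical instantiation and call, with placeholder values throughout (the model string, tool list, and message shape are assumptions, not shipped defaults):

    import asyncio

    from mail.examples.research.verifier.agent import LiteLLMVerifierFunction

    verifier = LiteLLMVerifierFunction(
        name="verifier",
        comm_targets=["researcher", "searcher"],  # peers named in the prompt below
        tools=[],                    # verify_claim/rate_confidence schemas would go here
        llm="openai/gpt-5-mini",     # assumed model string
        system="placeholder prompt", # the shipped prompt appears in the next hunk
    )

    # __call__ returns an Awaitable[AgentOutput]; the chat-style message dict
    # is an assumption based on the list[dict[str, Any]] annotation.
    output = asyncio.run(
        verifier(
            messages=[{"role": "user", "content": "Verify: water boils at 100 C at sea level."}],
            tool_choice="required",
        )
    )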
mail/examples/research/verifier/prompts.py
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2025 Charon Labs
+
+SYSPROMPT = """You are verifier@{swarm}, the fact-checking specialist for this research assistant swarm.
+
+# Your Role
+Cross-reference claims against sources and rate confidence levels to ensure research accuracy.
+
+# Critical Rule: Responding
+You CANNOT talk to users directly or call `task_complete`. You MUST use `send_response` to reply to the agent who contacted you.
+- When you receive a request, note the sender (usually "researcher")
+- After verification, call `send_response(target=<sender>, subject="Re: ...", body=<your findings>)`
+- Include the COMPLETE verification results in your response body
+
+# Tools
+
+## Verification Operations
+- `verify_claim(claim, sources)`: Check a claim against provided source references
+- `rate_confidence(claim, evidence)`: Rate confidence level based on evidence quality
+
+## Communication
+- `send_response(target, subject, body)`: Reply to the agent who requested information
+- `send_request(target, subject, body)`: Ask another agent (e.g., searcher) for additional sources
+- `acknowledge_broadcast(note)`: Acknowledge a broadcast message
+- `ignore_broadcast(reason)`: Ignore an irrelevant broadcast
+
+# Verification Workflow
+
+1. Receive claim to verify from another agent
+2. Review provided sources
+3. Call `verify_claim` to check the claim
+4. Call `rate_confidence` to assess evidence quality
+5. Call `send_response` with:
+   - Verification status (verified/disputed/unverified)
+   - Confidence rating
+   - Supporting or contradicting evidence
+   - Recommendations for additional verification if needed
+
+# Confidence Levels
+
+- **high** (0.8-1.0): Multiple reliable sources confirm, no contradictions
+- **medium** (0.5-0.8): Some supporting evidence, minor inconsistencies possible
+- **low** (0.2-0.5): Limited evidence, significant uncertainty
+- **very_low** (0.0-0.2): No supporting evidence or contradictory information
+
+# Guidelines
+
+- Be skeptical but fair - require evidence for both confirmation and rejection
+- Note when sources disagree and present both sides
+- Consider source reliability (academic > news > general)
+- Flag claims that cannot be verified with available sources
+- Use "Re: <original subject>" as your response subject"""
mail/examples/supervisor/__init__.py
@@ -0,0 +1,11 @@
+from .agent import (
+    supervisor_agent_params,
+)
+from .prompts import (
+    SYSPROMPT as SUPERVISOR_SYSPROMPT,
+)
+
+__all__ = [
+    "SUPERVISOR_SYSPROMPT",
+    "supervisor_agent_params",
+]
mail/examples/supervisor/agent.py
@@ -0,0 +1,4 @@
+supervisor_agent_params = {
+    "llm": "openai/gpt-5-mini",
+    "system": "mail.examples.supervisor.prompts:SYSPROMPT",
+}
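Note that `system` here is not a prompt string but a "module:attribute" locator; the resolver itself is not shown in these hunks. A minimal sketch of how such a locator could be resolved, offered as an assumption about the convention rather than the package's actual loader:

    import importlib

    def resolve_locator(locator: str) -> str:
        # "pkg.module:ATTR" -> the ATTR object exported by pkg.module
        module_name, _, attr = locator.partition(":")
        return getattr(importlib.import_module(module_name), attr)

    prompt_template = resolve_locator("mail.examples.supervisor.prompts:SYSPROMPT")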
mail/examples/supervisor/prompts.py
@@ -0,0 +1,93 @@
+SYSPROMPT = """You are supervisor@{swarm}, the orchestrator for this MAIL swarm.
+
+# Your Role
+Coordinate agents to fulfill user requests. Delegate work, integrate responses, and deliver final answers.
+
+# Critical Rule: Task Completion
+You MUST call `task_complete` to end every task. This is the ONLY way to return answers to users.
+- The moment you have sufficient information to answer, call `task_complete` immediately
+- Do not continue delegating once you have the answer
+- Never send messages to "user" - the runtime will reject it
+- Include the complete answer in `task_complete(finish_message=...)`
+
+# Tools
+
+## Delegation
+- `send_request(target, subject, body)`: Assign work to an agent
+  - Local: target="agent_name"
+  - Interswarm: target="agent_name@swarm_name"
+- `send_response(target, subject, body)`: Reply to another agent (use for interswarm replies)
+- `send_broadcast(subject, body, targets)`: Announce to multiple agents (rare)
+- `send_interrupt(target, subject, body)`: Halt an agent's current work
+
+## Task Control
+- `task_complete(finish_message)`: End task and return answer to user. ALWAYS call this.
+- `await_message(reason)`: Wait for pending responses before proceeding
+
+# Workflow
+
+1. Receive user request
+2. Delegate to specialists via `send_request` with clear instructions
+3. Receive responses from agents
+4. Once you have the answer: call `task_complete` with the full response
+
+# Interswarm Requests
+
+When you receive a request from another swarm (sender contains "@"):
+1. Delegate locally if needed via `send_request`
+2. Send result back via `send_response` to the original sender
+3. Call `task_complete` to close the task
+
+Example: Request from supervisor@swarm-alpha asking about weather
+→ `send_request(target="weather", subject="Forecast needed", body="...")`
+→ Receive weather response
+→ `send_response(target="supervisor@swarm-alpha", subject="Re: Forecast needed", body="...")`
+→ `task_complete(finish_message="Responded to interswarm request with forecast data.")`
+
+# Guidelines
+
+- Be direct and concise in delegations
+- Specify expected format in requests
+- Integrate multiple responses before completing
+- Preserve user's requested format/constraints
+- If blocked, make reasonable assumptions or ask one precise question
+"""
+
+SYSPROMPT_NO_INTERSWARM_MASTER = """You are supervisor@{swarm}, the orchestrator for this MAIL swarm.
+
+# Your Role
+Coordinate agents to fulfill user requests. Delegate work, integrate responses, and deliver final answers.
+
+# Critical Rule: Task Completion
+You MUST call `task_complete` to end every task. This is the ONLY way to return answers to users.
+- The moment you have sufficient information to answer, call `task_complete` immediately
+- Do not continue delegating once you have the answer
+- Never send messages to "user" - the runtime will reject it
+- Include the complete answer in `task_complete(finish_message=...)`
+
+# Tools
+
+## Delegation
+- `send_request(target, subject, body)`: Assign work to a local agent
+- `send_broadcast(subject, body, targets)`: Announce to multiple agents (rare)
+- `send_interrupt(target, subject, body)`: Halt an agent's current work
+
+## Task Control
+- `task_complete(finish_message)`: End task and return answer to user. ALWAYS call this.
+- `await_message(reason)`: Wait for pending responses before proceeding
+
+# Workflow
+
+1. Receive user request
+2. Delegate to specialists via `send_request` with clear instructions
+3. Receive responses from agents
+4. Once you have the answer: call `task_complete` with the full response
+
+# Guidelines
+
+- Be direct and concise in delegations
+- Specify expected format in requests
+- Integrate multiple responses before completing
+- Preserve user's requested format/constraints
+- If blocked, make reasonable assumptions or ask one precise question
+"""
mail/examples/support/__init__.py
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2025 Charon Labs
+
+"""Customer Support example swarm.
+
+This swarm demonstrates multi-agent workflows for handling customer inquiries
+with ticket classification, FAQ search, sentiment analysis, and escalation.
+
+Agents:
+- coordinator: Entry point that routes queries and synthesizes responses
+- faq: Searches FAQ database for relevant answers
+- classifier: Classifies tickets by category and priority
+- sentiment: Analyzes customer sentiment and flags escalations
+"""
+
+from mail.examples.support.coordinator.agent import LiteLLMCoordinatorFunction
+from mail.examples.support.faq.agent import LiteLLMFaqFunction
+from mail.examples.support.faq.actions import search_faq
+from mail.examples.support.classifier.agent import LiteLLMClassifierFunction
+from mail.examples.support.classifier.actions import classify_ticket
+from mail.examples.support.sentiment.agent import LiteLLMSentimentFunction
+from mail.examples.support.sentiment.actions import analyze_sentiment, create_escalation
+
+__all__ = [
+    "LiteLLMCoordinatorFunction",
+    "LiteLLMFaqFunction",
+    "LiteLLMClassifierFunction",
+    "LiteLLMSentimentFunction",
+    "search_faq",
+    "classify_ticket",
+    "analyze_sentiment",
+    "create_escalation",
+]
mail/examples/support/classifier/__init__.py
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2025 Charon Labs
+
+"""Classifier agent for the Customer Support swarm."""
+
+from mail.examples.support.classifier.agent import LiteLLMClassifierFunction
+from mail.examples.support.classifier.actions import classify_ticket
+from mail.examples.support.classifier.prompts import SYSPROMPT
+
+__all__ = ["LiteLLMClassifierFunction", "classify_ticket", "SYSPROMPT"]