mail_swarms-1.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. mail/__init__.py +35 -0
  2. mail/api.py +1964 -0
  3. mail/cli.py +432 -0
  4. mail/client.py +1657 -0
  5. mail/config/__init__.py +8 -0
  6. mail/config/client.py +87 -0
  7. mail/config/server.py +165 -0
  8. mail/core/__init__.py +72 -0
  9. mail/core/actions.py +69 -0
  10. mail/core/agents.py +73 -0
  11. mail/core/message.py +366 -0
  12. mail/core/runtime.py +3537 -0
  13. mail/core/tasks.py +311 -0
  14. mail/core/tools.py +1206 -0
  15. mail/db/__init__.py +0 -0
  16. mail/db/init.py +182 -0
  17. mail/db/types.py +65 -0
  18. mail/db/utils.py +523 -0
  19. mail/examples/__init__.py +27 -0
  20. mail/examples/analyst_dummy/__init__.py +15 -0
  21. mail/examples/analyst_dummy/agent.py +136 -0
  22. mail/examples/analyst_dummy/prompts.py +44 -0
  23. mail/examples/consultant_dummy/__init__.py +15 -0
  24. mail/examples/consultant_dummy/agent.py +136 -0
  25. mail/examples/consultant_dummy/prompts.py +42 -0
  26. mail/examples/data_analysis/__init__.py +40 -0
  27. mail/examples/data_analysis/analyst/__init__.py +9 -0
  28. mail/examples/data_analysis/analyst/agent.py +67 -0
  29. mail/examples/data_analysis/analyst/prompts.py +53 -0
  30. mail/examples/data_analysis/processor/__init__.py +13 -0
  31. mail/examples/data_analysis/processor/actions.py +293 -0
  32. mail/examples/data_analysis/processor/agent.py +67 -0
  33. mail/examples/data_analysis/processor/prompts.py +48 -0
  34. mail/examples/data_analysis/reporter/__init__.py +10 -0
  35. mail/examples/data_analysis/reporter/actions.py +187 -0
  36. mail/examples/data_analysis/reporter/agent.py +67 -0
  37. mail/examples/data_analysis/reporter/prompts.py +49 -0
  38. mail/examples/data_analysis/statistics/__init__.py +18 -0
  39. mail/examples/data_analysis/statistics/actions.py +343 -0
  40. mail/examples/data_analysis/statistics/agent.py +67 -0
  41. mail/examples/data_analysis/statistics/prompts.py +60 -0
  42. mail/examples/mafia/__init__.py +0 -0
  43. mail/examples/mafia/game.py +1537 -0
  44. mail/examples/mafia/narrator_tools.py +396 -0
  45. mail/examples/mafia/personas.py +240 -0
  46. mail/examples/mafia/prompts.py +489 -0
  47. mail/examples/mafia/roles.py +147 -0
  48. mail/examples/mafia/spec.md +350 -0
  49. mail/examples/math_dummy/__init__.py +23 -0
  50. mail/examples/math_dummy/actions.py +252 -0
  51. mail/examples/math_dummy/agent.py +136 -0
  52. mail/examples/math_dummy/prompts.py +46 -0
  53. mail/examples/math_dummy/types.py +5 -0
  54. mail/examples/research/__init__.py +39 -0
  55. mail/examples/research/researcher/__init__.py +9 -0
  56. mail/examples/research/researcher/agent.py +67 -0
  57. mail/examples/research/researcher/prompts.py +54 -0
  58. mail/examples/research/searcher/__init__.py +10 -0
  59. mail/examples/research/searcher/actions.py +324 -0
  60. mail/examples/research/searcher/agent.py +67 -0
  61. mail/examples/research/searcher/prompts.py +53 -0
  62. mail/examples/research/summarizer/__init__.py +18 -0
  63. mail/examples/research/summarizer/actions.py +255 -0
  64. mail/examples/research/summarizer/agent.py +67 -0
  65. mail/examples/research/summarizer/prompts.py +55 -0
  66. mail/examples/research/verifier/__init__.py +10 -0
  67. mail/examples/research/verifier/actions.py +337 -0
  68. mail/examples/research/verifier/agent.py +67 -0
  69. mail/examples/research/verifier/prompts.py +52 -0
  70. mail/examples/supervisor/__init__.py +11 -0
  71. mail/examples/supervisor/agent.py +4 -0
  72. mail/examples/supervisor/prompts.py +93 -0
  73. mail/examples/support/__init__.py +33 -0
  74. mail/examples/support/classifier/__init__.py +10 -0
  75. mail/examples/support/classifier/actions.py +307 -0
  76. mail/examples/support/classifier/agent.py +68 -0
  77. mail/examples/support/classifier/prompts.py +56 -0
  78. mail/examples/support/coordinator/__init__.py +9 -0
  79. mail/examples/support/coordinator/agent.py +67 -0
  80. mail/examples/support/coordinator/prompts.py +48 -0
  81. mail/examples/support/faq/__init__.py +10 -0
  82. mail/examples/support/faq/actions.py +182 -0
  83. mail/examples/support/faq/agent.py +67 -0
  84. mail/examples/support/faq/prompts.py +42 -0
  85. mail/examples/support/sentiment/__init__.py +15 -0
  86. mail/examples/support/sentiment/actions.py +341 -0
  87. mail/examples/support/sentiment/agent.py +67 -0
  88. mail/examples/support/sentiment/prompts.py +54 -0
  89. mail/examples/weather_dummy/__init__.py +23 -0
  90. mail/examples/weather_dummy/actions.py +75 -0
  91. mail/examples/weather_dummy/agent.py +136 -0
  92. mail/examples/weather_dummy/prompts.py +35 -0
  93. mail/examples/weather_dummy/types.py +5 -0
  94. mail/factories/__init__.py +27 -0
  95. mail/factories/action.py +223 -0
  96. mail/factories/base.py +1531 -0
  97. mail/factories/supervisor.py +241 -0
  98. mail/net/__init__.py +7 -0
  99. mail/net/registry.py +712 -0
  100. mail/net/router.py +728 -0
  101. mail/net/server_utils.py +114 -0
  102. mail/net/types.py +247 -0
  103. mail/server.py +1605 -0
  104. mail/stdlib/__init__.py +0 -0
  105. mail/stdlib/anthropic/__init__.py +0 -0
  106. mail/stdlib/fs/__init__.py +15 -0
  107. mail/stdlib/fs/actions.py +209 -0
  108. mail/stdlib/http/__init__.py +19 -0
  109. mail/stdlib/http/actions.py +333 -0
  110. mail/stdlib/interswarm/__init__.py +11 -0
  111. mail/stdlib/interswarm/actions.py +208 -0
  112. mail/stdlib/mcp/__init__.py +19 -0
  113. mail/stdlib/mcp/actions.py +294 -0
  114. mail/stdlib/openai/__init__.py +13 -0
  115. mail/stdlib/openai/agents.py +451 -0
  116. mail/summarizer.py +234 -0
  117. mail/swarms_json/__init__.py +27 -0
  118. mail/swarms_json/types.py +87 -0
  119. mail/swarms_json/utils.py +255 -0
  120. mail/url_scheme.py +51 -0
  121. mail/utils/__init__.py +53 -0
  122. mail/utils/auth.py +194 -0
  123. mail/utils/context.py +17 -0
  124. mail/utils/logger.py +73 -0
  125. mail/utils/openai.py +212 -0
  126. mail/utils/parsing.py +89 -0
  127. mail/utils/serialize.py +292 -0
  128. mail/utils/store.py +49 -0
  129. mail/utils/string_builder.py +119 -0
  130. mail/utils/version.py +20 -0
  131. mail_swarms-1.3.2.dist-info/METADATA +237 -0
  132. mail_swarms-1.3.2.dist-info/RECORD +137 -0
  133. mail_swarms-1.3.2.dist-info/WHEEL +4 -0
  134. mail_swarms-1.3.2.dist-info/entry_points.txt +2 -0
  135. mail_swarms-1.3.2.dist-info/licenses/LICENSE +202 -0
  136. mail_swarms-1.3.2.dist-info/licenses/NOTICE +10 -0
  137. mail_swarms-1.3.2.dist-info/licenses/THIRD_PARTY_NOTICES.md +12334 -0
mail/examples/research/searcher/actions.py
@@ -0,0 +1,324 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # Copyright (c) 2025 Charon Labs
+
+ """Search actions for the Research Assistant swarm.
+
+ This module includes dummy search implementations that simulate
+ real search results. In a production environment, these could be
+ replaced with actual API integrations.
+ """
+
+ import json
+ import hashlib
+ from datetime import datetime, timedelta, UTC
+ from random import Random
+ from typing import Any
+
+ from mail import action
+
+ # Simulated knowledge base for different source types
+ KNOWLEDGE_BASE = {
+     "wikipedia": {
+         "topics": {
+             "artificial intelligence": {
+                 "summary": "Artificial intelligence (AI) is intelligence demonstrated by machines, as opposed to natural intelligence displayed by animals including humans.",
+                 "facts": [
+                     "AI was founded as an academic discipline in 1956",
+                     "Machine learning is a subset of AI that enables systems to learn from data",
+                     "Deep learning uses neural networks with many layers",
+                     "AI applications include natural language processing, computer vision, and robotics",
+                 ],
+                 "url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
+             },
+             "climate change": {
+                 "summary": "Climate change refers to long-term shifts in temperatures and weather patterns, mainly caused by human activities since the 1800s.",
+                 "facts": [
+                     "Global average temperature has risen about 1.1°C since pre-industrial times",
+                     "The main driver is burning fossil fuels (coal, oil, gas)",
+                     "Effects include rising sea levels, extreme weather, and ecosystem disruption",
+                     "The Paris Agreement aims to limit warming to 1.5°C above pre-industrial levels",
+                 ],
+                 "url": "https://en.wikipedia.org/wiki/Climate_change",
+             },
+             "python programming": {
+                 "summary": "Python is a high-level, general-purpose programming language known for its readability and versatility.",
+                 "facts": [
+                     "Python was created by Guido van Rossum and released in 1991",
+                     "Python emphasizes code readability with significant indentation",
+                     "It supports multiple programming paradigms including procedural, object-oriented, and functional",
+                     "Python is widely used in web development, data science, AI, and automation",
+                 ],
+                 "url": "https://en.wikipedia.org/wiki/Python_(programming_language)",
+             },
+         },
+     },
+     "academic": {
+         "topics": {
+             "artificial intelligence": {
+                 "summary": "Recent advances in artificial intelligence have transformed multiple industries through improved algorithms and increased computational power.",
+                 "facts": [
+                     "Transformer architecture (2017) revolutionized NLP and led to large language models",
+                     "GPT-4 and similar models demonstrate emergent capabilities not present in smaller models",
+                     "AI safety research focuses on alignment, interpretability, and robustness",
+                     "Benchmark performance on many tasks now exceeds human-level performance",
+                 ],
+                 "url": "https://arxiv.org/abs/ai-research",
+             },
+             "climate change": {
+                 "summary": "Climate science research indicates accelerating impacts and the need for rapid decarbonization.",
+                 "facts": [
+                     "IPCC AR6 report (2021-2023) provides comprehensive assessment of climate science",
+                     "Carbon budget for 1.5°C is estimated at 400-500 GtCO2 from 2020",
+                     "Climate tipping points may be reached at lower temperatures than previously thought",
+                     "Methane emissions reduction offers near-term climate benefits",
+                 ],
+                 "url": "https://www.ipcc.ch/reports/",
+             },
+         },
+     },
+     "news": {
+         "topics": {
+             "artificial intelligence": {
+                 "summary": "AI developments continue to make headlines with new applications and policy discussions.",
+                 "facts": [
+                     "Major tech companies are investing billions in AI research and deployment",
+                     "Governments worldwide are developing AI regulations and policies",
+                     "AI-generated content is becoming increasingly prevalent",
+                     "Concerns about job displacement and misinformation persist",
+                 ],
+                 "url": "https://news.example.com/ai-developments",
+             },
+             "climate change": {
+                 "summary": "Climate news focuses on extreme weather events and policy developments.",
+                 "facts": [
+                     "Record temperatures and extreme weather events reported globally",
+                     "COP climate conferences continue to set emissions targets",
+                     "Renewable energy adoption is accelerating worldwide",
+                     "Corporate sustainability commitments increasing but scrutinized for greenwashing",
+                 ],
+                 "url": "https://news.example.com/climate-news",
+             },
+         },
+     },
+ }
+
+
+ def _generate_search_results(query: str, source: str, rng: Random) -> dict[str, Any]:
+     """Generate search results based on query and source."""
+     query_lower = query.lower()
+
+     # Check if query matches known topics
+     source_data = KNOWLEDGE_BASE.get(source, KNOWLEDGE_BASE.get("wikipedia", {}))
+     topics = source_data.get("topics", {})
+
+     matched_topic = None
+     best_match_score = 0
+
+     for topic_name, topic_data in topics.items():
+         # Simple keyword matching
+         topic_words = set(topic_name.lower().split())
+         query_words = set(query_lower.split())
+         overlap = len(topic_words & query_words)
+         if overlap > best_match_score:
+             best_match_score = overlap
+             matched_topic = (topic_name, topic_data)
+
+     if matched_topic:
+         topic_name, topic_data = matched_topic
+         return {
+             "query": query,
+             "source": source,
+             "matched_topic": topic_name,
+             "relevance_score": min(0.95, 0.5 + best_match_score * 0.2),
+             "summary": topic_data["summary"],
+             "facts": topic_data["facts"],
+             "url": topic_data["url"],
+             "retrieved_at": datetime.now(UTC).isoformat(),
+         }
+
+     # Generate generic results for unmatched queries
+     return {
+         "query": query,
+         "source": source,
+         "matched_topic": None,
+         "relevance_score": 0.3,
+         "summary": f"Search results for '{query}' from {source} sources.",
+         "facts": [
+             f"Information about {query} is available from multiple sources",
+             "Further research may be needed for specific details",
+             "Consider searching with more specific terms",
+         ],
+         "url": f"https://search.example.com/{source}?q={query.replace(' ', '+')}",
+         "retrieved_at": datetime.now(UTC).isoformat(),
+     }
+
+
+ SEARCH_TOPIC_PARAMETERS = {
+     "type": "object",
+     "properties": {
+         "query": {
+             "type": "string",
+             "description": "The search query or topic to research",
+         },
+         "source": {
+             "type": "string",
+             "enum": ["wikipedia", "academic", "news", "general"],
+             "description": "The type of source to search (default: general)",
+         },
+     },
+     "required": ["query"],
+ }
+
+
+ @action(
+     name="search_topic",
+     description="Search for information on a topic from various sources.",
+     parameters=SEARCH_TOPIC_PARAMETERS,
+ )
+ async def search_topic(args: dict[str, Any]) -> str:
+     """Search for information on a topic."""
+     try:
+         query = args["query"]
+         source = args.get("source", "general")
+     except KeyError as e:
+         return f"Error: {e} is required"
+
+     if not query.strip():
+         return json.dumps({"error": "Search query cannot be empty"})
+
+     if source not in ["wikipedia", "academic", "news", "general"]:
+         source = "general"
+
+     # Generate deterministic results based on query
+     seed = hashlib.md5(query.encode()).hexdigest()
+     rng = Random(seed)
+
+     # For "general" source, aggregate from multiple sources
+     if source == "general":
+         all_results = []
+         for src in ["wikipedia", "academic", "news"]:
+             result = _generate_search_results(query, src, rng)
+             all_results.append(result)
+
+         # Combine results
+         combined_facts = []
+         for r in all_results:
+             combined_facts.extend(r.get("facts", []))
+
+         return json.dumps(
+             {
+                 "query": query,
+                 "source": "general (aggregated)",
+                 "result_count": len(all_results),
+                 "combined_facts": combined_facts[:8],  # Top 8 facts
+                 "sources": [
+                     {
+                         "source": r["source"],
+                         "url": r["url"],
+                         "relevance": r["relevance_score"],
+                     }
+                     for r in all_results
+                 ],
+                 "retrieved_at": datetime.now(UTC).isoformat(),
+             }
+         )
+
+     results = _generate_search_results(query, source, rng)
+     return json.dumps(results)
+
+
+ EXTRACT_FACTS_PARAMETERS = {
+     "type": "object",
+     "properties": {
+         "text": {
+             "type": "string",
+             "description": "The text to extract facts from",
+         },
+     },
+     "required": ["text"],
+ }
+
+
+ @action(
+     name="extract_facts",
+     description="Extract key facts and claims from a block of text.",
+     parameters=EXTRACT_FACTS_PARAMETERS,
+ )
+ async def extract_facts(args: dict[str, Any]) -> str:
+     """Extract key facts from text."""
+     try:
+         text = args["text"]
+     except KeyError as e:
+         return f"Error: {e} is required"
+
+     if not text.strip():
+         return json.dumps({"error": "Text cannot be empty"})
+
+     # Simple fact extraction based on sentence analysis
+     sentences = text.replace("!", ".").replace("?", ".").split(".")
+     sentences = [s.strip() for s in sentences if len(s.strip()) > 20]
+
+     facts = []
+     claims = []
+
+     for sentence in sentences[:10]:  # Limit to first 10 sentences
+         sentence_lower = sentence.lower()
+
+         # Identify factual statements (contains numbers, dates, or definitive language)
+         is_fact = any(
+             [
+                 any(char.isdigit() for char in sentence),
+                 any(
+                     word in sentence_lower
+                     for word in ["is", "are", "was", "were", "has", "have"]
+                 ),
+                 any(
+                     word in sentence_lower
+                     for word in ["according to", "research shows", "studies indicate"]
+                 ),
+             ]
+         )
+
+         # Identify claims (subjective or requiring verification)
+         is_claim = any(
+             [
+                 any(
+                     word in sentence_lower
+                     for word in ["should", "could", "might", "may", "probably"]
+                 ),
+                 any(
+                     word in sentence_lower
+                     for word in ["best", "worst", "most", "least"]
+                 ),
+                 any(word in sentence_lower for word in ["believe", "think", "opinion"]),
+             ]
+         )
+
+         if is_claim:
+             claims.append(
+                 {
+                     "text": sentence,
+                     "type": "claim",
+                     "needs_verification": True,
+                 }
+             )
+         elif is_fact:
+             facts.append(
+                 {
+                     "text": sentence,
+                     "type": "fact",
+                     "confidence": 0.7,  # Default confidence
+                 }
+             )
+
+     return json.dumps(
+         {
+             "original_length": len(text),
+             "sentences_analyzed": len(sentences),
+             "facts_extracted": len(facts),
+             "claims_identified": len(claims),
+             "facts": facts,
+             "claims": claims,
+             "extraction_note": "Facts are statements with specific data; claims require verification",
+         }
+     )
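
To see what these dummy actions return, they can be driven directly. The sketch below assumes the `@action` decorator leaves the underlying coroutines awaitable outside the MAIL runtime; the driver script itself is hypothetical and not part of the package:

import asyncio
import json

from mail.examples.research.searcher.actions import extract_facts, search_topic


async def main() -> None:
    # "artificial intelligence" matches a simulated knowledge-base topic,
    # so relevance_score is min(0.95, 0.5 + 2 * 0.2) = 0.9
    raw = await search_topic({"query": "artificial intelligence", "source": "wikipedia"})
    result = json.loads(raw)
    print(result["matched_topic"], result["relevance_score"])

    # Feed the returned summary back through the fact extractor
    analysis = json.loads(await extract_facts({"text": result["summary"]}))
    print(analysis["facts_extracted"], "facts,", analysis["claims_identified"], "claims")


asyncio.run(main())

Note that both actions return JSON strings rather than dicts, matching the `-> str` signatures above.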
mail/examples/research/searcher/agent.py
@@ -0,0 +1,67 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # Copyright (c) 2025 Charon Labs
+
+ """Searcher agent for the Research Assistant swarm."""
+
+ from collections.abc import Awaitable
+ from typing import Any, Literal
+
+ from mail.core.agents import AgentOutput
+ from mail.factories.action import LiteLLMActionAgentFunction
+
+
+ class LiteLLMSearcherFunction(LiteLLMActionAgentFunction):
+     """
+     Searcher agent that retrieves information from various sources.
+
+     This agent searches for information on topics and extracts
+     key facts from text.
+     """
+
+     def __init__(
+         self,
+         name: str,
+         comm_targets: list[str],
+         tools: list[dict[str, Any]],
+         llm: str,
+         system: str,
+         user_token: str = "",
+         enable_entrypoint: bool = False,
+         enable_interswarm: bool = False,
+         can_complete_tasks: bool = False,
+         tool_format: Literal["completions", "responses"] = "responses",
+         exclude_tools: list[str] = [],
+         reasoning_effort: Literal["minimal", "low", "medium", "high"] | None = None,
+         thinking_budget: int | None = None,
+         max_tokens: int | None = None,
+         memory: bool = True,
+         use_proxy: bool = True,
+         _debug_include_mail_tools: bool = True,
+     ) -> None:
+         super().__init__(
+             name=name,
+             comm_targets=comm_targets,
+             tools=tools,
+             llm=llm,
+             system=system,
+             user_token=user_token,
+             enable_entrypoint=enable_entrypoint,
+             enable_interswarm=enable_interswarm,
+             can_complete_tasks=can_complete_tasks,
+             tool_format=tool_format,
+             exclude_tools=exclude_tools,
+             reasoning_effort=reasoning_effort,
+             thinking_budget=thinking_budget,
+             max_tokens=max_tokens,
+             memory=memory,
+             use_proxy=use_proxy,
+             _debug_include_mail_tools=_debug_include_mail_tools,
+         )
+
+     def __call__(
+         self,
+         messages: list[dict[str, Any]],
+         tool_choice: str | dict[str, str] = "required",
+     ) -> Awaitable[AgentOutput]:
+         """Execute the searcher agent function."""
+         return super().__call__(messages, tool_choice)
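
Construction is a straight pass-through to `LiteLLMActionAgentFunction`, so wiring the searcher up might look like the sketch below. The tool-schema shape and the model string are placeholders (assumptions); in the package itself the swarm factories and swarms.json presumably handle this wiring:

from mail.examples.research.searcher.actions import SEARCH_TOPIC_PARAMETERS
from mail.examples.research.searcher.agent import LiteLLMSearcherFunction

searcher = LiteLLMSearcherFunction(
    name="searcher",
    comm_targets=["researcher"],  # the agent it reports back to
    tools=[
        {
            "name": "search_topic",
            "description": "Search for information on a topic from various sources.",
            "parameters": SEARCH_TOPIC_PARAMETERS,
        }
    ],
    llm="openai/gpt-4.1-mini",  # placeholder LiteLLM model string
    system="You are searcher@research, ...",  # normally the rendered SYSPROMPT
)

# __call__ returns Awaitable[AgentOutput] and defaults tool_choice to "required":
# output = await searcher([{"role": "user", "content": "Find facts on AI"}])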
mail/examples/research/searcher/prompts.py
@@ -0,0 +1,53 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # Copyright (c) 2025 Charon Labs
+
+ SYSPROMPT = """You are searcher@{swarm}, the information retrieval specialist for this research assistant swarm.
+
+ # Your Role
+ Search for information on topics and extract key facts from text to support research tasks.
+
+ # Critical Rule: Responding
+ You CANNOT talk to users directly or call `task_complete`. You MUST use `send_response` to reply to the agent who contacted you.
+ - When you receive a request, note the sender (usually "researcher")
+ - After searching, call `send_response(target=<sender>, subject="Re: ...", body=<your findings>)`
+ - Include ALL relevant search results in your response body
+
+ # Tools
+
+ ## Search Operations
+ - `search_topic(query, source)`: Search for information on a topic
+ - `extract_facts(text)`: Extract key facts from a block of text
+
+ ## Communication
+ - `send_response(target, subject, body)`: Reply to the agent who requested information
+ - `send_request(target, subject, body)`: Ask another agent for information
+ - `acknowledge_broadcast(note)`: Acknowledge a broadcast message
+ - `ignore_broadcast(reason)`: Ignore an irrelevant broadcast
+
+ # Available Sources
+
+ For `search_topic`, you can specify:
+ - **wikipedia**: Encyclopedia-style factual information
+ - **academic**: Scholarly and research-based sources
+ - **news**: Current events and news articles
+ - **general**: General web search results
+
+ # Workflow
+
+ 1. Receive request from another agent (note the sender)
+ 2. Determine the best search strategy:
+    - Use `search_topic` to find information
+    - Use `extract_facts` if given raw text to analyze
+ 3. Review and organize the results
+ 4. Call `send_response` to the original sender with:
+    - Search results or extracted facts
+    - Source attribution
+    - Any relevant caveats about the information
+
+ # Guidelines
+
+ - Try multiple sources if initial results are sparse
+ - Note the recency and reliability of sources
+ - Extract specific facts rather than vague summaries
+ - Include source URLs when available
+ - Use "Re: <original subject>" as your response subject"""
mail/examples/research/summarizer/__init__.py
@@ -0,0 +1,18 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # Copyright (c) 2025 Charon Labs
+
+ """Summarizer agent for the Research Assistant swarm."""
+
+ from mail.examples.research.summarizer.agent import LiteLLMSummarizerFunction
+ from mail.examples.research.summarizer.actions import (
+     summarize_text,
+     create_bibliography,
+ )
+ from mail.examples.research.summarizer.prompts import SYSPROMPT
+
+ __all__ = [
+     "LiteLLMSummarizerFunction",
+     "summarize_text",
+     "create_bibliography",
+     "SYSPROMPT",
+ ]
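
For reference, these re-exports mean downstream code can pull the summarizer's pieces from the subpackage root rather than its submodules, along the lines of:

# Hypothetical consumer import; names as re-exported in __all__ above.
from mail.examples.research.summarizer import (
    SYSPROMPT,
    LiteLLMSummarizerFunction,
    create_bibliography,
    summarize_text,
)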