noesium-0.1.0-py3-none-any.whl → noesium-0.2.1-py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (60)
  1. noesium/agents/askura_agent/__init__.py +22 -0
  2. noesium/agents/askura_agent/askura_agent.py +480 -0
  3. noesium/agents/askura_agent/conversation.py +164 -0
  4. noesium/agents/askura_agent/extractor.py +175 -0
  5. noesium/agents/askura_agent/memory.py +14 -0
  6. noesium/agents/askura_agent/models.py +239 -0
  7. noesium/agents/askura_agent/prompts.py +202 -0
  8. noesium/agents/askura_agent/reflection.py +234 -0
  9. noesium/agents/askura_agent/summarizer.py +30 -0
  10. noesium/agents/askura_agent/utils.py +6 -0
  11. noesium/agents/deep_research/__init__.py +13 -0
  12. noesium/agents/deep_research/agent.py +398 -0
  13. noesium/agents/deep_research/prompts.py +84 -0
  14. noesium/agents/deep_research/schemas.py +42 -0
  15. noesium/agents/deep_research/state.py +54 -0
  16. noesium/agents/search/__init__.py +5 -0
  17. noesium/agents/search/agent.py +474 -0
  18. noesium/agents/search/state.py +28 -0
  19. noesium/core/__init__.py +1 -1
  20. noesium/core/agent/base.py +10 -2
  21. noesium/core/goalith/decomposer/llm_decomposer.py +1 -1
  22. noesium/core/llm/__init__.py +1 -1
  23. noesium/core/llm/base.py +2 -2
  24. noesium/core/llm/litellm.py +42 -21
  25. noesium/core/llm/llamacpp.py +25 -4
  26. noesium/core/llm/ollama.py +43 -22
  27. noesium/core/llm/openai.py +25 -5
  28. noesium/core/llm/openrouter.py +1 -1
  29. noesium/core/toolify/base.py +9 -2
  30. noesium/core/toolify/config.py +2 -2
  31. noesium/core/toolify/registry.py +21 -5
  32. noesium/core/tracing/opik_tracing.py +7 -7
  33. noesium/core/vector_store/__init__.py +2 -2
  34. noesium/core/vector_store/base.py +1 -1
  35. noesium/core/vector_store/pgvector.py +10 -13
  36. noesium/core/vector_store/weaviate.py +2 -1
  37. noesium/toolkits/__init__.py +1 -0
  38. noesium/toolkits/arxiv_toolkit.py +310 -0
  39. noesium/toolkits/audio_aliyun_toolkit.py +441 -0
  40. noesium/toolkits/audio_toolkit.py +370 -0
  41. noesium/toolkits/bash_toolkit.py +332 -0
  42. noesium/toolkits/document_toolkit.py +454 -0
  43. noesium/toolkits/file_edit_toolkit.py +552 -0
  44. noesium/toolkits/github_toolkit.py +395 -0
  45. noesium/toolkits/gmail_toolkit.py +575 -0
  46. noesium/toolkits/image_toolkit.py +425 -0
  47. noesium/toolkits/memory_toolkit.py +398 -0
  48. noesium/toolkits/python_executor_toolkit.py +334 -0
  49. noesium/toolkits/search_toolkit.py +451 -0
  50. noesium/toolkits/serper_toolkit.py +623 -0
  51. noesium/toolkits/tabular_data_toolkit.py +537 -0
  52. noesium/toolkits/user_interaction_toolkit.py +365 -0
  53. noesium/toolkits/video_toolkit.py +168 -0
  54. noesium/toolkits/wikipedia_toolkit.py +420 -0
  55. noesium-0.2.1.dist-info/METADATA +253 -0
  56. {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/RECORD +59 -23
  57. {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/licenses/LICENSE +1 -1
  58. noesium-0.1.0.dist-info/METADATA +0 -525
  59. {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/WHEEL +0 -0
  60. {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/top_level.txt +0 -0
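The bulk of this release is the new `noesium/toolkits/` package (items 38–54 above). Each toolkit follows the same pattern visible in the `wikipedia_toolkit.py` hunk below: a `@register_toolkit("<name>")` decorator plus a `get_tools_map()` that exposes its tools. As a rough orientation, here is a minimal sketch of driving one of the new toolkits through the registry, extrapolated from the calculator example in the bundled README; the `config` dict keys are assumptions mirroring the `self.config.config.get(...)` lookups in the toolkit source:

```python
import asyncio

from noesium.core.toolify import ToolkitConfig, ToolkitRegistry


async def main():
    # "wikipedia" is the name bound by @register_toolkit in
    # noesium/toolkits/wikipedia_toolkit.py (added in 0.2.1). The config
    # keys are assumptions based on the toolkit's __init__ lookups.
    config = ToolkitConfig(name="wikipedia", config={"language": "en"})
    wiki = ToolkitRegistry.create_toolkit("wikipedia", config)

    # Tool coroutines are exposed directly on the toolkit; names match
    # the keys returned by get_tools_map().
    results = await wiki.search_wikipedia("artificial intelligence", num_results=3)
    for item in results:
        print(item["title"])


asyncio.run(main())
```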
noesium/toolkits/wikipedia_toolkit.py
@@ -0,0 +1,420 @@
+ """
+ Wikipedia toolkit for encyclopedia search and content retrieval.
+
+ Provides tools for searching Wikipedia articles, retrieving content,
+ and accessing Wikipedia's vast knowledge base through the MediaWiki API.
+ """
+
+ import datetime
+ from typing import Callable, Dict, List, Optional
+
+ import aiohttp
+
+ from noesium.core.toolify.base import AsyncBaseToolkit
+ from noesium.core.toolify.config import ToolkitConfig
+ from noesium.core.toolify.registry import register_toolkit
+ from noesium.core.utils.logging import get_logger
+
+ logger = get_logger(__name__)
+
+ try:
+     import wikipediaapi
+
+     WIKIPEDIA_API_AVAILABLE = True
+ except ImportError:
+     wikipediaapi = None
+     WIKIPEDIA_API_AVAILABLE = False
+
+
+ @register_toolkit("wikipedia")
+ class WikipediaToolkit(AsyncBaseToolkit):
+     """
+     Toolkit for Wikipedia search and content retrieval.
+
+     This toolkit provides comprehensive access to Wikipedia's content through
+     both the wikipedia-api library and direct MediaWiki API calls. It supports
+     multiple languages, different content formats, and various search modes.
+
+     Features:
+     - Multi-language Wikipedia support
+     - Full article content and summaries
+     - Page search and disambiguation
+     - Category and link information
+     - Recent changes and trending topics
+     - Configurable output formats (Wiki markup or HTML)
+     - Rate limiting and error handling
+
+     Required dependency: wikipedia-api
+     Install with: pip install wikipedia-api
+     """
+
+     def __init__(self, config: ToolkitConfig = None):
+         """
+         Initialize the Wikipedia toolkit.
+
+         Args:
+             config: Toolkit configuration
+
+         Raises:
+             ImportError: If wikipedia-api package is not installed
+         """
+         super().__init__(config)
+
+         if not WIKIPEDIA_API_AVAILABLE:
+             raise ImportError(
+                 "wikipedia-api package is required for WikipediaToolkit. " "Install with: pip install wikipedia-api"
+             )
+
+         # Configuration
+         self.user_agent = self.config.config.get("user_agent", "noesium-wikipedia-toolkit")
+         self.language = self.config.config.get("language", "en")
+         self.content_type = self.config.config.get("content_type", "text")  # "text" or "summary"
+         self.extract_format = self.config.config.get("extract_format", "WIKI")  # "WIKI" or "HTML"
+
+         # Map string format to wikipediaapi.ExtractFormat
+         extract_format_map = {
+             "WIKI": wikipediaapi.ExtractFormat.WIKI,
+             "HTML": wikipediaapi.ExtractFormat.HTML,
+         }
+
+         if self.extract_format not in extract_format_map:
+             self.logger.warning(f"Invalid extract_format: {self.extract_format}, using WIKI")
+             self.extract_format = "WIKI"
+
+         # Initialize Wikipedia API client
+         self.wiki_client = wikipediaapi.Wikipedia(
+             user_agent=self.user_agent, language=self.language, extract_format=extract_format_map[self.extract_format]
+         )
+
+         # MediaWiki API configuration
+         self.api_base_url = f"https://{self.language}.wikipedia.org/w/api.php"
+
+         self.logger.info(f"Wikipedia toolkit initialized for language: {self.language}")
+
+     async def _make_api_request(self, params: Dict) -> Dict:
+         """
+         Make a request to the MediaWiki API.
+
+         Args:
+             params: API parameters
+
+         Returns:
+             API response as dictionary
+         """
+         default_params = {"format": "json", "formatversion": "2"}
+         params.update(default_params)
+
+         try:
+             async with aiohttp.ClientSession() as session:
+                 async with session.get(self.api_base_url, params=params) as response:
+                     response.raise_for_status()
+                     return await response.json()
+
+         except Exception as e:
+             self.logger.error(f"Wikipedia API request failed: {e}")
+             return {"error": f"API request failed: {str(e)}"}
+
+     async def search_wikipedia(self, query: str, num_results: int = 5) -> List[Dict]:
+         """
+         Search Wikipedia for articles matching the query.
+
+         This tool searches Wikipedia for articles related to your query and returns
+         a list of matching articles with basic information. It's useful for finding
+         relevant Wikipedia pages before retrieving detailed content.
+
+         Args:
+             query: Search query string
+             num_results: Maximum number of results to return (default: 5)
+
+         Returns:
+             List of dictionaries containing search results with:
+             - title: Article title
+             - pageid: Wikipedia page ID
+             - snippet: Brief text snippet with search terms highlighted
+             - wordcount: Number of words in the article
+             - size: Article size in bytes
+             - timestamp: Last modification timestamp
+
+         Example:
+             results = await search_wikipedia("artificial intelligence", 3)
+             for result in results:
+                 print(f"Title: {result['title']}")
+                 print(f"Snippet: {result['snippet']}")
+         """
+         self.logger.info(f"Searching Wikipedia for: {query}")
+
+         try:
+             params = {
+                 "action": "query",
+                 "list": "search",
+                 "srsearch": query,
+                 "srlimit": min(num_results, 50),  # API limit
+                 "srprop": "snippet|titlesnippet|size|wordcount|timestamp",
+             }
+
+             response = await self._make_api_request(params)
+
+             if "error" in response:
+                 return [{"error": response["error"]}]
+
+             search_results = response.get("query", {}).get("search", [])
+
+             results = []
+             for result in search_results:
+                 results.append(
+                     {
+                         "title": result.get("title", ""),
+                         "pageid": result.get("pageid"),
+                         "snippet": result.get("snippet", "")
+                         .replace('<span class="searchmatch">', "")
+                         .replace("</span>", ""),
+                         "wordcount": result.get("wordcount", 0),
+                         "size": result.get("size", 0),
+                         "timestamp": result.get("timestamp", ""),
+                     }
+                 )
+
+             self.logger.info(f"Found {len(results)} search results")
+             return results
+
+         except Exception as e:
+             error_msg = f"Wikipedia search failed: {str(e)}"
+             self.logger.error(error_msg)
+             return [{"error": error_msg}]
+
+     async def get_wikipedia_page(self, title: str, content_type: Optional[str] = None) -> Dict:
+         """
+         Retrieve a Wikipedia page by title.
+
+         This tool fetches the complete content of a Wikipedia article by its title.
+         It can return either the full article text or just a summary, depending
+         on the configuration.
+
+         Args:
+             title: Wikipedia article title
+             content_type: "text" for full article, "summary" for summary only
+
+         Returns:
+             Dictionary containing:
+             - title: Article title
+             - content: Article content (full text or summary)
+             - url: Wikipedia URL
+             - exists: Whether the page exists
+             - categories: List of categories
+             - links: List of internal links
+             - references: List of external references
+             - summary: Article summary (always included)
+
+         Example:
+             page = await get_wikipedia_page("Python (programming language)")
+             print(f"Title: {page['title']}")
+             print(f"Summary: {page['summary'][:200]}...")
+         """
+         content_type = content_type or self.content_type
+
+         self.logger.info(f"Retrieving Wikipedia page: {title}")
+
+         try:
+             # Get the page using wikipedia-api
+             page = self.wiki_client.page(title)
+
+             if not page.exists():
+                 return {"title": title, "exists": False, "error": f"Wikipedia page '{title}' does not exist"}
+
+             # Extract content based on type
+             if content_type == "summary":
+                 content = page.summary
+             else:
+                 content = page.text
+
+             # Get additional information
+             categories = list(page.categories.keys()) if hasattr(page, "categories") else []
+             links = list(page.links.keys()) if hasattr(page, "links") else []
+
+             result = {
+                 "title": page.title,
+                 "content": content,
+                 "summary": page.summary,
+                 "url": page.fullurl,
+                 "exists": True,
+                 "categories": categories[:20],  # Limit to first 20
+                 "links": links[:50],  # Limit to first 50
+                 "page_id": getattr(page, "pageid", None),
+                 "language": self.language,
+                 "content_type": content_type,
+             }
+
+             # Get references using API
+             try:
+                 refs_params = {"action": "query", "prop": "extlinks", "titles": title, "ellimit": 20}
+                 refs_response = await self._make_api_request(refs_params)
+
+                 pages = refs_response.get("query", {}).get("pages", [])
+                 if pages:
+                     extlinks = pages[0].get("extlinks", [])
+                     result["references"] = [link.get("*", "") for link in extlinks]
+                 else:
+                     result["references"] = []
+
+             except Exception:
+                 result["references"] = []
+
+             self.logger.info(f"Retrieved page: {page.title} ({len(content)} characters)")
+             return result
+
+         except Exception as e:
+             error_msg = f"Failed to retrieve Wikipedia page '{title}': {str(e)}"
+             self.logger.error(error_msg)
+             return {"title": title, "exists": False, "error": error_msg}
+
+     async def get_wikipedia_summary(self, title: str, sentences: int = 3) -> str:
+         """
+         Get a concise summary of a Wikipedia article.
+
+         Args:
+             title: Wikipedia article title
+             sentences: Number of sentences to include in summary
+
+         Returns:
+             Article summary text
+         """
+         try:
+             page_data = await self.get_wikipedia_page(title, content_type="summary")
+
+             if not page_data.get("exists", False):
+                 return page_data.get("error", "Page not found")
+
+             summary = page_data.get("summary", "")
+
+             # Limit to specified number of sentences
+             if sentences > 0:
+                 sentences_list = summary.split(". ")
+                 if len(sentences_list) > sentences:
+                     summary = ". ".join(sentences_list[:sentences]) + "."
+
+             return summary
+
+         except Exception as e:
+             return f"Failed to get summary: {str(e)}"
+
+     async def get_random_wikipedia_page(self) -> Dict:
+         """
+         Get a random Wikipedia article.
+
+         Returns:
+             Dictionary with random article information
+         """
+         self.logger.info("Getting random Wikipedia page")
+
+         try:
+             params = {"action": "query", "list": "random", "rnnamespace": 0, "rnlimit": 1}  # Main namespace only
+
+             response = await self._make_api_request(params)
+
+             if "error" in response:
+                 return {"error": response["error"]}
+
+             random_pages = response.get("query", {}).get("random", [])
+
+             if not random_pages:
+                 return {"error": "No random page found"}
+
+             random_title = random_pages[0].get("title")
+
+             # Get the full page content
+             return await self.get_wikipedia_page(random_title)
+
+         except Exception as e:
+             error_msg = f"Failed to get random page: {str(e)}"
+             self.logger.error(error_msg)
+             return {"error": error_msg}
+
+     async def get_wikipedia_categories(self, title: str) -> List[str]:
+         """
+         Get categories for a Wikipedia article.
+
+         Args:
+             title: Wikipedia article title
+
+         Returns:
+             List of category names
+         """
+         try:
+             params = {"action": "query", "prop": "categories", "titles": title, "cllimit": 50}
+
+             response = await self._make_api_request(params)
+
+             if "error" in response:
+                 return [f"Error: {response['error']}"]
+
+             pages = response.get("query", {}).get("pages", [])
+
+             if not pages:
+                 return ["No categories found"]
+
+             categories = pages[0].get("categories", [])
+             return [cat.get("title", "").replace("Category:", "") for cat in categories]
+
+         except Exception as e:
+             return [f"Error getting categories: {str(e)}"]
+
+     async def get_page_views(self, title: str, days: int = 30) -> Dict:
+         """
+         Get page view statistics for a Wikipedia article.
+
+         Args:
+             title: Wikipedia article title
+             days: Number of days to look back (default: 30)
+
+         Returns:
+             Dictionary with view statistics
+         """
+         try:
+             # Calculate date range
+             end_date = datetime.datetime.now()
+             start_date = end_date - datetime.timedelta(days=days)
+
+             # Format dates for API
+             start_str = start_date.strftime("%Y%m%d")
+             end_str = end_date.strftime("%Y%m%d")
+
+             # Use Wikimedia REST API for pageviews
+             url = f"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/{self.language}.wikipedia/all-access/user/{title}/daily/{start_str}/{end_str}"
+
+             async with aiohttp.ClientSession() as session:
+                 async with session.get(url) as response:
+                     if response.status == 200:
+                         data = await response.json()
+                         items = data.get("items", [])
+
+                         total_views = sum(item.get("views", 0) for item in items)
+                         avg_daily_views = total_views / len(items) if items else 0
+
+                         return {
+                             "title": title,
+                             "total_views": total_views,
+                             "average_daily_views": round(avg_daily_views, 2),
+                             "days_analyzed": len(items),
+                             "date_range": f"{start_str} to {end_str}",
+                         }
+                     else:
+                         return {"error": f"Failed to get page views: HTTP {response.status}"}
+
+         except Exception as e:
+             return {"error": f"Failed to get page views: {str(e)}"}
+
+     async def get_tools_map(self) -> Dict[str, Callable]:
+         """
+         Get the mapping of tool names to their implementation functions.
+
+         Returns:
+             Dictionary mapping tool names to callable functions
+         """
+         return {
+             "search_wikipedia": self.search_wikipedia,
+             "get_wikipedia_page": self.get_wikipedia_page,
+             "get_wikipedia_summary": self.get_wikipedia_summary,
+             "get_random_wikipedia_page": self.get_random_wikipedia_page,
+             "get_wikipedia_categories": self.get_wikipedia_categories,
+             "get_page_views": self.get_page_views,
+         }
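The `get_tools_map()` above publishes six coroutines. A short direct-use sketch, assuming only the constructor and method signatures shown in this hunk; the `config` keys mirror the `self.config.config.get(...)` lookups in `__init__`, and everything else is illustrative:

```python
import asyncio

from noesium.core.toolify.config import ToolkitConfig
from noesium.toolkits.wikipedia_toolkit import WikipediaToolkit


async def main():
    # Keys mirror the self.config.config.get(...) calls in __init__;
    # passing them this way is an assumption about ToolkitConfig.
    config = ToolkitConfig(name="wikipedia", config={"language": "en", "content_type": "summary"})
    wiki = WikipediaToolkit(config)

    print(await wiki.get_wikipedia_summary("Alan Turing", sentences=2))

    # The Wikimedia pageviews REST endpoint expects underscores in article
    # titles, and get_page_views interpolates the title into the URL as-is.
    views = await wiki.get_page_views("Alan_Turing", days=7)
    print(views.get("total_views", views))


asyncio.run(main())
```

Note that `get_page_views` talks to the Wikimedia REST API rather than the MediaWiki action API used by the other tools, which is why it builds its own URL instead of going through `_make_api_request`.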
noesium-0.2.1.dist-info/METADATA
@@ -0,0 +1,253 @@
+ Metadata-Version: 2.4
+ Name: noesium
+ Version: 0.2.1
+ Summary: Towards a cognitive agentic framework
+ Author-email: Xiaming Chen <chenxm35@gmail.com>
+ Maintainer-email: Xiaming Chen <chenxm35@gmail.com>
+ License-Expression: MIT
+ Project-URL: Homepage, https://github.com/mirasoth/noesium
+ Project-URL: Repository, https://github.com/mirasoth/noesium
+ Keywords: agents,multi-agent system,cognition,artificial intelligence
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.11
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: requests>=2.31.0
+ Requires-Dist: aiohttp>=3.12.15
+ Requires-Dist: python-dotenv>=1.1.1
+ Requires-Dist: colorlog>=6.8.0
+ Requires-Dist: typing-extensions>=4.8.0
+ Provides-Extra: google
+ Requires-Dist: google-genai>=1.5.0; extra == "google"
+ Requires-Dist: google-api-python-client>=2.174.0; extra == "google"
+ Requires-Dist: google-auth-oauthlib>=1.2.2; extra == "google"
+ Requires-Dist: google-auth>=2.40.3; extra == "google"
+ Provides-Extra: aliyun
+ Requires-Dist: aliyun-python-sdk-core<3.0.0,>=2.13.1; extra == "aliyun"
+ Provides-Extra: llm
+ Requires-Dist: litellm>=1.0.0; extra == "llm"
+ Requires-Dist: openai>=1.0.0; extra == "llm"
+ Requires-Dist: instructor>=1.10.0; extra == "llm"
+ Provides-Extra: local-llm
+ Requires-Dist: ollama>=0.5.3; extra == "local-llm"
+ Requires-Dist: llama-cpp-python>=0.3.16; extra == "local-llm"
+ Requires-Dist: huggingface-hub>=0.34.4; extra == "local-llm"
+ Provides-Extra: ai-providers-all
+ Requires-Dist: noesium[aliyun,google,llm,local-llm]; extra == "ai-providers-all"
+ Provides-Extra: langchain
+ Requires-Dist: langchain-core>=0.3.72; extra == "langchain"
+ Requires-Dist: langchain-text-splitters>=0.3.0; extra == "langchain"
+ Requires-Dist: langchain-ollama>=0.2.0; extra == "langchain"
+ Requires-Dist: langgraph>=0.5.4; extra == "langchain"
+ Provides-Extra: agents
+ Requires-Dist: noesium[langchain]; extra == "agents"
+ Requires-Dist: bubus>=1.5.6; extra == "agents"
+ Provides-Extra: postgres
+ Requires-Dist: psycopg2-binary>=2.9.0; extra == "postgres"
+ Requires-Dist: psycopg2>=2.9.10; extra == "postgres"
+ Provides-Extra: weaviate
+ Requires-Dist: weaviate-client<5,>=4; extra == "weaviate"
+ Requires-Dist: protobuf<6,>=5; extra == "weaviate"
+ Provides-Extra: datascience
+ Requires-Dist: networkx>=3.5; extra == "datascience"
+ Requires-Dist: matplotlib>=3.8.0; extra == "datascience"
+ Requires-Dist: pexpect>=4.9.0; extra == "datascience"
+ Requires-Dist: ipython>=8.18.0; extra == "datascience"
+ Requires-Dist: pandas>=2.0.0; extra == "datascience"
+ Provides-Extra: mcp
+ Requires-Dist: mcp>=1.0.0; extra == "mcp"
+ Provides-Extra: tools
+ Requires-Dist: noesium[aliyun,datascience,google,mcp]; extra == "tools"
+ Requires-Dist: wizsearch<2.0.0,>=1.0.1; extra == "tools"
+ Requires-Dist: arxiv>=2.2.0; extra == "tools"
+ Requires-Dist: pillow<12.0,>=10.1.0; extra == "tools"
+ Requires-Dist: pymupdf>=1.23.0; extra == "tools"
+ Requires-Dist: openpyxl>=3.1.5; extra == "tools"
+ Requires-Dist: wikipedia-api>=0.6.0; extra == "tools"
+ Requires-Dist: aiofiles>=24.1.0; extra == "tools"
+ Provides-Extra: all
+ Requires-Dist: noesium[agents,ai-providers-all,postgres,tools,weaviate]; extra == "all"
+ Provides-Extra: dev
+ Requires-Dist: pytest<9,>=8.2; extra == "dev"
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
+ Requires-Dist: pytest-asyncio>=1.1.0; extra == "dev"
+ Requires-Dist: black>=23.0.0; extra == "dev"
+ Requires-Dist: isort>=5.12.0; extra == "dev"
+ Requires-Dist: mypy>=1.10.0; extra == "dev"
+ Requires-Dist: autoflake>=2.3.1; extra == "dev"
+ Requires-Dist: flake8>=7.3.0; extra == "dev"
+ Dynamic: license-file
+
+ # Noesium
+
+ [![CI](https://github.com/mirasoth/noesium/actions/workflows/ci.yml/badge.svg)](https://github.com/mirasoth/noesium/actions/workflows/ci.yml)
+ [![PyPI version](https://img.shields.io/pypi/v/noesium.svg)](https://pypi.org/project/noesium/)
+ [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/mirasoth/noesium)
+
+ Project Noesium is an initiative to develop a computation-driven, cognitive agentic system. This repo contains the foundational abstractions (Agent, Memory, Tool, Goal, Orchestration, and more) along with essential modules such as LLM clients, logging, message buses, model routing, and observability. For the underlying philosophy, refer to my talk on MAS ([link](https://github.com/caesar0301/mas-talk-2508/blob/master/mas-talk-xmingc.pdf)).
+
+ ## Installation
+
+ ```bash
+ pip install -U noesium
+ ```
+
+ ## Core Modules
+
+ | Module | Description |
+ |--------|-------------|
+ | **LLM Integration** (`noesium.core.llm`) | Multi-provider support (OpenAI, OpenRouter, Ollama, LlamaCPP, LiteLLM), dynamic routing, token tracking |
+ | **Goal Management** (`noesium.core.goalith`) | LLM-based goal decomposition, DAG-based goal graph, dependency tracking |
+ | **Tool Management** (`noesium.core.toolify`) | Tool registry, MCP integration, 17+ built-in toolkits |
+ | **Memory** (`noesium.core.memory`) | MemU integration, embedding-based retrieval, multi-category storage |
+ | **Vector Store** (`noesium.core.vector_store`) | PGVector and Weaviate support, semantic search |
+ | **Message Bus** (`noesium.core.msgbus`) | Event-driven architecture, watchdog patterns |
+ | **Routing** (`noesium.core.routing`) | Dynamic complexity-based model selection |
+ | **Tracing** (`noesium.core.tracing`) | Token usage monitoring, Opik integration |
+
+ ## Built-in Agents
+
+ - **AskuraAgent** - Conversational agent for collecting semi-structured information via human-in-the-loop workflows
+ - **SearchAgent** - Web search with query polishing, multi-engine support, and optional content crawling
+ - **DeepResearchAgent** - Iterative research with LLM-powered reflection and citation generation
+ - **MemoryAgent** - Memory management with categorization, embedding search, and memory linking
+
+ ## Quick Start
+
+ ### LLM Client
+
+ ```python
+ from noesium.core.llm import get_llm_client
+
+ # Create client (supports openai, openrouter, ollama, llamacpp)
+ client = get_llm_client(provider="openai", api_key="sk-...")
+
+ # Chat completion
+ response = client.completion([{"role": "user", "content": "Hello!"}])
+
+ # Structured output
+ from pydantic import BaseModel
+
+ class Answer(BaseModel):
+     text: str
+     confidence: float
+
+ client = get_llm_client(provider="openai", structured_output=True)
+ result = client.structured_completion(messages, Answer)
+ ```
+
+ ### Tool Management
+
+ ```python
+ from noesium.core.toolify import BaseToolkit, ToolkitConfig, ToolkitRegistry, register_toolkit
+
+ @register_toolkit("calculator")
+ class CalculatorToolkit(BaseToolkit):
+     def get_tools_map(self):
+         return {"add": self.add, "multiply": self.multiply}
+
+     def add(self, a: float, b: float) -> float:
+         return a + b
+
+     def multiply(self, a: float, b: float) -> float:
+         return a * b
+
+ # Use toolkit
+ config = ToolkitConfig(name="calculator")
+ calc = ToolkitRegistry.create_toolkit("calculator", config)
+ result = calc.call_tool("add", a=5, b=3)
+ ```
+
+ ### Goal Decomposition
+
+ ```python
+ from noesium.core.goalith.goalgraph.node import GoalNode
+ from noesium.core.goalith.goalgraph.graph import GoalGraph
+ from noesium.core.goalith.decomposer import LLMDecomposer
+
+ # Create and decompose a goal
+ goal = GoalNode(description="Plan a product launch", priority=8.0)
+ graph = GoalGraph()
+ graph.add_node(goal)
+
+ decomposer = LLMDecomposer()
+ subgoals = decomposer.decompose(goal, context={"budget": "$50,000"})
+ ```
+
+ ### Search Agent
+
+ ```python
+ from noesium.agents.search import SearchAgent, SearchConfig
+
+ config = SearchConfig(
+     polish_query=True,
+     search_engines=["tavily"],
+     max_results_per_engine=5
+ )
+ agent = SearchAgent(config=config)
+ results = await agent.search("latest developments in quantum computing")
+ ```
+
+ ### Deep Research Agent
+
+ ```python
+ from noesium.agents.deep_research import DeepResearchAgent, DeepResearchConfig
+
+ config = DeepResearchConfig(
+     number_of_initial_queries=3,
+     max_research_loops=3,
+     web_search_citation_enabled=True
+ )
+ agent = DeepResearchAgent(config=config)
+ result = await agent.research("What are the implications of AI on healthcare?")
+ ```
+
+ ## Environment Variables
+
+ ```bash
+ # LLM Providers
+ export NOESIUM_LLM_PROVIDER="openai"
+ export OPENAI_API_KEY="sk-..."
+ export OPENROUTER_API_KEY="sk-..."
+ export OLLAMA_BASE_URL="http://localhost:11434"
+ export LLAMACPP_MODEL_PATH="/path/to/model.gguf"
+
+ # Vector Store (PostgreSQL)
+ export POSTGRES_HOST="localhost"
+ export POSTGRES_PORT="5432"
+ export POSTGRES_DB="vectordb"
+ export POSTGRES_USER="postgres"
+ export POSTGRES_PASSWORD="postgres"
+
+ # Search Tools
+ export SERPER_API_KEY="..."
+ export JINA_API_KEY="..."
+ ```
+
+ ## Examples
+
+ See the `examples/` directory for comprehensive usage examples:
+
+ - `examples/agents/` - Agent demos (Askura, Search, DeepResearch)
+ - `examples/llm/` - LLM provider examples and token tracking
+ - `examples/goals/` - Goal decomposition patterns
+ - `examples/memory/` - Memory agent operations
+ - `examples/tools/` - Toolkit demonstrations
+ - `examples/vector_store/` - PGVector and Weaviate usage
+
+ ## Documentation
+
+ - **Design Specifications**: `specs/` directory contains RFCs for system architecture
+ - **Agent Details**: See `AGENTS.md` for comprehensive agent and toolkit documentation
+ - **Toolify System**: `noesium/core/toolify/README.md`
+
+ ## License
+
+ MIT License - see [LICENSE](LICENSE) file for details.
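Per the dependency metadata above, the optional requirements behind the new toolkits (wikipedia-api, arxiv, pymupdf, openpyxl, aiofiles, and so on) ship under the `tools` extra, so a plausible install line for exercising this release's additions:

```bash
# "tools" pulls wikipedia-api, arxiv, pymupdf, openpyxl, aiofiles, etc.
# (see Requires-Dist above); noesium[all] additionally brings in agents,
# ai-providers-all, postgres, and weaviate.
pip install -U "noesium[tools]==0.2.1"
```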