noesium-0.1.0-py3-none-any.whl → noesium-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- noesium/agents/askura_agent/__init__.py +22 -0
- noesium/agents/askura_agent/askura_agent.py +480 -0
- noesium/agents/askura_agent/conversation.py +164 -0
- noesium/agents/askura_agent/extractor.py +175 -0
- noesium/agents/askura_agent/memory.py +14 -0
- noesium/agents/askura_agent/models.py +239 -0
- noesium/agents/askura_agent/prompts.py +202 -0
- noesium/agents/askura_agent/reflection.py +234 -0
- noesium/agents/askura_agent/summarizer.py +30 -0
- noesium/agents/askura_agent/utils.py +6 -0
- noesium/agents/deep_research/__init__.py +13 -0
- noesium/agents/deep_research/agent.py +398 -0
- noesium/agents/deep_research/prompts.py +84 -0
- noesium/agents/deep_research/schemas.py +42 -0
- noesium/agents/deep_research/state.py +54 -0
- noesium/agents/search/__init__.py +5 -0
- noesium/agents/search/agent.py +474 -0
- noesium/agents/search/state.py +28 -0
- noesium/core/__init__.py +1 -1
- noesium/core/agent/base.py +10 -2
- noesium/core/goalith/decomposer/llm_decomposer.py +1 -1
- noesium/core/llm/__init__.py +1 -1
- noesium/core/llm/base.py +2 -2
- noesium/core/llm/litellm.py +42 -21
- noesium/core/llm/llamacpp.py +25 -4
- noesium/core/llm/ollama.py +43 -22
- noesium/core/llm/openai.py +25 -5
- noesium/core/llm/openrouter.py +1 -1
- noesium/core/toolify/base.py +9 -2
- noesium/core/toolify/config.py +2 -2
- noesium/core/toolify/registry.py +21 -5
- noesium/core/tracing/opik_tracing.py +7 -7
- noesium/core/vector_store/__init__.py +2 -2
- noesium/core/vector_store/base.py +1 -1
- noesium/core/vector_store/pgvector.py +10 -13
- noesium/core/vector_store/weaviate.py +2 -1
- noesium/toolkits/__init__.py +1 -0
- noesium/toolkits/arxiv_toolkit.py +310 -0
- noesium/toolkits/audio_aliyun_toolkit.py +441 -0
- noesium/toolkits/audio_toolkit.py +370 -0
- noesium/toolkits/bash_toolkit.py +332 -0
- noesium/toolkits/document_toolkit.py +454 -0
- noesium/toolkits/file_edit_toolkit.py +552 -0
- noesium/toolkits/github_toolkit.py +395 -0
- noesium/toolkits/gmail_toolkit.py +575 -0
- noesium/toolkits/image_toolkit.py +425 -0
- noesium/toolkits/memory_toolkit.py +398 -0
- noesium/toolkits/python_executor_toolkit.py +334 -0
- noesium/toolkits/search_toolkit.py +451 -0
- noesium/toolkits/serper_toolkit.py +623 -0
- noesium/toolkits/tabular_data_toolkit.py +537 -0
- noesium/toolkits/user_interaction_toolkit.py +365 -0
- noesium/toolkits/video_toolkit.py +168 -0
- noesium/toolkits/wikipedia_toolkit.py +420 -0
- {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/METADATA +56 -48
- {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/RECORD +59 -23
- {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/licenses/LICENSE +1 -1
- {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/WHEEL +0 -0
- {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/top_level.txt +0 -0
noesium/toolkits/wikipedia_toolkit.py
@@ -0,0 +1,420 @@
+"""
+Wikipedia toolkit for encyclopedia search and content retrieval.
+
+Provides tools for searching Wikipedia articles, retrieving content,
+and accessing Wikipedia's vast knowledge base through the MediaWiki API.
+"""
+
+import datetime
+from typing import Callable, Dict, List, Optional
+
+import aiohttp
+
+from noesium.core.toolify.base import AsyncBaseToolkit
+from noesium.core.toolify.config import ToolkitConfig
+from noesium.core.toolify.registry import register_toolkit
+from noesium.core.utils.logging import get_logger
+
+logger = get_logger(__name__)
+
+try:
+    import wikipediaapi
+
+    WIKIPEDIA_API_AVAILABLE = True
+except ImportError:
+    wikipediaapi = None
+    WIKIPEDIA_API_AVAILABLE = False
+
+
+@register_toolkit("wikipedia")
+class WikipediaToolkit(AsyncBaseToolkit):
+    """
+    Toolkit for Wikipedia search and content retrieval.
+
+    This toolkit provides comprehensive access to Wikipedia's content through
+    both the wikipedia-api library and direct MediaWiki API calls. It supports
+    multiple languages, different content formats, and various search modes.
+
+    Features:
+    - Multi-language Wikipedia support
+    - Full article content and summaries
+    - Page search and disambiguation
+    - Category and link information
+    - Recent changes and trending topics
+    - Configurable output formats (Wiki markup or HTML)
+    - Rate limiting and error handling
+
+    Required dependency: wikipedia-api
+    Install with: pip install wikipedia-api
+    """
+
+    def __init__(self, config: ToolkitConfig = None):
+        """
+        Initialize the Wikipedia toolkit.
+
+        Args:
+            config: Toolkit configuration
+
+        Raises:
+            ImportError: If wikipedia-api package is not installed
+        """
+        super().__init__(config)
+
+        if not WIKIPEDIA_API_AVAILABLE:
+            raise ImportError(
+                "wikipedia-api package is required for WikipediaToolkit. " "Install with: pip install wikipedia-api"
+            )
+
+        # Configuration
+        self.user_agent = self.config.config.get("user_agent", "noesium-wikipedia-toolkit")
+        self.language = self.config.config.get("language", "en")
+        self.content_type = self.config.config.get("content_type", "text")  # "text" or "summary"
+        self.extract_format = self.config.config.get("extract_format", "WIKI")  # "WIKI" or "HTML"
+
+        # Map string format to wikipediaapi.ExtractFormat
+        extract_format_map = {
+            "WIKI": wikipediaapi.ExtractFormat.WIKI,
+            "HTML": wikipediaapi.ExtractFormat.HTML,
+        }
+
+        if self.extract_format not in extract_format_map:
+            self.logger.warning(f"Invalid extract_format: {self.extract_format}, using WIKI")
+            self.extract_format = "WIKI"
+
+        # Initialize Wikipedia API client
+        self.wiki_client = wikipediaapi.Wikipedia(
+            user_agent=self.user_agent, language=self.language, extract_format=extract_format_map[self.extract_format]
+        )
+
+        # MediaWiki API configuration
+        self.api_base_url = f"https://{self.language}.wikipedia.org/w/api.php"
+
+        self.logger.info(f"Wikipedia toolkit initialized for language: {self.language}")
+
+    async def _make_api_request(self, params: Dict) -> Dict:
+        """
+        Make a request to the MediaWiki API.
+
+        Args:
+            params: API parameters
+
+        Returns:
+            API response as dictionary
+        """
+        default_params = {"format": "json", "formatversion": "2"}
+        params.update(default_params)
+
+        try:
+            async with aiohttp.ClientSession() as session:
+                async with session.get(self.api_base_url, params=params) as response:
+                    response.raise_for_status()
+                    return await response.json()
+
+        except Exception as e:
+            self.logger.error(f"Wikipedia API request failed: {e}")
+            return {"error": f"API request failed: {str(e)}"}
+
+    async def search_wikipedia(self, query: str, num_results: int = 5) -> List[Dict]:
+        """
+        Search Wikipedia for articles matching the query.
+
+        This tool searches Wikipedia for articles related to your query and returns
+        a list of matching articles with basic information. It's useful for finding
+        relevant Wikipedia pages before retrieving detailed content.
+
+        Args:
+            query: Search query string
+            num_results: Maximum number of results to return (default: 5)
+
+        Returns:
+            List of dictionaries containing search results with:
+            - title: Article title
+            - pageid: Wikipedia page ID
+            - snippet: Brief text snippet with search terms highlighted
+            - wordcount: Number of words in the article
+            - size: Article size in bytes
+            - timestamp: Last modification timestamp
+
+        Example:
+            results = await search_wikipedia("artificial intelligence", 3)
+            for result in results:
+                print(f"Title: {result['title']}")
+                print(f"Snippet: {result['snippet']}")
+        """
+        self.logger.info(f"Searching Wikipedia for: {query}")
+
+        try:
+            params = {
+                "action": "query",
+                "list": "search",
+                "srsearch": query,
+                "srlimit": min(num_results, 50),  # API limit
+                "srprop": "snippet|titlesnippet|size|wordcount|timestamp",
+            }
+
+            response = await self._make_api_request(params)
+
+            if "error" in response:
+                return [{"error": response["error"]}]
+
+            search_results = response.get("query", {}).get("search", [])
+
+            results = []
+            for result in search_results:
+                results.append(
+                    {
+                        "title": result.get("title", ""),
+                        "pageid": result.get("pageid"),
+                        "snippet": result.get("snippet", "")
+                        .replace('<span class="searchmatch">', "")
+                        .replace("</span>", ""),
+                        "wordcount": result.get("wordcount", 0),
+                        "size": result.get("size", 0),
+                        "timestamp": result.get("timestamp", ""),
+                    }
+                )
+
+            self.logger.info(f"Found {len(results)} search results")
+            return results
+
+        except Exception as e:
+            error_msg = f"Wikipedia search failed: {str(e)}"
+            self.logger.error(error_msg)
+            return [{"error": error_msg}]
+
+    async def get_wikipedia_page(self, title: str, content_type: Optional[str] = None) -> Dict:
+        """
+        Retrieve a Wikipedia page by title.
+
+        This tool fetches the complete content of a Wikipedia article by its title.
+        It can return either the full article text or just a summary, depending
+        on the configuration.
+
+        Args:
+            title: Wikipedia article title
+            content_type: "text" for full article, "summary" for summary only
+
+        Returns:
+            Dictionary containing:
+            - title: Article title
+            - content: Article content (full text or summary)
+            - url: Wikipedia URL
+            - exists: Whether the page exists
+            - categories: List of categories
+            - links: List of internal links
+            - references: List of external references
+            - summary: Article summary (always included)
+
+        Example:
+            page = await get_wikipedia_page("Python (programming language)")
+            print(f"Title: {page['title']}")
+            print(f"Summary: {page['summary'][:200]}...")
+        """
+        content_type = content_type or self.content_type
+
+        self.logger.info(f"Retrieving Wikipedia page: {title}")
+
+        try:
+            # Get the page using wikipedia-api
+            page = self.wiki_client.page(title)
+
+            if not page.exists():
+                return {"title": title, "exists": False, "error": f"Wikipedia page '{title}' does not exist"}
+
+            # Extract content based on type
+            if content_type == "summary":
+                content = page.summary
+            else:
+                content = page.text
+
+            # Get additional information
+            categories = list(page.categories.keys()) if hasattr(page, "categories") else []
+            links = list(page.links.keys()) if hasattr(page, "links") else []
+
+            result = {
+                "title": page.title,
+                "content": content,
+                "summary": page.summary,
+                "url": page.fullurl,
+                "exists": True,
+                "categories": categories[:20],  # Limit to first 20
+                "links": links[:50],  # Limit to first 50
+                "page_id": getattr(page, "pageid", None),
+                "language": self.language,
+                "content_type": content_type,
+            }
+
+            # Get references using API
+            try:
+                refs_params = {"action": "query", "prop": "extlinks", "titles": title, "ellimit": 20}
+                refs_response = await self._make_api_request(refs_params)
+
+                pages = refs_response.get("query", {}).get("pages", [])
+                if pages:
+                    extlinks = pages[0].get("extlinks", [])
+                    result["references"] = [link.get("*", "") for link in extlinks]
+                else:
+                    result["references"] = []
+
+            except Exception:
+                result["references"] = []
+
+            self.logger.info(f"Retrieved page: {page.title} ({len(content)} characters)")
+            return result
+
+        except Exception as e:
+            error_msg = f"Failed to retrieve Wikipedia page '{title}': {str(e)}"
+            self.logger.error(error_msg)
+            return {"title": title, "exists": False, "error": error_msg}
+
+    async def get_wikipedia_summary(self, title: str, sentences: int = 3) -> str:
+        """
+        Get a concise summary of a Wikipedia article.
+
+        Args:
+            title: Wikipedia article title
+            sentences: Number of sentences to include in summary
+
+        Returns:
+            Article summary text
+        """
+        try:
+            page_data = await self.get_wikipedia_page(title, content_type="summary")
+
+            if not page_data.get("exists", False):
+                return page_data.get("error", "Page not found")
+
+            summary = page_data.get("summary", "")
+
+            # Limit to specified number of sentences
+            if sentences > 0:
+                sentences_list = summary.split(". ")
+                if len(sentences_list) > sentences:
+                    summary = ". ".join(sentences_list[:sentences]) + "."
+
+            return summary
+
+        except Exception as e:
+            return f"Failed to get summary: {str(e)}"
+
+    async def get_random_wikipedia_page(self) -> Dict:
+        """
+        Get a random Wikipedia article.
+
+        Returns:
+            Dictionary with random article information
+        """
+        self.logger.info("Getting random Wikipedia page")
+
+        try:
+            params = {"action": "query", "list": "random", "rnnamespace": 0, "rnlimit": 1}  # Main namespace only
+
+            response = await self._make_api_request(params)
+
+            if "error" in response:
+                return {"error": response["error"]}
+
+            random_pages = response.get("query", {}).get("random", [])
+
+            if not random_pages:
+                return {"error": "No random page found"}
+
+            random_title = random_pages[0].get("title")
+
+            # Get the full page content
+            return await self.get_wikipedia_page(random_title)
+
+        except Exception as e:
+            error_msg = f"Failed to get random page: {str(e)}"
+            self.logger.error(error_msg)
+            return {"error": error_msg}
+
+    async def get_wikipedia_categories(self, title: str) -> List[str]:
+        """
+        Get categories for a Wikipedia article.
+
+        Args:
+            title: Wikipedia article title
+
+        Returns:
+            List of category names
+        """
+        try:
+            params = {"action": "query", "prop": "categories", "titles": title, "cllimit": 50}
+
+            response = await self._make_api_request(params)
+
+            if "error" in response:
+                return [f"Error: {response['error']}"]
+
+            pages = response.get("query", {}).get("pages", [])
+
+            if not pages:
+                return ["No categories found"]
+
+            categories = pages[0].get("categories", [])
+            return [cat.get("title", "").replace("Category:", "") for cat in categories]
+
+        except Exception as e:
+            return [f"Error getting categories: {str(e)}"]
+
+    async def get_page_views(self, title: str, days: int = 30) -> Dict:
+        """
+        Get page view statistics for a Wikipedia article.
+
+        Args:
+            title: Wikipedia article title
+            days: Number of days to look back (default: 30)
+
+        Returns:
+            Dictionary with view statistics
+        """
+        try:
+            # Calculate date range
+            end_date = datetime.datetime.now()
+            start_date = end_date - datetime.timedelta(days=days)
+
+            # Format dates for API
+            start_str = start_date.strftime("%Y%m%d")
+            end_str = end_date.strftime("%Y%m%d")
+
+            # Use Wikimedia REST API for pageviews
+            url = f"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/{self.language}.wikipedia/all-access/user/{title}/daily/{start_str}/{end_str}"
+
+            async with aiohttp.ClientSession() as session:
+                async with session.get(url) as response:
+                    if response.status == 200:
+                        data = await response.json()
+                        items = data.get("items", [])
+
+                        total_views = sum(item.get("views", 0) for item in items)
+                        avg_daily_views = total_views / len(items) if items else 0
+
+                        return {
+                            "title": title,
+                            "total_views": total_views,
+                            "average_daily_views": round(avg_daily_views, 2),
+                            "days_analyzed": len(items),
+                            "date_range": f"{start_str} to {end_str}",
+                        }
+                    else:
+                        return {"error": f"Failed to get page views: HTTP {response.status}"}
+
+        except Exception as e:
+            return {"error": f"Failed to get page views: {str(e)}"}
+
+    async def get_tools_map(self) -> Dict[str, Callable]:
+        """
+        Get the mapping of tool names to their implementation functions.
+
+        Returns:
+            Dictionary mapping tool names to callable functions
+        """
+        return {
+            "search_wikipedia": self.search_wikipedia,
+            "get_wikipedia_page": self.get_wikipedia_page,
+            "get_wikipedia_summary": self.get_wikipedia_summary,
+            "get_random_wikipedia_page": self.get_random_wikipedia_page,
+            "get_wikipedia_categories": self.get_wikipedia_categories,
+            "get_page_views": self.get_page_views,
+        }
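The new toolkit registers itself under the name "wikipedia" and exposes its async methods through get_tools_map(). A minimal usage sketch follows; it is not taken from the package docs. It assumes ToolkitConfig can be built from a plain options dict exposed as .config (which is how the constructor above reads "language", "user_agent", etc.) and that the optional wikipedia-api dependency is installed (it ships with the "tools" extra in the METADATA diff below).

```python
import asyncio

from noesium.core.toolify.config import ToolkitConfig
from noesium.toolkits.wikipedia_toolkit import WikipediaToolkit


async def main() -> None:
    # Assumption: ToolkitConfig accepts an options dict and exposes it as .config.
    config = ToolkitConfig(config={"language": "en", "user_agent": "my-agent/0.1"})
    toolkit = WikipediaToolkit(config)

    # search_wikipedia returns a list of dicts with title, pageid, snippet, wordcount, ...
    results = await toolkit.search_wikipedia("artificial intelligence", num_results=3)
    for item in results:
        print(item.get("title"), "-", item.get("snippet", "")[:80])

    # Fetch a two-sentence summary of the top hit, if the search succeeded.
    if results and "title" in results[0]:
        print(await toolkit.get_wikipedia_summary(results[0]["title"], sentences=2))


if __name__ == "__main__":
    asyncio.run(main())
```

On error, every tool returns a dict or list entry carrying an "error" key rather than raising, so callers (or an LLM planner driving the tool map) can inspect failures inline.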
{noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: noesium
-Version: 0.1.0
+Version: 0.2.0
 Summary: Towards a cognitive agentic framework
 Author-email: Xiaming Chen <chenxm35@gmail.com>
 Maintainer-email: Xiaming Chen <chenxm35@gmail.com>
@@ -20,50 +20,79 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: requests>=2.31.0
+Requires-Dist: httpx>=0.28.1
+Requires-Dist: aiohttp>=3.12.15
+Requires-Dist: aiofiles>=24.1.0
+Requires-Dist: anyio>=4.9.0
 Requires-Dist: python-dotenv>=1.1.1
 Requires-Dist: colorlog>=6.8.0
 Requires-Dist: typing-extensions>=4.8.0
 Requires-Dist: deprecated>=1.2.18
-Requires-Dist:
-Requires-Dist: openai>=1.0.0
-Requires-Dist: instructor>=1.10.0
-Requires-Dist: google-genai>=1.5.0
-Requires-Dist: litellm>=1.0.0
-Requires-Dist: ollama>=0.5.3
-Requires-Dist: huggingface-hub>=0.34.4
-Requires-Dist: llama-cpp-python>=0.3.16
-Requires-Dist: langchain-core>=0.3.72
-Requires-Dist: langchain-text-splitters>=0.3.0
-Requires-Dist: langchain-ollama>=0.2.0
-Requires-Dist: langgraph>=0.5.4
+Requires-Dist: psutil>=7.0.0
 Requires-Dist: networkx>=3.5
-Requires-Dist:
-
-Requires-Dist:
-Requires-Dist:
-
+Requires-Dist: bubus>=1.5.6
+Provides-Extra: openai
+Requires-Dist: openai>=1.0.0; extra == "openai"
+Requires-Dist: instructor>=1.10.0; extra == "openai"
+Provides-Extra: google
+Requires-Dist: google-genai>=1.5.0; extra == "google"
+Requires-Dist: google-api-python-client>=2.174.0; extra == "google"
+Requires-Dist: google-auth-oauthlib>=1.2.2; extra == "google"
+Requires-Dist: google-auth>=2.40.3; extra == "google"
+Provides-Extra: aliyun
+Requires-Dist: aliyun-python-sdk-core<3.0.0,>=2.13.1; extra == "aliyun"
+Provides-Extra: litellm
+Requires-Dist: litellm>=1.0.0; extra == "litellm"
+Provides-Extra: local-llm
+Requires-Dist: ollama>=0.5.3; extra == "local-llm"
+Requires-Dist: llama-cpp-python>=0.3.16; extra == "local-llm"
+Requires-Dist: huggingface-hub>=0.34.4; extra == "local-llm"
+Provides-Extra: langchain
+Requires-Dist: langchain-core>=0.3.72; extra == "langchain"
+Requires-Dist: langchain-text-splitters>=0.3.0; extra == "langchain"
+Requires-Dist: langchain-ollama>=0.2.0; extra == "langchain"
+Requires-Dist: langgraph>=0.5.4; extra == "langchain"
+Provides-Extra: postgres
+Requires-Dist: psycopg2-binary>=2.9.0; extra == "postgres"
+Requires-Dist: psycopg2>=2.9.10; extra == "postgres"
+Provides-Extra: weaviate
+Requires-Dist: weaviate-client<5,>=4; extra == "weaviate"
+Requires-Dist: protobuf<6,>=5; extra == "weaviate"
+Provides-Extra: datascience
+Requires-Dist: matplotlib>=3.8.0; extra == "datascience"
+Requires-Dist: pexpect>=4.9.0; extra == "datascience"
+Requires-Dist: ipython>=8.18.0; extra == "datascience"
+Requires-Dist: pandas>=2.0.0; extra == "datascience"
+Provides-Extra: mcp
+Requires-Dist: mcp>=1.0.0; extra == "mcp"
+Provides-Extra: tools
+Requires-Dist: noesium[aliyun,datascience,google,mcp]; extra == "tools"
+Requires-Dist: wizsearch<2.0.0,>=1.0.1; extra == "tools"
+Requires-Dist: arxiv>=2.2.0; extra == "tools"
+Requires-Dist: pillow<12.0,>=10.1.0; extra == "tools"
+Requires-Dist: pymupdf>=1.23.0; extra == "tools"
+Requires-Dist: openpyxl>=3.1.5; extra == "tools"
+Requires-Dist: wikipedia-api>=0.6.0; extra == "tools"
+Provides-Extra: all
+Requires-Dist: noesium[google,langchain,local-llm,openai,postgres,tools,weaviate]; extra == "all"
 Provides-Extra: dev
 Requires-Dist: pytest<9,>=8.2; extra == "dev"
 Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
 Requires-Dist: pytest-asyncio>=1.1.0; extra == "dev"
-Requires-Dist: pytest-tornasync>=0.6.0.post2; extra == "dev"
-Requires-Dist: pytest-trio>=0.8.0; extra == "dev"
-Requires-Dist: pytest-twisted>=1.14.3; extra == "dev"
-Requires-Dist: twisted>=25.5.0; extra == "dev"
 Requires-Dist: black>=23.0.0; extra == "dev"
 Requires-Dist: isort>=5.12.0; extra == "dev"
-Requires-Dist:
+Requires-Dist: mypy>=1.10.0; extra == "dev"
 Requires-Dist: autoflake>=2.3.1; extra == "dev"
-Requires-Dist:
+Requires-Dist: flake8>=7.3.0; extra == "dev"
 Dynamic: license-file

-#
+# Noesium-core

 [](https://github.com/mirasoth/noesium/actions/workflows/ci.yml)
 [](https://pypi.org/project/noesium/)
 [](https://deepwiki.com/mirasoth/noesium)

-
+Project Noesium is an initiative to develop a computation-driven, cognitive agentic system. This repo contains the foundational abstractions (Agent, Memory, Tool, Goal, Orchestration, and more) along with essential modules such as LLM clients, logging, message buses, model routing, and observability. For the underlying philosophy, refer to my talk on MAS ([link](https://github.com/caesar0301/mas-talk-2508/blob/master/mas-talk-xmingc.pdf)).

 ## Installation

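The dependency hunk above moves the provider- and toolkit-specific requirements out of the default install and into named extras. As an illustration only (the extra names come from the Provides-Extra entries above, assuming the package is installed from PyPI as noesium), installs could look like:

```bash
# Core framework only
pip install noesium

# Core plus the bundled toolkits (arxiv, pillow, pymupdf, wikipedia-api, ...)
pip install "noesium[tools]"

# Everything, as defined by the "all" extra
pip install "noesium[all]"
```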
@@ -117,27 +146,6 @@ Noesium offers a comprehensive set of modules for creating intelligent agent-bas
 - **Opik integration**: Production-ready observability
 - **LangGraph hooks**: Workflow tracing and debugging

-## Project Structure
-
-```
-noesium/core/
-├── agent/          # Base agent classes and models
-├── goalith/        # Goal management and planning system
-│   ├── decomposer/ # Goal decomposition strategies
-│   ├── goalgraph/  # Graph data structures
-│   ├── conflict/   # Conflict detection
-│   └── replanner/  # Dynamic replanning
-├── llm/            # LLM provider implementations
-├── memory/         # Memory management system
-│   └── memu/       # MemU memory agent integration
-├── toolify/        # Tool management and execution
-├── vector_store/   # Vector database integrations
-├── msgbus/         # Message bus system
-├── routing/        # LLM routing strategies
-├── tracing/        # Token tracking and observability
-└── utils/          # Utilities and logging
-```
-
 ## Quick Start

 ### 1. LLM Client Usage
@@ -386,7 +394,7 @@ Set these environment variables for different providers:

 ```bash
 # Default LLM provider
-export
+export NOESIUM_LLM_PROVIDER="openai"

 # OpenAI
 export OPENAI_API_KEY="sk-..."