aiecs-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aiecs might be problematic.

Files changed (90)
  1. aiecs/__init__.py +75 -0
  2. aiecs/__main__.py +41 -0
  3. aiecs/aiecs_client.py +295 -0
  4. aiecs/application/__init__.py +10 -0
  5. aiecs/application/executors/__init__.py +10 -0
  6. aiecs/application/executors/operation_executor.py +341 -0
  7. aiecs/config/__init__.py +15 -0
  8. aiecs/config/config.py +117 -0
  9. aiecs/config/registry.py +19 -0
  10. aiecs/core/__init__.py +46 -0
  11. aiecs/core/interface/__init__.py +34 -0
  12. aiecs/core/interface/execution_interface.py +150 -0
  13. aiecs/core/interface/storage_interface.py +214 -0
  14. aiecs/domain/__init__.py +20 -0
  15. aiecs/domain/context/__init__.py +28 -0
  16. aiecs/domain/context/content_engine.py +982 -0
  17. aiecs/domain/context/conversation_models.py +306 -0
  18. aiecs/domain/execution/__init__.py +12 -0
  19. aiecs/domain/execution/model.py +49 -0
  20. aiecs/domain/task/__init__.py +13 -0
  21. aiecs/domain/task/dsl_processor.py +460 -0
  22. aiecs/domain/task/model.py +50 -0
  23. aiecs/domain/task/task_context.py +257 -0
  24. aiecs/infrastructure/__init__.py +26 -0
  25. aiecs/infrastructure/messaging/__init__.py +13 -0
  26. aiecs/infrastructure/messaging/celery_task_manager.py +341 -0
  27. aiecs/infrastructure/messaging/websocket_manager.py +289 -0
  28. aiecs/infrastructure/monitoring/__init__.py +12 -0
  29. aiecs/infrastructure/monitoring/executor_metrics.py +138 -0
  30. aiecs/infrastructure/monitoring/structured_logger.py +50 -0
  31. aiecs/infrastructure/monitoring/tracing_manager.py +376 -0
  32. aiecs/infrastructure/persistence/__init__.py +12 -0
  33. aiecs/infrastructure/persistence/database_manager.py +286 -0
  34. aiecs/infrastructure/persistence/file_storage.py +671 -0
  35. aiecs/infrastructure/persistence/redis_client.py +162 -0
  36. aiecs/llm/__init__.py +54 -0
  37. aiecs/llm/base_client.py +99 -0
  38. aiecs/llm/client_factory.py +339 -0
  39. aiecs/llm/custom_callbacks.py +228 -0
  40. aiecs/llm/openai_client.py +125 -0
  41. aiecs/llm/vertex_client.py +186 -0
  42. aiecs/llm/xai_client.py +184 -0
  43. aiecs/main.py +351 -0
  44. aiecs/scripts/DEPENDENCY_SYSTEM_SUMMARY.md +241 -0
  45. aiecs/scripts/README_DEPENDENCY_CHECKER.md +309 -0
  46. aiecs/scripts/README_WEASEL_PATCH.md +126 -0
  47. aiecs/scripts/__init__.py +3 -0
  48. aiecs/scripts/dependency_checker.py +825 -0
  49. aiecs/scripts/dependency_fixer.py +348 -0
  50. aiecs/scripts/download_nlp_data.py +348 -0
  51. aiecs/scripts/fix_weasel_validator.py +121 -0
  52. aiecs/scripts/fix_weasel_validator.sh +82 -0
  53. aiecs/scripts/patch_weasel_library.sh +188 -0
  54. aiecs/scripts/quick_dependency_check.py +269 -0
  55. aiecs/scripts/run_weasel_patch.sh +41 -0
  56. aiecs/scripts/setup_nlp_data.sh +217 -0
  57. aiecs/tasks/__init__.py +2 -0
  58. aiecs/tasks/worker.py +111 -0
  59. aiecs/tools/__init__.py +196 -0
  60. aiecs/tools/base_tool.py +202 -0
  61. aiecs/tools/langchain_adapter.py +361 -0
  62. aiecs/tools/task_tools/__init__.py +82 -0
  63. aiecs/tools/task_tools/chart_tool.py +704 -0
  64. aiecs/tools/task_tools/classfire_tool.py +901 -0
  65. aiecs/tools/task_tools/image_tool.py +397 -0
  66. aiecs/tools/task_tools/office_tool.py +600 -0
  67. aiecs/tools/task_tools/pandas_tool.py +565 -0
  68. aiecs/tools/task_tools/report_tool.py +499 -0
  69. aiecs/tools/task_tools/research_tool.py +363 -0
  70. aiecs/tools/task_tools/scraper_tool.py +548 -0
  71. aiecs/tools/task_tools/search_api.py +7 -0
  72. aiecs/tools/task_tools/stats_tool.py +513 -0
  73. aiecs/tools/temp_file_manager.py +126 -0
  74. aiecs/tools/tool_executor/__init__.py +35 -0
  75. aiecs/tools/tool_executor/tool_executor.py +518 -0
  76. aiecs/utils/LLM_output_structor.py +409 -0
  77. aiecs/utils/__init__.py +23 -0
  78. aiecs/utils/base_callback.py +50 -0
  79. aiecs/utils/execution_utils.py +158 -0
  80. aiecs/utils/logging.py +1 -0
  81. aiecs/utils/prompt_loader.py +13 -0
  82. aiecs/utils/token_usage_repository.py +279 -0
  83. aiecs/ws/__init__.py +0 -0
  84. aiecs/ws/socket_server.py +41 -0
  85. aiecs-1.0.0.dist-info/METADATA +610 -0
  86. aiecs-1.0.0.dist-info/RECORD +90 -0
  87. aiecs-1.0.0.dist-info/WHEEL +5 -0
  88. aiecs-1.0.0.dist-info/entry_points.txt +7 -0
  89. aiecs-1.0.0.dist-info/licenses/LICENSE +225 -0
  90. aiecs-1.0.0.dist-info/top_level.txt +1 -0
aiecs/llm/xai_client.py ADDED
@@ -0,0 +1,184 @@
+ import json
+ import asyncio
+ import logging
+ from typing import Dict, Any, Optional, List, AsyncGenerator
+ from openai import AsyncOpenAI
+ from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
+
+ from .base_client import BaseLLMClient, LLMMessage, LLMResponse, ProviderNotAvailableError, RateLimitError
+ from aiecs.config.config import get_settings
+
+ logger = logging.getLogger(__name__)
+
+ class XAIClient(BaseLLMClient):
+     """xAI (Grok) provider client"""
+
+     def __init__(self):
+         super().__init__("xAI")
+         self.settings = get_settings()
+         self._openai_client: Optional[AsyncOpenAI] = None
+
+         # Enhanced model mapping for all Grok models
+         self.model_map = {
+             # Legacy Grok models
+             "grok-beta": "grok-beta",
+             "grok": "grok-beta",
+
+             # Current Grok models
+             "Grok 2": "grok-2",
+             "grok-2": "grok-2",
+             "Grok 2 Vision": "grok-2-vision",
+             "grok-2-vision": "grok-2-vision",
+
+             # Grok 3 models
+             "Grok 3 Normal": "grok-3",
+             "grok-3": "grok-3",
+             "Grok 3 Fast": "grok-3-fast",
+             "grok-3-fast": "grok-3-fast",
+
+             # Grok 3 Mini models
+             "Grok 3 Mini Normal": "grok-3-mini",
+             "grok-3-mini": "grok-3-mini",
+             "Grok 3 Mini Fast": "grok-3-mini-fast",
+             "grok-3-mini-fast": "grok-3-mini-fast",
+
+             # Grok 3 Reasoning models
+             "Grok 3 Reasoning Normal": "grok-3-reasoning",
+             "grok-3-reasoning": "grok-3-reasoning",
+             "Grok 3 Reasoning Fast": "grok-3-reasoning-fast",
+             "grok-3-reasoning-fast": "grok-3-reasoning-fast",
+
+             # Grok 3 Mini Reasoning models
+             "Grok 3 Mini Reasoning Normal": "grok-3-mini-reasoning",
+             "grok-3-mini-reasoning": "grok-3-mini-reasoning",
+             "Grok 3 Mini Reasoning Fast": "grok-3-mini-reasoning-fast",
+             "grok-3-mini-reasoning-fast": "grok-3-mini-reasoning-fast",
+
+             # Grok 4 models
+             "Grok 4 Normal": "grok-4",
+             "grok-4": "grok-4",
+             "Grok 4 Fast": "grok-4-fast",
+             "grok-4-fast": "grok-4-fast",
+             "Grok 4 0709": "grok-4-0709",
+             "grok-4-0709": "grok-4-0709",
+         }
+
+     def _get_openai_client(self) -> AsyncOpenAI:
+         """Lazy initialization of OpenAI client for XAI"""
+         if not self._openai_client:
+             api_key = self._get_api_key()
+             self._openai_client = AsyncOpenAI(
+                 api_key=api_key,
+                 base_url="https://api.x.ai/v1",
+                 timeout=360.0  # Override default timeout with longer timeout for reasoning models
+             )
+         return self._openai_client
+
+     def _get_api_key(self) -> str:
+         """Get API key with backward compatibility"""
+         # Support both xai_api_key and grok_api_key for backward compatibility
+         api_key = getattr(self.settings, 'xai_api_key', None) or getattr(self.settings, 'grok_api_key', None)
+         if not api_key:
+             raise ProviderNotAvailableError("xAI API key not configured")
+         return api_key
+
+     @retry(
+         stop=stop_after_attempt(3),
+         wait=wait_exponential(multiplier=1, min=4, max=10),
+         retry=retry_if_exception_type((Exception, RateLimitError))
+     )
+     async def generate_text(
+         self,
+         messages: List[LLMMessage],
+         model: Optional[str] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         **kwargs
+     ) -> LLMResponse:
+         """Generate text using xAI API via OpenAI library (supports all Grok models)"""
+         # Check API key availability
+         api_key = self._get_api_key()
+         if not api_key:
+             raise ProviderNotAvailableError("xAI API key is not configured.")
+
+         client = self._get_openai_client()
+
+         selected_model = model or "grok-4"  # Default to grok-4 as in the example
+         api_model = self.model_map.get(selected_model, selected_model)
+
+         # Convert to OpenAI format
+         openai_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
+
+         try:
+             completion = await client.chat.completions.create(
+                 model=api_model,
+                 messages=openai_messages,
+                 temperature=temperature,
+                 max_tokens=max_tokens,
+                 **kwargs
+             )
+
+             content = completion.choices[0].message.content
+             tokens_used = completion.usage.total_tokens if completion.usage else None
+
+             return LLMResponse(
+                 content=content,
+                 provider=self.provider_name,
+                 model=selected_model,
+                 tokens_used=tokens_used,
+                 cost_estimate=0.0  # xAI pricing not available yet
+             )
+
+         except Exception as e:
+             if "rate limit" in str(e).lower() or "429" in str(e):
+                 raise RateLimitError(f"xAI rate limit exceeded: {str(e)}")
+             logger.error(f"xAI API error: {str(e)}")
+             raise
+
+     async def stream_text(
+         self,
+         messages: List[LLMMessage],
+         model: Optional[str] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         **kwargs
+     ) -> AsyncGenerator[str, None]:
+         """Stream text using xAI API via OpenAI library (supports all Grok models)"""
+         # Check API key availability
+         api_key = self._get_api_key()
+         if not api_key:
+             raise ProviderNotAvailableError("xAI API key is not configured.")
+
+         client = self._get_openai_client()
+
+         selected_model = model or "grok-4"  # Default to grok-4
+         api_model = self.model_map.get(selected_model, selected_model)
+
+         # Convert to OpenAI format
+         openai_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
+
+         try:
+             stream = await client.chat.completions.create(
+                 model=api_model,
+                 messages=openai_messages,
+                 temperature=temperature,
+                 max_tokens=max_tokens,
+                 stream=True,
+                 **kwargs
+             )
+
+             async for chunk in stream:
+                 if chunk.choices[0].delta.content is not None:
+                     yield chunk.choices[0].delta.content
+
+         except Exception as e:
+             if "rate limit" in str(e).lower() or "429" in str(e):
+                 raise RateLimitError(f"xAI rate limit exceeded: {str(e)}")
+             logger.error(f"xAI API streaming error: {str(e)}")
+             raise
+
+     async def close(self):
+         """Clean up resources"""
+         if self._openai_client:
+             await self._openai_client.close()
+             self._openai_client = None
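
Below is a minimal usage sketch of the XAIClient defined above (illustrative only, not part of the package contents). It assumes an installed aiecs with an xai_api_key resolvable through get_settings(), and that LLMMessage accepts role and content fields, as the message-conversion code implies.

import asyncio

from aiecs.llm.xai_client import XAIClient
from aiecs.llm.base_client import LLMMessage

async def main():
    client = XAIClient()
    # One-turn conversation; roles follow the OpenAI chat format
    messages = [LLMMessage(role="user", content="Summarize AIECS in one sentence.")]
    try:
        # Friendly names are normalized through model_map, so
        # "Grok 3 Mini Normal" and "grok-3-mini" resolve to the same API model
        response = await client.generate_text(messages, model="grok-3-mini")
        print(response.content, response.tokens_used)

        # Streaming variant; chunks arrive as plain text deltas
        async for chunk in client.stream_text(messages, model="grok-3-mini"):
            print(chunk, end="", flush=True)
    finally:
        # Releases the underlying AsyncOpenAI client
        await client.close()

if __name__ == "__main__":
    asyncio.run(main())
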
aiecs/main.py ADDED
@@ -0,0 +1,351 @@
+ """
+ AIECS - AI Execute Services
+ A middleware service for AI-powered task execution and tool orchestration
+ """
+
+ from fastapi import FastAPI, HTTPException, Request, status
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.responses import JSONResponse
+ from contextlib import asynccontextmanager
+ import logging
+ import os
+ import sys
+ import asyncio
+ from typing import Optional, Dict, Any
+ import socketio
+
+ # Import configuration
+ from aiecs.config.config import get_settings
+ from aiecs.config.registry import get_ai_service
+
+ # Import WebSocket server
+ from aiecs.ws.socket_server import sio
+
+ # Import infrastructure
+ from aiecs.infrastructure.persistence.database_manager import DatabaseManager
+ from aiecs.infrastructure.messaging.celery_task_manager import CeleryTaskManager
+ from aiecs.infrastructure.monitoring.structured_logger import setup_structured_logging
+
+ # Import LLM client factory
+ from aiecs.llm.client_factory import LLMClientFactory
+
+ # Import domain models
+ from aiecs.domain.execution.model import TaskStatus
+ from aiecs.domain.task.task_context import TaskContext
+
+ # Import tool discovery
+ from aiecs.tools import discover_tools
+
+ # Setup logging
+ logger = logging.getLogger(__name__)
+
+ # Get settings
+ settings = get_settings()
+
+ # Global instances
+ db_manager: Optional[DatabaseManager] = None
+ task_manager: Optional[CeleryTaskManager] = None
+
+
+ @asynccontextmanager
+ async def lifespan(app: FastAPI):
+     """Application lifespan manager"""
+     global db_manager, task_manager
+
+     logger.info("Starting AIECS - AI Execute Services...")
+
+     # Setup structured logging
+     setup_structured_logging()
+
+     # Initialize database connection
+     try:
+         db_manager = DatabaseManager()
+         await db_manager.connect()
+         logger.info("Database connection established")
+     except Exception as e:
+         logger.error(f"Failed to connect to database: {e}")
+         raise
+
+     # Initialize task manager
+     try:
+         task_manager = CeleryTaskManager()
+         logger.info("Task manager initialized")
+     except Exception as e:
+         logger.error(f"Failed to initialize task manager: {e}")
+         raise
+
+     # Discover and register tools
+     try:
+         discover_tools("aiecs.tools")
+         logger.info("Tools discovered and registered")
+     except Exception as e:
+         logger.error(f"Failed to discover tools: {e}")
+         raise
+
+     # Application startup complete
+     logger.info("AIECS startup complete")
+
+     yield
+
+     # Shutdown
+     logger.info("Shutting down AIECS...")
+
+     # Close database connection
+     if db_manager:
+         await db_manager.disconnect()
+         logger.info("Database connection closed")
+
+     # Close all LLM clients
+     await LLMClientFactory.close_all()
+     logger.info("LLM clients closed")
+
+     logger.info("AIECS shutdown complete")
+
+
+ # Create FastAPI app
+ app = FastAPI(
+     title="AIECS - AI Execute Services",
+     description="Middleware service for AI-powered task execution and tool orchestration",
+     version="1.0.0",
+     lifespan=lifespan
+ )
+
+ # Configure CORS
+ allowed_origins = settings.cors_allowed_origins.split(",") if settings.cors_allowed_origins else ["*"]
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=allowed_origins,
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Mount Socket.IO app
+ socket_app = socketio.ASGIApp(sio, other_asgi_app=app)
+
+
+ # Health check endpoint
+ @app.get("/health")
+ async def health_check():
+     """Health check endpoint"""
+     return {
+         "status": "healthy",
+         "service": "aiecs",
+         "version": "1.0.0"
+     }
+
+
+ # Get available tools
+ @app.get("/api/tools")
+ async def get_available_tools():
+     """Get list of available tools"""
+     from aiecs.tools import list_tools
+     tools = list_tools()
+     return {
+         "tools": tools,
+         "count": len(tools)
+     }
+
+
+ # Execute task endpoint
+ @app.post("/api/execute")
+ async def execute_task(request: Request):
+     """Execute a task with given parameters"""
+     try:
+         data = await request.json()
+
+         # Extract required fields
+         task_type = data.get("type", "task")
+         mode = data.get("mode", "execute")
+         service = data.get("service", "default")
+         user_id = data.get("userId", "anonymous")
+         context_data = data.get("context", {})
+
+         # Build task context
+         task_context = TaskContext(
+             mode=mode,
+             service=service,
+             user_id=user_id,
+             metadata=context_data.get("metadata", {}),
+             data=context_data.get("data", {}),
+             tools=context_data.get("tools", [])
+         )
+
+         # Submit task to queue
+         if not task_manager:
+             raise HTTPException(
+                 status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+                 detail="Task manager not initialized"
+             )
+
+         task_id = await task_manager.submit_task(
+             context=task_context,
+             task_type=task_type
+         )
+
+         return {
+             "taskId": task_id,
+             "status": "queued",
+             "message": "Task submitted successfully"
+         }
+
+     except Exception as e:
+         logger.error(f"Error executing task: {e}")
+         raise HTTPException(
+             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+             detail=str(e)
+         )
+
+
+ # Get task status
+ @app.get("/api/task/{task_id}")
+ async def get_task_status(task_id: str):
+     """Get status of a specific task"""
+     try:
+         if not task_manager:
+             raise HTTPException(
+                 status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+                 detail="Task manager not initialized"
+             )
+
+         task_status = await task_manager.get_task_status(task_id)  # local name must not shadow fastapi's status module
+
+         if not task_status:
+             raise HTTPException(
+                 status_code=status.HTTP_404_NOT_FOUND,
+                 detail="Task not found"
+             )
+
+         return task_status
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error(f"Error getting task status: {e}")
+         raise HTTPException(
+             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+             detail=str(e)
+         )
+
+
+ # Cancel task
+ @app.delete("/api/task/{task_id}")
+ async def cancel_task(task_id: str):
+     """Cancel a running task"""
+     try:
+         if not task_manager:
+             raise HTTPException(
+                 status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+                 detail="Task manager not initialized"
+             )
+
+         success = await task_manager.cancel_task(task_id)
+
+         if not success:
+             raise HTTPException(
+                 status_code=status.HTTP_400_BAD_REQUEST,
+                 detail="Failed to cancel task"
+             )
+
+         return {
+             "taskId": task_id,
+             "status": "cancelled",
+             "message": "Task cancelled successfully"
+         }
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.error(f"Error cancelling task: {e}")
+         raise HTTPException(
+             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+             detail=str(e)
+         )
+
+
+ # Get service info
+ @app.get("/api/services")
+ async def get_services():
+     """Get available AI services"""
+     from aiecs.config.registry import AI_SERVICE_REGISTRY
+
+     services = []
+     for (mode, service), cls in AI_SERVICE_REGISTRY.items():
+         services.append({
+             "mode": mode,
+             "service": service,
+             "class": cls.__name__,
+             "module": cls.__module__
+         })
+
+     return {
+         "services": services,
+         "count": len(services)
+     }
+
+
+ # Get LLM providers
+ @app.get("/api/providers")
+ async def get_providers():
+     """Get available LLM providers"""
+     from aiecs.llm.client_factory import AIProvider
+
+     providers = [
+         {
+             "name": provider.value,
+             "enabled": True
+         }
+         for provider in AIProvider
+     ]
+
+     return {
+         "providers": providers,
+         "count": len(providers)
+     }
+
+
+ # Exception handlers
+ @app.exception_handler(HTTPException)
+ async def http_exception_handler(request: Request, exc: HTTPException):
+     """Handle HTTP exceptions"""
+     return JSONResponse(
+         status_code=exc.status_code,
+         content={
+             "error": exc.detail,
+             "status": exc.status_code
+         }
+     )
+
+
+ @app.exception_handler(Exception)
+ async def general_exception_handler(request: Request, exc: Exception):
+     """Handle general exceptions"""
+     logger.error(f"Unhandled exception: {exc}", exc_info=True)
+     return JSONResponse(
+         status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+         content={
+             "error": "Internal server error",
+             "status": 500
+         }
+     )
+
+
+ # Main entry point
+ if __name__ == "__main__":
+     import uvicorn
+
+     # Get port from environment or use default
+     port = int(os.environ.get("PORT", 8000))
+
+     # Run the application with Socket.IO support
+     uvicorn.run(
+         socket_app,  # Use the combined Socket.IO + FastAPI app
+         host="0.0.0.0",
+         port=port,
+         log_level="info",
+         reload=bool(os.environ.get("RELOAD", False))
+     )
+
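
For orientation, a hypothetical client-side sketch against the endpoints defined in main.py, assuming the combined Socket.IO + FastAPI app is running locally on port 8000; the payload keys mirror what execute_task reads from the request body.

import httpx

BASE_URL = "http://localhost:8000"  # assumes a local run of socket_app

def submit_and_check() -> None:
    with httpx.Client(base_url=BASE_URL, timeout=30.0) as client:
        # POST /api/execute with the fields execute_task extracts
        payload = {
            "type": "task",
            "mode": "execute",
            "service": "default",
            "userId": "demo-user",
            "context": {"metadata": {}, "data": {}, "tools": []},
        }
        submitted = client.post("/api/execute", json=payload).json()
        task_id = submitted["taskId"]
        print("queued:", task_id)

        # GET /api/task/{task_id} returns whatever the task manager reports
        print("status:", client.get(f"/api/task/{task_id}").json())

        # GET /api/tools lists the tools registered by discover_tools
        print("tool count:", client.get("/api/tools").json()["count"])

if __name__ == "__main__":
    submit_and_check()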