aiecs 1.0.7.tar.gz → 1.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (122)
  1. {aiecs-1.0.7/aiecs.egg-info → aiecs-1.1.0}/PKG-INFO +1 -1
  2. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/__init__.py +1 -1
  3. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/aiecs_client.py +159 -1
  4. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/config/config.py +4 -0
  5. aiecs-1.1.0/aiecs/domain/context/__init__.py +53 -0
  6. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/persistence/context_engine_client.py +9 -5
  7. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/main.py +20 -2
  8. aiecs-1.1.0/aiecs/scripts/dependance_check/__init__.py +18 -0
  9. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_check}/download_nlp_data.py +50 -8
  10. aiecs-1.1.0/aiecs/scripts/dependance_patch/__init__.py +8 -0
  11. aiecs-1.1.0/aiecs/scripts/dependance_patch/fix_weasel/__init__.py +12 -0
  12. aiecs-1.1.0/aiecs/scripts/tools_develop/README.md +340 -0
  13. aiecs-1.1.0/aiecs/scripts/tools_develop/__init__.py +16 -0
  14. aiecs-1.1.0/aiecs/scripts/tools_develop/check_type_annotations.py +263 -0
  15. aiecs-1.1.0/aiecs/scripts/tools_develop/validate_tool_schemas.py +346 -0
  16. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/__init__.py +33 -14
  17. aiecs-1.1.0/aiecs/tools/docs/__init__.py +103 -0
  18. aiecs-1.1.0/aiecs/tools/docs/ai_document_orchestrator.py +543 -0
  19. aiecs-1.1.0/aiecs/tools/docs/ai_document_writer_orchestrator.py +2199 -0
  20. aiecs-1.1.0/aiecs/tools/docs/content_insertion_tool.py +1214 -0
  21. aiecs-1.1.0/aiecs/tools/docs/document_creator_tool.py +1161 -0
  22. aiecs-1.1.0/aiecs/tools/docs/document_layout_tool.py +1090 -0
  23. aiecs-1.1.0/aiecs/tools/docs/document_parser_tool.py +904 -0
  24. aiecs-1.1.0/aiecs/tools/docs/document_writer_tool.py +1583 -0
  25. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/langchain_adapter.py +102 -51
  26. aiecs-1.1.0/aiecs/tools/schema_generator.py +265 -0
  27. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/image_tool.py +1 -1
  28. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/office_tool.py +9 -0
  29. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/scraper_tool.py +1 -1
  30. {aiecs-1.0.7 → aiecs-1.1.0/aiecs.egg-info}/PKG-INFO +1 -1
  31. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs.egg-info/SOURCES.txt +28 -12
  32. aiecs-1.1.0/aiecs.egg-info/entry_points.txt +9 -0
  33. {aiecs-1.0.7 → aiecs-1.1.0}/pyproject.toml +8 -6
  34. aiecs-1.0.7/aiecs/domain/context/__init__.py +0 -29
  35. aiecs-1.0.7/aiecs.egg-info/entry_points.txt +0 -7
  36. {aiecs-1.0.7 → aiecs-1.1.0}/LICENSE +0 -0
  37. {aiecs-1.0.7 → aiecs-1.1.0}/MANIFEST.in +0 -0
  38. {aiecs-1.0.7 → aiecs-1.1.0}/README.md +0 -0
  39. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/__main__.py +0 -0
  40. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/application/__init__.py +0 -0
  41. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/application/executors/__init__.py +0 -0
  42. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/application/executors/operation_executor.py +0 -0
  43. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/config/__init__.py +0 -0
  44. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/config/registry.py +0 -0
  45. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/core/__init__.py +0 -0
  46. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/core/interface/__init__.py +0 -0
  47. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/core/interface/execution_interface.py +0 -0
  48. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/core/interface/storage_interface.py +0 -0
  49. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/__init__.py +0 -0
  50. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/community/collaborative_workflow.py +0 -0
  51. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/community/community_integration.py +0 -0
  52. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/community/community_manager.py +0 -0
  53. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/community/decision_engine.py +0 -0
  54. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/community/models/community_models.py +0 -0
  55. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/community/resource_manager.py +0 -0
  56. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/context/context_engine.py +0 -0
  57. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/context/conversation_models.py +0 -0
  58. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/execution/__init__.py +0 -0
  59. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/execution/model.py +0 -0
  60. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/task/__init__.py +0 -0
  61. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/task/dsl_processor.py +0 -0
  62. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/task/model.py +0 -0
  63. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/domain/task/task_context.py +0 -0
  64. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/__init__.py +0 -0
  65. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/messaging/__init__.py +0 -0
  66. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/messaging/celery_task_manager.py +0 -0
  67. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/messaging/websocket_manager.py +0 -0
  68. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/monitoring/__init__.py +0 -0
  69. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/monitoring/executor_metrics.py +0 -0
  70. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/monitoring/structured_logger.py +0 -0
  71. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/monitoring/tracing_manager.py +0 -0
  72. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/persistence/__init__.py +0 -0
  73. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/persistence/database_manager.py +0 -0
  74. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/persistence/file_storage.py +0 -0
  75. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/persistence/redis_client.py +0 -0
  76. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/llm/__init__.py +0 -0
  77. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/llm/base_client.py +0 -0
  78. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/llm/client_factory.py +0 -0
  79. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/llm/custom_callbacks.py +0 -0
  80. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/llm/googleai_client.py +0 -0
  81. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/llm/openai_client.py +0 -0
  82. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/llm/vertex_client.py +0 -0
  83. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/llm/xai_client.py +0 -0
  84. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/scripts/__init__.py +0 -0
  85. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_check}/DEPENDENCY_SYSTEM_SUMMARY.md +0 -0
  86. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_check}/README_DEPENDENCY_CHECKER.md +0 -0
  87. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_check}/dependency_checker.py +0 -0
  88. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_check}/dependency_fixer.py +0 -0
  89. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_check}/quick_dependency_check.py +0 -0
  90. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_check}/setup_nlp_data.sh +0 -0
  91. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_patch/fix_weasel}/README_WEASEL_PATCH.md +0 -0
  92. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_patch/fix_weasel}/fix_weasel_validator.py +0 -0
  93. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_patch/fix_weasel}/fix_weasel_validator.sh +0 -0
  94. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_patch/fix_weasel}/patch_weasel_library.sh +0 -0
  95. {aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_patch/fix_weasel}/run_weasel_patch.sh +0 -0
  96. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tasks/__init__.py +0 -0
  97. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tasks/worker.py +0 -0
  98. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/base_tool.py +0 -0
  99. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/__init__.py +0 -0
  100. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/chart_tool.py +0 -0
  101. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/classfire_tool.py +0 -0
  102. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/pandas_tool.py +0 -0
  103. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/report_tool.py +0 -0
  104. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/research_tool.py +0 -0
  105. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/search_api.py +0 -0
  106. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/task_tools/stats_tool.py +0 -0
  107. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/temp_file_manager.py +0 -0
  108. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/tool_executor/__init__.py +0 -0
  109. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/tools/tool_executor/tool_executor.py +0 -0
  110. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/utils/LLM_output_structor.py +0 -0
  111. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/utils/__init__.py +0 -0
  112. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/utils/base_callback.py +0 -0
  113. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/utils/execution_utils.py +0 -0
  114. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/utils/logging.py +0 -0
  115. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/utils/prompt_loader.py +0 -0
  116. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/utils/token_usage_repository.py +0 -0
  117. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/ws/__init__.py +0 -0
  118. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs/ws/socket_server.py +0 -0
  119. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs.egg-info/dependency_links.txt +0 -0
  120. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs.egg-info/requires.txt +0 -0
  121. {aiecs-1.0.7 → aiecs-1.1.0}/aiecs.egg-info/top_level.txt +0 -0
  122. {aiecs-1.0.7 → aiecs-1.1.0}/setup.cfg +0 -0

{aiecs-1.0.7/aiecs.egg-info → aiecs-1.1.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aiecs
-Version: 1.0.7
+Version: 1.1.0
 Summary: AI Execute Services - A middleware framework for AI-powered task execution and tool orchestration
 Author-email: AIECS Team <iretbl@gmail.com>
 License-Expression: MIT

{aiecs-1.0.7 → aiecs-1.1.0}/aiecs/__init__.py
@@ -5,7 +5,7 @@ A powerful Python middleware framework for building AI-powered applications
 with tool orchestration, task execution, and multi-provider LLM support.
 """
 
-__version__ = "1.0.7"
+__version__ = "1.1.0"
 __author__ = "AIECS Team"
 __email__ = "iretbl@gmail.com"
 

{aiecs-1.0.7 → aiecs-1.1.0}/aiecs/aiecs_client.py
@@ -11,7 +11,8 @@ from contextlib import asynccontextmanager
 from aiecs.config.config import get_settings, validate_required_settings
 from aiecs.domain.task.task_context import TaskContext
 from aiecs.tools import discover_tools, list_tools, get_tool
-from aiecs.llm.client_factory import LLMClientFactory
+from aiecs.llm.client_factory import LLMClientFactory, LLMClientManager, AIProvider
+from aiecs.llm.base_client import LLMMessage
 
 logger = logging.getLogger(__name__)
 
@@ -46,6 +47,7 @@ class AIECS:
         self.db_manager = None
         self.task_manager = None
         self.operation_executor = None
+        self.llm_manager = None
 
         # State
         self._initialized = False
@@ -65,6 +67,10 @@ class AIECS:
             self._tools_discovered = True
             logger.info("Tools discovered and registered")
 
+        # Initialize LLM manager (available in both modes)
+        self.llm_manager = LLMClientManager()
+        logger.info("LLM manager initialized")
+
         if self.mode == "simple":
             # Simple mode: only tools, no database/Celery
             logger.info("Simple mode: tools only")
@@ -210,6 +216,158 @@ class AIECS:
 
         return get_tool(tool_name)
 
+    def process_task(self, task_context: TaskContext) -> Dict[str, Any]:
+        """
+        Process a task synchronously (for compatibility with synchronous tool calls)
+
+        Args:
+            task_context: TaskContext containing the task data
+
+        Returns:
+            Task processing result with AI-generated response
+        """
+        # Run the async method in a new event loop if needed
+        try:
+            loop = asyncio.get_event_loop()
+            if loop.is_running():
+                # If called from async context, create a new thread
+                import concurrent.futures
+                with concurrent.futures.ThreadPoolExecutor() as executor:
+                    future = executor.submit(
+                        asyncio.run,
+                        self.process_task_async(task_context)
+                    )
+                    return future.result()
+            else:
+                # Run in current event loop
+                return loop.run_until_complete(self.process_task_async(task_context))
+        except RuntimeError:
+            # No event loop, create one
+            return asyncio.run(self.process_task_async(task_context))
+
+    async def process_task_async(self, task_context: TaskContext) -> Dict[str, Any]:
+        """
+        Process a task asynchronously using AI providers
+
+        Args:
+            task_context: TaskContext containing the task data
+
+        Returns:
+            Task processing result with AI-generated response
+        """
+        if not self._initialized:
+            await self.initialize()
+
+        if not self.llm_manager:
+            raise RuntimeError("LLM manager not initialized")
+
+        try:
+            # Extract data from TaskContext
+            context_dict = task_context.to_dict()
+            metadata = context_dict.get("metadata", {})
+
+            # Get AI provider preference from metadata
+            ai_preference = metadata.get("aiPreference", "default")
+            provider = None
+            model = None
+
+            # Parse AI preference
+            if isinstance(ai_preference, str):
+                # Simple string preference
+                if ai_preference.lower() != "default":
+                    try:
+                        provider = AIProvider(ai_preference)
+                    except ValueError:
+                        logger.warning(f"Unknown AI provider: {ai_preference}, using default")
+            elif isinstance(ai_preference, dict):
+                # Dictionary with provider and model
+                provider_str = ai_preference.get("provider")
+                if provider_str:
+                    try:
+                        provider = AIProvider(provider_str)
+                    except ValueError:
+                        logger.warning(f"Unknown AI provider: {provider_str}, using default")
+                model = ai_preference.get("model")
+
+            # Build prompt from context data
+            # The prompt could come from various sources in the context
+            prompt = None
+
+            # Check for direct prompt in metadata
+            if "prompt" in metadata:
+                prompt = metadata["prompt"]
+            # Check for input_data (common in document generation)
+            elif "input_data" in context_dict:
+                input_data = context_dict["input_data"]
+                if isinstance(input_data, dict) and "prompt" in input_data:
+                    prompt = input_data["prompt"]
+                elif isinstance(input_data, str):
+                    prompt = input_data
+
+            if not prompt:
+                # Fallback: construct a simple prompt from available data
+                prompt = f"Task: {context_dict.get('task_type', 'general')}\nData: {context_dict}"
+
+            # Get temperature and other parameters from metadata
+            temperature = metadata.get("temperature", 0.7)
+            max_tokens = metadata.get("max_tokens", 2000)
+
+            # Generate text using LLM manager
+            messages = [LLMMessage(role="user", content=prompt)]
+
+            response = await self.llm_manager.generate_text(
+                messages=messages,
+                provider=provider,
+                model=model,
+                context=context_dict,
+                temperature=temperature,
+                max_tokens=max_tokens
+            )
+
+            # Track model usage in context
+            if hasattr(task_context, 'track_model_usage'):
+                task_context.track_model_usage(
+                    model_id=response.model,
+                    provider_id=response.provider,
+                    mode="generate"
+                )
+
+            # Return result in expected format
+            return {
+                "status": "completed",
+                "response": response.content,
+                "provider": response.provider,
+                "model": response.model,
+                "tokens_used": response.tokens_used,
+                "cost_estimate": response.cost_estimate,
+                "context_id": context_dict.get("chat_id", "unknown")
+            }
+
+        except Exception as e:
+            logger.error(f"Task processing failed: {e}", exc_info=True)
+
+            # For testing/development, provide a mock response when AI provider is unavailable
+            error_str = str(e).lower()
+            if "api key not configured" in error_str or "providernotavailable" in error_str:
+                logger.warning("AI provider unavailable, using mock response for testing")
+                mock_content = f"Mock AI-generated content for prompt: {prompt[:100] if len(prompt) > 100 else prompt}..."
+                return {
+                    "status": "completed",
+                    "response": mock_content,
+                    "provider": "mock",
+                    "model": "mock-model",
+                    "tokens_used": len(mock_content.split()),
+                    "cost_estimate": 0.0,
+                    "context_id": context_dict.get("chat_id", "unknown"),
+                    "mock": True
+                }
+
+            return {
+                "status": "failed",
+                "error": str(e),
+                "context_id": task_context.chat_id if hasattr(task_context, 'chat_id') else "unknown"
+            }
+
     async def _wait_for_task_completion(self, task_id: str, timeout: int = 300) -> Dict[str, Any]:
         """
         Wait for task completion with timeout
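
The new process_task / process_task_async pair gives synchronous callers a bridge into the async LLM pipeline: if an event loop is already running, the coroutine is pushed to a worker thread; otherwise it runs on a fresh loop. A minimal usage sketch follows; the AIECS import path, the mode keyword, and the TaskContext constructor arguments are assumptions for illustration, while the metadata keys (prompt, aiPreference, temperature, max_tokens) and the result fields come from the diff above.

# Hedged sketch: calling the 1.1.0 synchronous entry point from plain blocking code.
from aiecs.aiecs_client import AIECS                      # assumed import path
from aiecs.domain.task.task_context import TaskContext    # constructor args below are illustrative

client = AIECS(mode="simple")  # mode kwarg assumed; the diff shows a "simple" (tools-only) mode

context = TaskContext(
    metadata={
        "prompt": "Summarize the quarterly report in three bullet points.",
        "aiPreference": {"provider": "openai", "model": "gpt-4o-mini"},
        "temperature": 0.3,
        "max_tokens": 512,
    }
)

# Safe to call without an event loop; inside async code it offloads to a thread.
result = client.process_task(context)
print(result["status"], result.get("provider"), result.get("model"))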

{aiecs-1.0.7 → aiecs-1.1.0}/aiecs/config/config.py
@@ -40,6 +40,10 @@ class Settings(BaseSettings):
     # Vector store backend selection (Qdrant deprecated, using Vertex AI by default)
     vector_store_backend: str = Field("vertex", alias="VECTOR_STORE_BACKEND")  # "vertex" (qdrant deprecated)
 
+    # Development/Server Configuration
+    reload: bool = Field(default=False, alias="RELOAD")
+    port: int = Field(default=8000, alias="PORT")
+
     model_config = ConfigDict(env_file=".env", env_file_encoding="utf-8")
 
     @property
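
The two new Settings fields are read from the RELOAD and PORT environment variables (or .env) through their aliases. Below is a small sketch of how a launcher might consume them; get_settings comes from aiecs.config.config as imported elsewhere in this diff, while the uvicorn invocation is an assumption about how the server could be started, not code from the package.

# Hedged sketch: driving a dev server from the new settings.
import os
import uvicorn
from aiecs.config.config import get_settings

os.environ.setdefault("PORT", "9000")     # normally set in the environment or .env
os.environ.setdefault("RELOAD", "true")

settings = get_settings()
uvicorn.run("aiecs.main:app", port=settings.port, reload=settings.reload)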

aiecs-1.1.0/aiecs/domain/context/__init__.py
@@ -0,0 +1,53 @@
+"""
+Context Management Domain
+
+This module provides advanced context and session management capabilities
+for the Python middleware application.
+
+Components:
+- ContextEngine: Advanced context and session management with Redis backend
+- Integration with TaskContext for enhanced functionality
+- Support for BaseServiceCheckpointer and LangGraph workflows
+
+Usage:
+    # For creating ContextEngine instances directly:
+    from aiecs.domain.context import ContextEngine
+    engine = ContextEngine(use_existing_redis=True)
+    await engine.initialize()
+
+    # For using the global singleton instance (recommended):
+    from aiecs.infrastructure.persistence import (
+        get_context_engine,
+        initialize_context_engine,
+        close_context_engine
+    )
+
+    # The global instance is automatically initialized in main.py lifespan
+    context_engine = get_context_engine()
+    if context_engine:
+        await context_engine.add_conversation_message(...)
+
+Architecture Note:
+- This package contains DOMAIN layer classes (business logic)
+- Global instance management is in INFRASTRUCTURE layer:
+  aiecs.infrastructure.persistence.context_engine_client
+- This separation follows Clean Architecture / DDD principles
+"""
+
+from .context_engine import ContextEngine, SessionMetrics, ConversationMessage
+from .conversation_models import (
+    ConversationParticipant, ConversationSession, AgentCommunicationMessage,
+    create_session_key, validate_conversation_isolation_pattern
+)
+
+__all__ = [
+    'ContextEngine',
+    'SessionMetrics',
+    'ConversationMessage',
+    'ConversationParticipant',
+    'ConversationSession',
+    'AgentCommunicationMessage',
+    'create_session_key',
+    'validate_conversation_isolation_pattern'
+]
+

{aiecs-1.0.7 → aiecs-1.1.0}/aiecs/infrastructure/persistence/context_engine_client.py
@@ -25,11 +25,14 @@ _global_context_engine: Optional['ContextEngine'] = None
 _initialization_lock = asyncio.Lock()
 _initialized = False
 
-try:
-    from aiecs.domain.context.context_engine import ContextEngine
-except ImportError:
-    ContextEngine = None
-    logger.warning("ContextEngine not available - aiecs package may not be installed")
+def _get_context_engine_class():
+    """Lazy import of ContextEngine to avoid circular dependencies."""
+    try:
+        from aiecs.domain.context.context_engine import ContextEngine
+        return ContextEngine
+    except ImportError as e:
+        logger.warning(f"ContextEngine not available - {e}")
+        return None
 
 
 async def initialize_context_engine(use_existing_redis: bool = True) -> Optional['ContextEngine']:
@@ -66,6 +69,7 @@ async def initialize_context_engine(use_existing_redis: bool = True) -> Optional
     if _initialized and _global_context_engine:
         return _global_context_engine
 
+    ContextEngine = _get_context_engine_class()
     if not ContextEngine:
         logger.error("ContextEngine class not available - cannot initialize")
         return None

{aiecs-1.0.7 → aiecs-1.1.0}/aiecs/main.py
@@ -23,6 +23,10 @@ from aiecs.ws.socket_server import sio
 
 # Import infrastructure
 from aiecs.infrastructure.persistence.database_manager import DatabaseManager
+from aiecs.infrastructure.persistence import (
+    initialize_context_engine,
+    close_context_engine
+)
 from aiecs.infrastructure.messaging.celery_task_manager import CeleryTaskManager
 from aiecs.infrastructure.monitoring.structured_logger import setup_structured_logging
 
@@ -82,6 +86,13 @@ async def lifespan(app: FastAPI):
         logger.error(f"Failed to discover tools: {e}")
         raise
 
+    # Initialize ContextEngine (optional, graceful degradation)
+    try:
+        await initialize_context_engine()
+        logger.info("ContextEngine initialized")
+    except Exception as e:
+        logger.warning(f"ContextEngine initialization failed (continuing without it): {e}")
+
     # Application startup complete
     logger.info("AIECS startup complete")
 
@@ -90,6 +101,13 @@ async def lifespan(app: FastAPI):
     # Shutdown
    logger.info("Shutting down AIECS...")
 
+    # Close ContextEngine
+    try:
+        await close_context_engine()
+        logger.info("ContextEngine closed")
+    except Exception as e:
+        logger.warning(f"Error closing ContextEngine: {e}")
+
     # Close database connection
     if db_manager:
         await db_manager.disconnect()
@@ -106,7 +124,7 @@ async def lifespan(app: FastAPI):
 app = FastAPI(
     title="AIECS - AI Execute Services",
     description="Middleware service for AI-powered task execution and tool orchestration",
-    version="1.0.5",
+    version="1.1.0",
     lifespan=lifespan
 )
 
@@ -131,7 +149,7 @@ async def health_check():
     return {
         "status": "healthy",
         "service": "aiecs",
-        "version": "1.0.7"
+        "version": "1.1.0"
     }
 
 
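
Taken together, the main.py changes register the ContextEngine in the FastAPI lifespan with graceful degradation: startup and shutdown both continue even if the engine fails. A condensed sketch of that pattern follows, using only the pieces visible in this diff (the real lifespan also handles the database, Celery manager, and tool discovery, which are omitted here):

# Hedged sketch: the ContextEngine portion of the lifespan, isolated for clarity.
import logging
from contextlib import asynccontextmanager
from fastapi import FastAPI
from aiecs.infrastructure.persistence import (
    initialize_context_engine,
    close_context_engine,
)

logger = logging.getLogger(__name__)

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: the ContextEngine is optional, so failures are logged and ignored.
    try:
        await initialize_context_engine()
        logger.info("ContextEngine initialized")
    except Exception as e:
        logger.warning(f"ContextEngine initialization failed (continuing without it): {e}")
    yield
    # Shutdown: close the engine without letting errors abort the rest of shutdown.
    try:
        await close_context_engine()
        logger.info("ContextEngine closed")
    except Exception as e:
        logger.warning(f"Error closing ContextEngine: {e}")

app = FastAPI(title="AIECS - AI Execute Services", version="1.1.0", lifespan=lifespan)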

aiecs-1.1.0/aiecs/scripts/dependance_check/__init__.py
@@ -0,0 +1,18 @@
+"""
+Dependency check and repair tools
+
+Provides dependency checking, fixing, and NLP data download utilities for the AIECS system.
+"""
+
+from .dependency_checker import main as dependency_checker_main
+from .dependency_fixer import main as dependency_fixer_main
+from .quick_dependency_check import main as quick_dependency_check_main
+from .download_nlp_data import main as download_nlp_data_main
+
+__all__ = [
+    'dependency_checker_main',
+    'dependency_fixer_main',
+    'quick_dependency_check_main',
+    'download_nlp_data_main',
+]
+

{aiecs-1.0.7/aiecs/scripts → aiecs-1.1.0/aiecs/scripts/dependance_check}/download_nlp_data.py
@@ -305,21 +305,21 @@ def verify_installation(logger: logging.Logger) -> bool:
     return success
 
 
-def main():
-    """Main function to download all required NLP data."""
+def download_all_nlp_data():
+    """Download all required NLP data."""
     logger = setup_logging()
     logger.info("Starting AIECS NLP data download process...")
-
+
     success = True
-
+
     # Download NLTK data
     if not download_nltk_data(logger):
         success = False
-
+
     # Download spaCy English model
     if not download_spacy_model('en_core_web_sm', logger):
         success = False
-
+
     # Download spaCy Chinese model (optional)
     if not download_spacy_model('zh_core_web_sm', logger):
         logger.warning("Chinese model download failed, but this is optional")
@@ -329,10 +329,10 @@ def main():
     if not download_spacy_pkuseg_model(logger):
         logger.warning("spaCy PKUSeg model download failed, but this is optional")
         # Don't mark as failure for PKUSeg model
-
+
     # Check RAKE-NLTK (optional)
     download_rake_nltk_data(logger)
-
+
     # Verify installation
     if success and verify_installation(logger):
         logger.info("✅ All NLP data downloaded and verified successfully!")
@@ -344,5 +344,47 @@ def main():
     return 1
 
 
+def main():
+    """Main entry point with argument parsing."""
+    import argparse
+
+    parser = argparse.ArgumentParser(
+        description='Download NLP data for AIECS tools',
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+  # Show this help message
+  aiecs-download-nlp-data --help
+
+  # Download all NLP data
+  aiecs-download-nlp-data --download
+  aiecs-download-nlp-data -d
+
+NLP Data Includes:
+  - NLTK packages: stopwords, punkt, wordnet, averaged_perceptron_tagger
+  - spaCy models: en_core_web_sm (English), zh_core_web_sm (Chinese, optional)
+  - spaCy PKUSeg model (Chinese segmentation, optional)
+  - RAKE-NLTK data (keyword extraction, optional)
+        """
+    )
+
+    parser.add_argument(
+        '-d', '--download',
+        action='store_true',
+        help='Download all NLP data packages'
+    )
+
+    args = parser.parse_args()
+
+    # If no arguments provided, show help
+    if not args.download:
+        parser.print_help()
+        print("\n⚠️  No action specified. Use --download or -d to download NLP data.")
+        return 0
+
+    # Execute download
+    return download_all_nlp_data()
+
+
 if __name__ == "__main__":
     sys.exit(main())
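
Besides the aiecs-download-nlp-data CLI shown in the epilog, the refactor leaves the download routine importable on its own. A short hedged sketch of programmatic use; the module path follows the new dependance_check package layout listed above, and the 0/1 return convention mirrors the exit codes in the diff.

# Hedged sketch: invoking the refactored downloader without the CLI wrapper.
import sys
from aiecs.scripts.dependance_check.download_nlp_data import download_all_nlp_data

sys.exit(download_all_nlp_data())  # 0 on success, 1 if a required download failed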

aiecs-1.1.0/aiecs/scripts/dependance_patch/__init__.py
@@ -0,0 +1,8 @@
+"""
+Dependency patching tools
+
+Provides patch fixes for various dependency libraries.
+"""
+
+__all__ = []
+

aiecs-1.1.0/aiecs/scripts/dependance_patch/fix_weasel/__init__.py
@@ -0,0 +1,12 @@
+"""
+Weasel library patching tools
+
+Fixes validator issues in the Weasel library.
+"""
+
+from .fix_weasel_validator import main as fix_weasel_validator_main
+
+__all__ = [
+    'fix_weasel_validator_main',
+]
+