aiecs-1.0.5-py3-none-any.whl → aiecs-1.0.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aiecs might be problematic.

aiecs/infrastructure/persistence/context_engine_client.py ADDED
@@ -0,0 +1,176 @@
+ """
+ Global ContextEngine Manager
+
+ This module provides a singleton ContextEngine instance that can be shared
+ across all components in the application. It follows the same pattern as
+ the Redis client initialization in aiecs.infrastructure.persistence.redis_client.
+
+ Usage:
+     # In main.py startup:
+     await initialize_context_engine()
+
+     # In any component:
+     from aiecs.infrastructure.persistence.context_engine_client import get_context_engine
+     context_engine = get_context_engine()
+ """
+
+ import logging
+ from typing import Optional
+ import asyncio
+
+ logger = logging.getLogger(__name__)
+
+ # Global singleton instance
+ _global_context_engine: Optional['ContextEngine'] = None
+ _initialization_lock = asyncio.Lock()
+ _initialized = False
+
+ try:
+     from aiecs.domain.context.context_engine import ContextEngine
+ except ImportError:
+     ContextEngine = None
+     logger.warning("ContextEngine not available - aiecs package may not be installed")
+
+
+ async def initialize_context_engine(use_existing_redis: bool = True) -> Optional['ContextEngine']:
+     """
+     Initialize the global ContextEngine instance.
+
+     This should be called once during application startup (in main.py lifespan).
+
+     Args:
+         use_existing_redis: Whether to use existing Redis client (default: True)
+
+     Returns:
+         The initialized ContextEngine instance or None if initialization fails
+
+     Example:
+         @asynccontextmanager
+         async def lifespan(app: FastAPI):
+             # Startup
+             await initialize_redis_client()
+             await initialize_context_engine()  # Initialize after Redis
+             yield
+             # Shutdown
+             await close_context_engine()
+             await close_redis_client()
+     """
+     global _global_context_engine, _initialized
+
+     if _initialized and _global_context_engine:
+         logger.info("ContextEngine already initialized")
+         return _global_context_engine
+
+     async with _initialization_lock:
+         # Double-check after acquiring lock
+         if _initialized and _global_context_engine:
+             return _global_context_engine
+
+         if not ContextEngine:
+             logger.error("ContextEngine class not available - cannot initialize")
+             return None
+
+         try:
+             logger.info("Initializing global ContextEngine...")
+             _global_context_engine = ContextEngine(use_existing_redis=use_existing_redis)
+             await _global_context_engine.initialize()
+             _initialized = True
+             logger.info("✅ Global ContextEngine initialized successfully")
+             return _global_context_engine
+
+         except Exception as e:
+             logger.error(f"❌ Failed to initialize global ContextEngine: {e}")
+             logger.warning("Application will continue without ContextEngine (degraded mode)")
+             _global_context_engine = None
+             _initialized = False
+             return None
+
+
+ def get_context_engine() -> Optional['ContextEngine']:
+     """
+     Get the global ContextEngine instance.
+
+     Returns:
+         The global ContextEngine instance or None if not initialized
+
+     Example:
+         from aiecs.infrastructure.persistence.context_engine_client import get_context_engine
+
+         context_engine = get_context_engine()
+         if context_engine:
+             await context_engine.add_conversation_message(...)
+         else:
+             # Fallback to local storage
+             logger.warning("ContextEngine not available")
+     """
+     if not _initialized:
+         logger.warning(
+             "ContextEngine not initialized. Call initialize_context_engine() "
+             "during application startup."
+         )
+     return _global_context_engine
+
+
+ async def close_context_engine() -> None:
+     """
+     Close and cleanup the global ContextEngine instance.
+
+     This should be called during application shutdown (in main.py lifespan).
+
+     Example:
+         @asynccontextmanager
+         async def lifespan(app: FastAPI):
+             # Startup
+             await initialize_context_engine()
+             yield
+             # Shutdown
+             await close_context_engine()
+     """
+     global _global_context_engine, _initialized
+
+     async with _initialization_lock:
+         if _global_context_engine:
+             try:
+                 logger.info("Closing global ContextEngine...")
+                 # ContextEngine cleanup if needed
+                 if hasattr(_global_context_engine, 'close'):
+                     await _global_context_engine.close()
+                 logger.info("✅ Global ContextEngine closed successfully")
+             except Exception as e:
+                 logger.error(f"Error closing ContextEngine: {e}")
+             finally:
+                 _global_context_engine = None
+                 _initialized = False
+
+
+ def is_context_engine_initialized() -> bool:
+     """
+     Check if the global ContextEngine is initialized.
+
+     Returns:
+         True if ContextEngine is initialized and available, False otherwise
+     """
+     return _initialized and _global_context_engine is not None
+
+
+ # Convenience function for testing
+ async def reset_context_engine() -> None:
+     """
+     Reset the global ContextEngine instance.
+
+     This is primarily for testing purposes to allow re-initialization.
+     Should NOT be used in production code.
+     """
+     global _global_context_engine, _initialized
+
+     async with _initialization_lock:
+         if _global_context_engine:
+             try:
+                 if hasattr(_global_context_engine, 'close'):
+                     await _global_context_engine.close()
+             except Exception as e:
+                 logger.warning(f"Error during ContextEngine reset: {e}")
+
+         _global_context_engine = None
+         _initialized = False
+         logger.info("ContextEngine reset completed")
aiecs/llm/__init__.py CHANGED
@@ -26,6 +26,7 @@ from .client_factory import (
 
  from .openai_client import OpenAIClient
  from .vertex_client import VertexAIClient
+ from .googleai_client import GoogleAIClient
  from .xai_client import XAIClient
 
  __all__ = [
@@ -46,6 +47,7 @@ __all__ = [
      # Individual clients
      'OpenAIClient',
      'VertexAIClient',
+     'GoogleAIClient',
      'XAIClient',
 
      # Convenience functions
aiecs/llm/base_client.py CHANGED
@@ -21,6 +21,7 @@ class LLMResponse:
      completion_tokens: Optional[int] = None
      cost_estimate: Optional[float] = None
      response_time: Optional[float] = None
+     metadata: Optional[Dict[str, Any]] = None  # Added for backward compatibility
 
      def __post_init__(self):
          """Ensure consistency of token data"""
aiecs/llm/client_factory.py CHANGED
@@ -5,6 +5,7 @@ from enum import Enum
  from .base_client import BaseLLMClient, LLMMessage, LLMResponse
  from .openai_client import OpenAIClient
  from .vertex_client import VertexAIClient
+ from .googleai_client import GoogleAIClient
  from .xai_client import XAIClient
  from ..utils.base_callback import CustomAsyncCallbackHandler
 
@@ -13,6 +14,7 @@ logger = logging.getLogger(__name__)
  class AIProvider(str, Enum):
      OPENAI = "OpenAI"
      VERTEX = "Vertex"
+     GOOGLEAI = "GoogleAI"
      XAI = "xAI"
 
  class LLMClientFactory:
@@ -41,6 +43,8 @@ class LLMClientFactory:
              return OpenAIClient()
          elif provider == AIProvider.VERTEX:
              return VertexAIClient()
+         elif provider == AIProvider.GOOGLEAI:
+             return GoogleAIClient()
          elif provider == AIProvider.XAI:
              return XAIClient()
          else:
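
With the enum member and factory branch above, selecting the new provider is a one-line change for callers. A sketch of the routing (the factory method name is not visible in this hunk, so the commented call is an assumption; constructing GoogleAIClient directly mirrors what the new branch returns):

    from aiecs.llm.client_factory import AIProvider
    from aiecs.llm.googleai_client import GoogleAIClient

    # AIProvider is a str-enum, so the raw value round-trips to the member
    assert AIProvider("GoogleAI") is AIProvider.GOOGLEAI

    client = GoogleAIClient()  # what the AIProvider.GOOGLEAI branch returns
    # client = LLMClientFactory.create_client(AIProvider.GOOGLEAI)  # hypothetical factory call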
aiecs/llm/googleai_client.py ADDED
@@ -0,0 +1,165 @@
+ import asyncio
+ import logging
+ import os
+ from typing import Dict, Any, Optional, List, AsyncGenerator
+
+ import google.generativeai as genai
+ from google.generativeai.types import GenerationConfig, HarmCategory, HarmBlockThreshold
+
+ from aiecs.llm.base_client import BaseLLMClient, LLMMessage, LLMResponse, ProviderNotAvailableError, RateLimitError
+ from aiecs.config.config import get_settings
+
+ logger = logging.getLogger(__name__)
+
+ class GoogleAIClient(BaseLLMClient):
+     """Google AI (Gemini) provider client"""
+
+     def __init__(self):
+         super().__init__("GoogleAI")
+         self.settings = get_settings()
+         self._initialized = False
+         self.client = None
+
+         # Token cost estimates for Gemini 2.5 series
+         self.token_costs = {
+             "gemini-2.5-pro": {"input": 0.00125, "output": 0.00375},
+             "gemini-2.5-flash": {"input": 0.000075, "output": 0.0003},
+         }
+
+     def _init_google_ai(self):
+         """Lazy initialization of Google AI SDK"""
+         if not self._initialized:
+             api_key = self.settings.googleai_api_key or os.environ.get("GOOGLEAI_API_KEY")
+             if not api_key:
+                 raise ProviderNotAvailableError("Google AI API key not configured. Set GOOGLEAI_API_KEY.")
+
+             try:
+                 genai.configure(api_key=api_key)
+                 self._initialized = True
+                 self.logger.info("Google AI SDK initialized successfully.")
+             except Exception as e:
+                 raise ProviderNotAvailableError(f"Failed to initialize Google AI SDK: {str(e)}")
+
+     async def generate_text(
+         self,
+         messages: List[LLMMessage],
+         model: Optional[str] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         **kwargs
+     ) -> LLMResponse:
+         """Generate text using Google AI"""
+         self._init_google_ai()
+         model_name = model or "gemini-2.5-pro"
+
+         try:
+             model_instance = genai.GenerativeModel(model_name)
+
+             # Convert messages to Google AI format
+             history = [{"role": msg.role, "parts": [msg.content]} for msg in messages]
+
+             # The last message is the prompt
+             prompt = history.pop()
+
+             # Create GenerationConfig
+             generation_config = GenerationConfig(
+                 temperature=temperature,
+                 max_output_tokens=max_tokens or 8192,
+                 top_p=kwargs.get("top_p", 0.95),
+                 top_k=kwargs.get("top_k", 40)
+             )
+
+             # Safety settings to match vertex_client
+             safety_settings = {
+                 HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
+                 HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
+                 HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
+                 HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
+             }
+
+             response = await model_instance.generate_content_async(
+                 contents=prompt['parts'],
+                 generation_config=generation_config,
+                 safety_settings=safety_settings
+             )
+
+             content = response.text
+             prompt_tokens = response.usage_metadata.prompt_token_count
+             completion_tokens = response.usage_metadata.candidates_token_count
+             total_tokens = response.usage_metadata.total_token_count
+
+             cost = self._estimate_cost(
+                 model_name,
+                 prompt_tokens,
+                 completion_tokens,
+                 self.token_costs
+             )
+
+             return LLMResponse(
+                 content=content,
+                 provider=self.provider_name,
+                 model=model_name,
+                 tokens_used=total_tokens,
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 cost_estimate=cost
+             )
+
+         except Exception as e:
+             if "quota" in str(e).lower():
+                 raise RateLimitError(f"Google AI quota exceeded: {str(e)}")
+             self.logger.error(f"Error generating text with Google AI: {e}")
+             raise
+
+     async def stream_text(
+         self,
+         messages: List[LLMMessage],
+         model: Optional[str] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         **kwargs
+     ) -> AsyncGenerator[str, None]:
+         """Stream text generation using Google AI"""
+         self._init_google_ai()
+         model_name = model or "gemini-2.5-pro"
+
+         try:
+             model_instance = genai.GenerativeModel(model_name)
+
+             # Convert messages to Google AI format
+             history = [{"role": msg.role, "parts": [msg.content]} for msg in messages]
+             prompt = history.pop()
+
+             generation_config = GenerationConfig(
+                 temperature=temperature,
+                 max_output_tokens=max_tokens or 8192,
+                 top_p=kwargs.get("top_p", 0.95),
+                 top_k=kwargs.get("top_k", 40)
+             )
+
+             safety_settings = {
+                 HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
+                 HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
+                 HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
+                 HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
+             }
+
+             response_stream = await model_instance.generate_content_async(
+                 contents=prompt['parts'],
+                 generation_config=generation_config,
+                 safety_settings=safety_settings,
+                 stream=True
+             )
+
+             async for chunk in response_stream:
+                 if chunk.text:
+                     yield chunk.text
+
+         except Exception as e:
+             self.logger.error(f"Error streaming text with Google AI: {e}")
+             raise
+
+     async def close(self):
+         """Clean up resources"""
+         # Google AI SDK does not require explicit closing of a client
+         self._initialized = False
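
A short usage sketch for the new client, assuming LLMMessage takes role and content keyword arguments (only those attributes appear in this diff) and that GOOGLEAI_API_KEY is configured:

    import asyncio

    from aiecs.llm.base_client import LLMMessage
    from aiecs.llm.googleai_client import GoogleAIClient

    async def main():
        client = GoogleAIClient()
        messages = [LLMMessage(role="user", content="Summarize this release in one sentence.")]

        # Single-shot generation; note that generate_text() sends only the last
        # message as the prompt (history.pop() discards earlier turns)
        response = await client.generate_text(messages, model="gemini-2.5-flash", max_tokens=256)
        print(response.content, response.tokens_used, response.cost_estimate)

        # Streaming variant
        async for chunk in client.stream_text(messages, model="gemini-2.5-flash"):
            print(chunk, end="")

    asyncio.run(main())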
aiecs/llm/vertex_client.py CHANGED
@@ -2,8 +2,8 @@ import asyncio
  import logging
  import os
  from typing import Dict, Any, Optional, List, AsyncGenerator
- from vertexai.generative_models import GenerativeModel, HarmCategory, HarmBlockThreshold
  import vertexai
+ from vertexai.generative_models import GenerativeModel, HarmCategory, HarmBlockThreshold, GenerationConfig, SafetySetting
  from google.oauth2 import service_account
 
  from aiecs.llm.base_client import BaseLLMClient, LLMMessage, LLMResponse, ProviderNotAvailableError, RateLimitError
@@ -76,6 +76,7 @@ class VertexAIClient(BaseLLMClient):
          try:
              # Use the stable Vertex AI API
              model_instance = GenerativeModel(model_name)
+             self.logger.debug(f"Initialized Vertex AI model: {model_name}")
 
              # Convert messages to Vertex AI format
              if len(messages) == 1 and messages[0].role == "user":
@@ -84,50 +85,82 @@
                  # For multi-turn conversations, combine messages
                  prompt = "\n".join([f"{msg.role}: {msg.content}" for msg in messages])
 
+             # Use modern GenerationConfig object
+             generation_config = GenerationConfig(
+                 temperature=temperature,
+                 max_output_tokens=max_tokens or 8192,  # Increased to account for thinking tokens
+                 top_p=0.95,
+                 top_k=40,
+             )
+
+             # Modern safety settings configuration using SafetySetting objects
+             safety_settings = [
+                 SafetySetting(category=HarmCategory.HARM_CATEGORY_HARASSMENT, threshold=HarmBlockThreshold.BLOCK_NONE),
+                 SafetySetting(category=HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold=HarmBlockThreshold.BLOCK_NONE),
+                 SafetySetting(category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=HarmBlockThreshold.BLOCK_NONE),
+                 SafetySetting(category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold=HarmBlockThreshold.BLOCK_NONE),
+             ]
+
              response = await asyncio.get_event_loop().run_in_executor(
                  None,
                  lambda: model_instance.generate_content(
                      prompt,
-                     generation_config={
-                         "temperature": temperature,
-                         "max_output_tokens": max_tokens or 8192,  # Increased to account for thinking tokens
-                         "top_p": 0.95,
-                         "top_k": 40,
-                     },
-                     safety_settings={
-                         HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
-                         HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
-                         HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
-                         HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
-                     }
+                     generation_config=generation_config,
+                     safety_settings=safety_settings
                  )
              )
 
-             # Handle response content safely
+             # Handle response content safely - improved multi-part response handling
+             content = None
             try:
+                 # First try to get text directly
                 content = response.text
                 self.logger.debug(f"Vertex AI response received: {content[:100]}...")
-             except ValueError as ve:
-                 # Handle cases where response has no content (safety filters, etc.)
-                 self.logger.warning(f"Vertex AI response error: {str(ve)}")
-                 self.logger.debug(f"Full response object: {response}")
-
-                 # Check if response has candidates but no text
+             except (ValueError, AttributeError) as ve:
+                 # Handle multi-part responses and other issues
+                 self.logger.warning(f"Cannot get response text directly: {str(ve)}")
+
+                 # Try to extract content from candidates with multi-part support
                 if hasattr(response, 'candidates') and response.candidates:
                     candidate = response.candidates[0]
                     self.logger.debug(f"Candidate finish_reason: {getattr(candidate, 'finish_reason', 'unknown')}")
-
-                     # If finish_reason is MAX_TOKENS, it might be due to thinking tokens
-                     if hasattr(candidate, 'finish_reason') and candidate.finish_reason == 'MAX_TOKENS':
-                         content = "[Response truncated due to token limit - consider increasing max_tokens for Gemini 2.5 models]"
-                         self.logger.warning("Response truncated due to MAX_TOKENS - Gemini 2.5 uses thinking tokens")
-                     elif "no parts" in str(ve).lower() or "safety filters" in str(ve).lower():
-                         content = "[Response blocked by safety filters or has no content]"
-                         self.logger.warning(f"Vertex AI response blocked or empty: {str(ve)}")
-                     else:
-                         content = f"[Response error: {str(ve)}]"
+
+                     # Handle multi-part content
+                     if hasattr(candidate, 'content') and hasattr(candidate.content, 'parts'):
+                         try:
+                             # Extract text from all parts
+                             text_parts = []
+                             for part in candidate.content.parts:
+                                 if hasattr(part, 'text') and part.text:
+                                     text_parts.append(part.text)
+
+                             if text_parts:
+                                 content = "\n".join(text_parts)
+                                 self.logger.info(f"Successfully extracted multi-part response: {len(text_parts)} parts")
+                             else:
+                                 self.logger.warning("No text content found in multi-part response")
+                         except Exception as part_error:
+                             self.logger.error(f"Failed to extract content from multi-part response: {str(part_error)}")
+
+                     # If still no content, check finish reason
+                     if not content:
+                         if hasattr(candidate, 'finish_reason'):
+                             if candidate.finish_reason == 'MAX_TOKENS':
+                                 content = "[Response truncated due to token limit - consider increasing max_tokens for Gemini 2.5 models]"
+                                 self.logger.warning("Response truncated due to MAX_TOKENS - Gemini 2.5 uses thinking tokens")
+                             elif candidate.finish_reason in ['SAFETY', 'RECITATION']:
+                                 content = "[Response blocked by safety filters or content policy]"
+                                 self.logger.warning(f"Response blocked by safety filters: {candidate.finish_reason}")
+                             else:
+                                 content = f"[Response error: Cannot get response text - {candidate.finish_reason}]"
+                         else:
+                             content = "[Response error: Cannot get the response text]"
                 else:
-                     content = f"[Response error: {str(ve)}]"
+                     content = f"[Response error: No candidates found - {str(ve)}]"
+
+                 # Final fallback
+                 if not content:
+                     content = "[Response error: Cannot get the response text. Multiple content parts are not supported.]"
 
             # Vertex AI doesn't provide detailed token usage in the response
             tokens_used = self._count_tokens_estimate(prompt + content)
@@ -150,11 +183,16 @@
             if "quota" in str(e).lower() or "limit" in str(e).lower():
                 raise RateLimitError(f"Vertex AI quota exceeded: {str(e)}")
             # Handle specific Vertex AI response errors
-             if "cannot get the response text" in str(e).lower() or "safety filters" in str(e).lower():
+             if any(keyword in str(e).lower() for keyword in [
+                 "cannot get the response text",
+                 "safety filters",
+                 "multiple content parts are not supported",
+                 "cannot get the candidate text"
+             ]):
                 self.logger.warning(f"Vertex AI response issue: {str(e)}")
                 # Return a response indicating the issue
                 return LLMResponse(
-                     content="[Response unavailable due to safety filters or content policy]",
+                     content="[Response unavailable due to content processing issues or safety filters]",
                     provider=self.provider_name,
                     model=model_name,
                     tokens_used=self._count_tokens_estimate(prompt),
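
Note that the rewritten fallback paths return bracketed placeholder strings rather than raising, so callers that need to distinguish real completions from degraded ones may want to screen for them. A small sketch (the helper is ours; the prefixes are taken verbatim from the diff above):

    FALLBACK_PREFIXES = (
        "[Response error",
        "[Response truncated",
        "[Response blocked",
        "[Response unavailable",
    )

    def is_vertex_fallback(content: str) -> bool:
        # True when the vertex client substituted a placeholder because text
        # extraction failed, the output hit MAX_TOKENS, or safety filters fired
        return content.startswith(FALLBACK_PREFIXES)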
aiecs/main.py CHANGED
@@ -131,7 +131,7 @@ async def health_check():
      return {
          "status": "healthy",
          "service": "aiecs",
-         "version": "1.0.0"
+         "version": "1.0.7"
      }
 
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: aiecs
- Version: 1.0.5
+ Version: 1.0.7
  Summary: AI Execute Services - A middleware framework for AI-powered task execution and tool orchestration
  Author-email: AIECS Team <iretbl@gmail.com>
  License-Expression: MIT
@@ -37,7 +37,8 @@ Requires-Dist: python-engineio<5.0.0,>=4.12.1
  Requires-Dist: tenacity<10.0.0,>=9.1.2
  Requires-Dist: flower<3.0.0,>=2.0.1
  Requires-Dist: openai<1.76.0,>=1.68.2
- Requires-Dist: google-cloud-aiplatform<2.0.0,>=1.71.1
+ Requires-Dist: google-cloud-aiplatform<2.0.0,>=1.80.0
+ Requires-Dist: google-generativeai<1.0.0,>=0.8.0
  Requires-Dist: langchain<0.4.0,>=0.3.26
  Requires-Dist: langgraph<0.6.0,>=0.5.3
  Requires-Dist: weasel==0.4.1
@@ -1,18 +1,24 @@
- aiecs/__init__.py,sha256=_hZLs48lP9j2hr3Wdf2KYKIED9YMWtDD7s-SSfyslgs,1859
+ aiecs/__init__.py,sha256=AWE44N5Mrei5upCp4_F1Qunc9mkcD42vvqtf48d5VCw,1859
  aiecs/__main__.py,sha256=AfQpzy3SgwWuP4DuymYcm4MISMuzqwhxxGSYo53PBvY,1035
  aiecs/aiecs_client.py,sha256=gJbCY6zuHR9TZPCgHhxd-d4CwCW9P_lUrtTSC5-ADWE,10527
- aiecs/main.py,sha256=3ThBIeGWw-tIZguWd9HMSlFEcTOQeiFFlWBGq1INKY4,9306
+ aiecs/main.py,sha256=dzXOz3feZEwSEOJse4ZqGHRkoNjQoUh6htj4y460oVw,9306
  aiecs/application/__init__.py,sha256=NkmrUH1DqxJ3vaVC8QwscNdlWqHfC7ZagL4k3nZ_qz4,192
  aiecs/application/executors/__init__.py,sha256=WIl7L9HBsEhNfbNtJdvBvFUJXzESvNZVaiAA6tdtJcs,191
  aiecs/application/executors/operation_executor.py,sha256=-7mFo1hUnWdehVPg0fnSiRhW3LACpIiyLSH-iu7bX4U,13818
  aiecs/config/__init__.py,sha256=HykU6FgZrUx0w8V1_kAjP9NpXZTddZ9M3xo0fmBwMU8,336
- aiecs/config/config.py,sha256=vWkbWpRLxkRDdsl8hwgNpTKdvOhlxXiNh7oLQZBse-U,4993
+ aiecs/config/config.py,sha256=L37G1xOQggnuHgYp9uOvbFsGlXI3JzHgZZ8mZpQVyGo,5065
  aiecs/config/registry.py,sha256=5CPJcjeMu3FLc_keuCtJT60DtUxF6w-I68uIoxpcdq8,637
  aiecs/core/__init__.py,sha256=H0ZIk96q0KHKivcobnUCVJdJZmewucVJ9MKhRgUxmk0,1037
  aiecs/core/interface/__init__.py,sha256=soI7zdoN3eQynVb9uiwmgXkM5E75JYffTILktHb48x8,688
  aiecs/core/interface/execution_interface.py,sha256=6bXruts8dyAg647lxPDQkF-cdJG1W8ZqpxFQ6hjVrd4,4810
  aiecs/core/interface/storage_interface.py,sha256=F7GQEZ_ZiRWeen7oZO6A4S0nW0VORYsygk2BYLw5aiY,5680
  aiecs/domain/__init__.py,sha256=fwcoCiZxcRmXPhRUEiYVYdY9QAB29Dmst1oPJC9jYvU,875
+ aiecs/domain/community/collaborative_workflow.py,sha256=XheSSBzEOQM9bT1cPd2CJD9q-ORNe-oMedVGEnM1teM,15441
+ aiecs/domain/community/community_integration.py,sha256=Xy4bRy7S4IqIHl2sWIclb_k8ZqAhAs8nnAxdxQnYE7s,16219
+ aiecs/domain/community/community_manager.py,sha256=D-HrQTXO-uu9MK6Utr9aCVlQug0UAJc3eYamecQZulc,12119
+ aiecs/domain/community/decision_engine.py,sha256=QZnSo0KxHk8HclUiB2sTYUI-H26ZI48StYXVIssO5kA,13389
+ aiecs/domain/community/resource_manager.py,sha256=REWe6dSzxzWIxW-Xu07k3n8wSGfTw4frPWSfEW50t70,15912
+ aiecs/domain/community/models/community_models.py,sha256=75h8lb1LmOVfpAw5Eh4_2djbSZkcvccKT_Ke1Xx573E,10094
  aiecs/domain/context/__init__.py,sha256=FfKatEwU5-REwBNdVRtWUXYqhdAFD2O4sKafnS5sG8M,864
  aiecs/domain/context/context_engine.py,sha256=HiUMAsUYx9ts242JzPZ0nJ9YjOYNpTnBY2lFI5RQ2FI,36439
  aiecs/domain/context/conversation_models.py,sha256=GgHEZTnHs6U-ecfAJ-0GJUXF48AdXCw0O8Lb8BzQ3oU,13005
@@ -30,16 +36,18 @@ aiecs/infrastructure/monitoring/__init__.py,sha256=fQ13Q1MTIJTNlh35BSCqXpayCTM_k
  aiecs/infrastructure/monitoring/executor_metrics.py,sha256=z8KJpq6tfCOEArfR-YJ4UClTsef2mNMFuSDHrP51Aww,6040
  aiecs/infrastructure/monitoring/structured_logger.py,sha256=iI895YHmPoaLdXjxHxd952PeTfGw6sh-yUDCnF8R7NY,1657
  aiecs/infrastructure/monitoring/tracing_manager.py,sha256=g4u6paNCZCYdGDEMZiv4bYv_GTG0s8oug-BJgFmkDp0,13449
- aiecs/infrastructure/persistence/__init__.py,sha256=7M0Z58QsSC6PJlZoLfBEiYQJh6t62P5QtwjiqBU0rEs,238
+ aiecs/infrastructure/persistence/__init__.py,sha256=yoog7fEHmhgY50vGdgDNqiZCPUUL2-xnJrdhT5PrPWU,570
+ aiecs/infrastructure/persistence/context_engine_client.py,sha256=CCVjNU4B3SeXdSLVpzV5hnsJilcF-AYdMzGmU0Bcwuo,5880
  aiecs/infrastructure/persistence/database_manager.py,sha256=MRkMTALeeybzAfnfuJrOXbEchBCrMAgsz8YYyEUVMjI,12592
  aiecs/infrastructure/persistence/file_storage.py,sha256=d3tcV7Wg_-TGsbw3PY9ttNANntR5rIo7mBgE0CGXKZQ,23321
  aiecs/infrastructure/persistence/redis_client.py,sha256=CqPtYFP8-KHl3cJG9VHun9YFFSp3kCc3ZaZbW7GlqUU,5791
- aiecs/llm/__init__.py,sha256=EsOIu25eDnhEYKZDb1h_O9RxYIF7vaiORUZSUipzhsM,1084
- aiecs/llm/base_client.py,sha256=xjirSGpLSsiWEhiTbKEFHNqbZanwJxBji9yVxegu77w,3193
- aiecs/llm/client_factory.py,sha256=Ysa3NYJIwgqBfFomTfG-G8z3cIElLPcv_vFM4D1IyCc,13566
+ aiecs/llm/__init__.py,sha256=INfMR-xeQ0K53iXf_LRMVdo-hDXooIoAcmXRemLWmjQ,1150
+ aiecs/llm/base_client.py,sha256=R4B-KBJ0aCEuvni5XwHd6sR3XNYQF3q1ZycgQvCJUfk,3275
+ aiecs/llm/client_factory.py,sha256=qvQXo0kIcGC6SzIE7xfCN0f7CcGNKPwauFq2mTUP0SA,13718
  aiecs/llm/custom_callbacks.py,sha256=qwNwWqBl8EYj2aY16zXChZKLpxZahU9QeY4p57uZ5YU,9933
+ aiecs/llm/googleai_client.py,sha256=4cJuhdzzn-svM7NLnYASYsnV2P758EeD5_ljVZu_XFc,6213
  aiecs/llm/openai_client.py,sha256=T3-LMDV-bzv0fwyDCw6h9D2XbNbWd0dt-QmjKg-wkd0,4397
- aiecs/llm/vertex_client.py,sha256=VN474c9fTUI2KpHOYlKJ8yQgd_VPQ7ht3dTtT7DoyWM,8601
+ aiecs/llm/vertex_client.py,sha256=DSKyTwlhpOVTXfCTgTD7NholVFtVm7F5qmHk8p8X1Po,10823
  aiecs/llm/xai_client.py,sha256=VYfBGD8ns6NHscT68DDmAmvwBMEVykLQufH8kFNf_L8,6809
  aiecs/scripts/DEPENDENCY_SYSTEM_SUMMARY.md,sha256=u2OLwmXaRGuTwEfj3jQ_yzAO_Z49P1CBC1pV3iULuoE,5866
  aiecs/scripts/README_DEPENDENCY_CHECKER.md,sha256=7sAyeiMN7I-RsTOudo_JO2CTbC5ObEV0z_YyGtjiMcI,6003
@@ -82,9 +90,9 @@ aiecs/utils/prompt_loader.py,sha256=cBS2bZXpYQOWSiOGkhwIzyy3_bETqwIblRi_9qQT9iQ,
  aiecs/utils/token_usage_repository.py,sha256=1xjenLYwC0YT6lKZFEGO4scRCXLuWdec2MWjzih5SZY,10210
  aiecs/ws/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  aiecs/ws/socket_server.py,sha256=j_9idVY_rWlTsF51FgmuhWCWFVt7_gAHL8vNg3IxV5g,1476
- aiecs-1.0.5.dist-info/licenses/LICENSE,sha256=_1YRaIS0eZu1pv6xfz245UkU0i1Va2B841hv3OWRwqg,12494
- aiecs-1.0.5.dist-info/METADATA,sha256=_xt9zCfqZG6dafoVqRcmtYBdLEgjXlPh5na8qdTokU4,16388
- aiecs-1.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- aiecs-1.0.5.dist-info/entry_points.txt,sha256=0Bj2pSaZM-ADKTktbCQ0KQxRe0s8mQFKVsg3IGDJGqA,342
- aiecs-1.0.5.dist-info/top_level.txt,sha256=22IlUlOqh9Ni3jXlQNMNUqzbW8dcxXPeR_EQ-BJVcV8,6
- aiecs-1.0.5.dist-info/RECORD,,
+ aiecs-1.0.7.dist-info/licenses/LICENSE,sha256=_1YRaIS0eZu1pv6xfz245UkU0i1Va2B841hv3OWRwqg,12494
+ aiecs-1.0.7.dist-info/METADATA,sha256=ha9q-TJYdEZbuEYr4QIZGA_6m5alcDLmvWRNYQCw2NA,16437
+ aiecs-1.0.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ aiecs-1.0.7.dist-info/entry_points.txt,sha256=0Bj2pSaZM-ADKTktbCQ0KQxRe0s8mQFKVsg3IGDJGqA,342
+ aiecs-1.0.7.dist-info/top_level.txt,sha256=22IlUlOqh9Ni3jXlQNMNUqzbW8dcxXPeR_EQ-BJVcV8,6
+ aiecs-1.0.7.dist-info/RECORD,,