aiecs 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aiecs might be problematic. Click here for more details.

Files changed (90) hide show
  1. aiecs/__init__.py +75 -0
  2. aiecs/__main__.py +41 -0
  3. aiecs/aiecs_client.py +295 -0
  4. aiecs/application/__init__.py +10 -0
  5. aiecs/application/executors/__init__.py +10 -0
  6. aiecs/application/executors/operation_executor.py +341 -0
  7. aiecs/config/__init__.py +15 -0
  8. aiecs/config/config.py +117 -0
  9. aiecs/config/registry.py +19 -0
  10. aiecs/core/__init__.py +46 -0
  11. aiecs/core/interface/__init__.py +34 -0
  12. aiecs/core/interface/execution_interface.py +150 -0
  13. aiecs/core/interface/storage_interface.py +214 -0
  14. aiecs/domain/__init__.py +20 -0
  15. aiecs/domain/context/__init__.py +28 -0
  16. aiecs/domain/context/content_engine.py +982 -0
  17. aiecs/domain/context/conversation_models.py +306 -0
  18. aiecs/domain/execution/__init__.py +12 -0
  19. aiecs/domain/execution/model.py +49 -0
  20. aiecs/domain/task/__init__.py +13 -0
  21. aiecs/domain/task/dsl_processor.py +460 -0
  22. aiecs/domain/task/model.py +50 -0
  23. aiecs/domain/task/task_context.py +257 -0
  24. aiecs/infrastructure/__init__.py +26 -0
  25. aiecs/infrastructure/messaging/__init__.py +13 -0
  26. aiecs/infrastructure/messaging/celery_task_manager.py +341 -0
  27. aiecs/infrastructure/messaging/websocket_manager.py +289 -0
  28. aiecs/infrastructure/monitoring/__init__.py +12 -0
  29. aiecs/infrastructure/monitoring/executor_metrics.py +138 -0
  30. aiecs/infrastructure/monitoring/structured_logger.py +50 -0
  31. aiecs/infrastructure/monitoring/tracing_manager.py +376 -0
  32. aiecs/infrastructure/persistence/__init__.py +12 -0
  33. aiecs/infrastructure/persistence/database_manager.py +286 -0
  34. aiecs/infrastructure/persistence/file_storage.py +671 -0
  35. aiecs/infrastructure/persistence/redis_client.py +162 -0
  36. aiecs/llm/__init__.py +54 -0
  37. aiecs/llm/base_client.py +99 -0
  38. aiecs/llm/client_factory.py +339 -0
  39. aiecs/llm/custom_callbacks.py +228 -0
  40. aiecs/llm/openai_client.py +125 -0
  41. aiecs/llm/vertex_client.py +186 -0
  42. aiecs/llm/xai_client.py +184 -0
  43. aiecs/main.py +351 -0
  44. aiecs/scripts/DEPENDENCY_SYSTEM_SUMMARY.md +241 -0
  45. aiecs/scripts/README_DEPENDENCY_CHECKER.md +309 -0
  46. aiecs/scripts/README_WEASEL_PATCH.md +126 -0
  47. aiecs/scripts/__init__.py +3 -0
  48. aiecs/scripts/dependency_checker.py +825 -0
  49. aiecs/scripts/dependency_fixer.py +348 -0
  50. aiecs/scripts/download_nlp_data.py +348 -0
  51. aiecs/scripts/fix_weasel_validator.py +121 -0
  52. aiecs/scripts/fix_weasel_validator.sh +82 -0
  53. aiecs/scripts/patch_weasel_library.sh +188 -0
  54. aiecs/scripts/quick_dependency_check.py +269 -0
  55. aiecs/scripts/run_weasel_patch.sh +41 -0
  56. aiecs/scripts/setup_nlp_data.sh +217 -0
  57. aiecs/tasks/__init__.py +2 -0
  58. aiecs/tasks/worker.py +111 -0
  59. aiecs/tools/__init__.py +196 -0
  60. aiecs/tools/base_tool.py +202 -0
  61. aiecs/tools/langchain_adapter.py +361 -0
  62. aiecs/tools/task_tools/__init__.py +82 -0
  63. aiecs/tools/task_tools/chart_tool.py +704 -0
  64. aiecs/tools/task_tools/classfire_tool.py +901 -0
  65. aiecs/tools/task_tools/image_tool.py +397 -0
  66. aiecs/tools/task_tools/office_tool.py +600 -0
  67. aiecs/tools/task_tools/pandas_tool.py +565 -0
  68. aiecs/tools/task_tools/report_tool.py +499 -0
  69. aiecs/tools/task_tools/research_tool.py +363 -0
  70. aiecs/tools/task_tools/scraper_tool.py +548 -0
  71. aiecs/tools/task_tools/search_api.py +7 -0
  72. aiecs/tools/task_tools/stats_tool.py +513 -0
  73. aiecs/tools/temp_file_manager.py +126 -0
  74. aiecs/tools/tool_executor/__init__.py +35 -0
  75. aiecs/tools/tool_executor/tool_executor.py +518 -0
  76. aiecs/utils/LLM_output_structor.py +409 -0
  77. aiecs/utils/__init__.py +23 -0
  78. aiecs/utils/base_callback.py +50 -0
  79. aiecs/utils/execution_utils.py +158 -0
  80. aiecs/utils/logging.py +1 -0
  81. aiecs/utils/prompt_loader.py +13 -0
  82. aiecs/utils/token_usage_repository.py +279 -0
  83. aiecs/ws/__init__.py +0 -0
  84. aiecs/ws/socket_server.py +41 -0
  85. aiecs-1.0.0.dist-info/METADATA +610 -0
  86. aiecs-1.0.0.dist-info/RECORD +90 -0
  87. aiecs-1.0.0.dist-info/WHEEL +5 -0
  88. aiecs-1.0.0.dist-info/entry_points.txt +7 -0
  89. aiecs-1.0.0.dist-info/licenses/LICENSE +225 -0
  90. aiecs-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,409 @@
1
+ """
2
+ LLM Output Structor - Readability and Tone Enhancer
3
+
4
+ This module transforms LLM outputs to be more readable and natural while preserving
5
+ the reasoning process and technical details. Instead of hiding the thinking process,
6
+ it makes it more accessible and easier to understand.
7
+
8
+ Key transformations:
9
+ 1. Add friendly openings and closings
10
+ 2. Transform technical jargon to more accessible language
11
+ 3. Improve formatting and structure for better readability
12
+ 4. Preserve reasoning but make it more conversational
13
+ 5. Maintain transparency while improving user experience
14
+ """
15
+
16
+ import re
17
+ import json
18
+ from typing import List, Dict, Any, Optional, Union
19
+
20
+
21
class LLMOutputTransformer:
    """
    Transformer that enhances readability while preserving LLM reasoning and analysis.

    It rewrites raw LLM output into a friendlier message by (1) prepending a
    greeting and appending a closing chosen by message type, (2) swapping
    technical jargon and section headers for plain-language equivalents,
    (3) lightly reformatting JSON-ish fragments, bullets and spacing, and
    (4) making "reasoning" text conversational and second-person.
    All replacement tables are plain instance attributes so callers can
    customize them after construction.
    """

    def __init__(self):
        # Word replacements for better readability.
        # NOTE: applied case-insensitively with word boundaries; replacement
        # text is inserted as-is (lowercase), so original casing is lost.
        self.technical_replacements = {
            # Technical terms to friendly alternatives
            "demand_state": "request status",
            "VAGUE_UNCLEAR": "needs clarification",
            "SMART_COMPLIANT": "well-defined",
            "SMART_LARGE_SCOPE": "comprehensive but needs focusing",
            "smart_analysis": "detailed analysis",
            "confidence": "certainty level",
            "intent_categories": "identified purposes",
            "complexity_assessment": "complexity evaluation",
            "execution_mode": "approach type",
            "agent_requirements": "required components",
            "meta_architect": "strategic planner",
            "intent_parser": "request analyzer",

            # Analysis terms
            "fails most SMART criteria": "lacks several key details",
            "SMART criteria": "clarity requirements",
            "not Specific": "not specific enough",
            "not Measurable": "missing measurable goals",
            "not Achievable": "unclear if achievable",
            "not Time-bound": "missing timeframe",
            "high-level goal": "general objective",
            "actionable task": "specific action item",

            # Common phrases
            "classic example": "typical case",
            "significant clarification": "more details",
            "multi-stage": "step-by-step",
            "architect_output": "strategic plan",
            "problem_analysis": "situation assessment",
            "intent parsing": "request understanding",
            "blueprint": "detailed plan",
            "roadmap": "action sequence"
        }

        # Friendly section headers (literal substring replacement, case-sensitive).
        # NOTE(review): header replacement runs before term replacement, so e.g.
        # "Confidence:" -> "📊 Confidence Level:" and the subsequent
        # case-insensitive "confidence" rule rewrites it again — confirm this
        # double substitution is intended before changing the ordering.
        self.section_headers = {
            "Reasoning:": "💭 My Analysis:",
            "Clarification needed": "📝 Questions to Help Me Understand Better",
            "clarification_needed:": "📝 Helpful Questions:",
            "Problem Analysis:": "🔍 Situation Overview:",
            "Solution Approach:": "💡 Recommended Approach:",
            "Key Components:": "🔧 Main Elements:",
            "Confidence:": "📊 Confidence Level:",
            "Intent Categories:": "🎯 Identified Goals:",
            "Complexity:": "📈 Complexity Level:"
        }

        # Opening greetings keyed by message type ('general' is the fallback).
        self.greetings = {
            'clarification': "Hello! Thank you for reaching out. Let me help you with your request.",
            'confirmation': "Great! I've carefully analyzed your requirements.",
            'completion': "Excellent! I've completed the analysis.",
            'general': "Thank you for your message."
        }

        # Closing messages keyed by message type ('general' is the fallback).
        self.closings = {
            'clarification': "\n\n✨ These details will help me provide you with the most accurate and helpful solution!",
            'confirmation': "\n\n🤝 I'm ready to proceed whenever you are. Feel free to ask any questions or suggest modifications!",
            'completion': "\n\n✅ Everything is set up and ready. Let me know if you need any adjustments!",
            'general': "\n\n💬 Please let me know if you need any clarification!"
        }

    def transform_message(self, content: str, message_type: str = 'general', preserve_structure: bool = True) -> str:
        """
        Transform LLM output to be more readable while preserving content.

        Args:
            content: Raw LLM output
            message_type: Type of message ('clarification', 'confirmation', 'completion', 'general')
            preserve_structure: Whether to preserve the original structure
                (currently unused — kept for interface compatibility)

        Returns:
            Enhanced, more readable message: greeting + transformed content + closing.
        """
        # Add appropriate greeting
        result = self._add_greeting(message_type)

        # Transform the main content
        transformed_content = self._enhance_readability(content)

        # Special handling for different message types
        if message_type == 'clarification':
            transformed_content = self._enhance_clarification(transformed_content)
        elif message_type == 'confirmation':
            transformed_content = self._enhance_confirmation(transformed_content)

        result += "\n\n" + transformed_content

        # Add appropriate closing
        result += self._add_closing(message_type)

        return result

    def _add_greeting(self, message_type: str) -> str:
        """Return the greeting for `message_type`, falling back to 'general'."""
        return self.greetings.get(message_type, self.greetings['general'])

    def _add_closing(self, message_type: str) -> str:
        """Return the closing for `message_type`, falling back to 'general'."""
        return self.closings.get(message_type, self.closings['general'])

    def _enhance_readability(self, content: str) -> str:
        """Replace technical terms/headers and improve formatting of `content`."""
        result = content

        # Replace section headers with friendly versions (literal, case-sensitive)
        for old_header, new_header in self.section_headers.items():
            result = result.replace(old_header, new_header)

        # Replace technical terms with friendly alternatives
        for technical, friendly in self.technical_replacements.items():
            # Case-insensitive, word-bounded replacement
            result = re.sub(rf'\b{re.escape(technical)}\b', friendly, result, flags=re.IGNORECASE)

        # Improve JSON-like structures visibility
        result = self._format_json_structures(result)

        # Enhance bullet points
        result = self._enhance_bullet_points(result)

        # Add spacing for better readability
        result = self._improve_spacing(result)

        return result

    def _enhance_clarification(self, content: str) -> str:
        """Special enhancements for clarification messages."""
        # Transform the analysis section (already renamed by _enhance_readability)
        # to be more conversational.
        content = re.sub(
            r"(💭 My Analysis:)(.*?)(?=\n\n|$)",
            lambda m: f"{m.group(1)}\n{self._make_reasoning_conversational(m.group(2))}",
            content,
            flags=re.DOTALL
        )

        # Format questions better
        content = self._format_clarification_questions(content)

        return content

    def _enhance_confirmation(self, content: str) -> str:
        """Special enhancements for confirmation messages."""
        # Make technical descriptions more accessible
        content = re.sub(
            r"I have generated a detailed (.*?):(.*?)(?=Do you|Would you|$)",
            r"I've prepared a comprehensive \1 for you:\n\n\2\n",
            content,
            flags=re.DOTALL
        )

        # Format proposed plans better
        content = re.sub(
            r"Proposed plan:(.*?)(?=Do you|Would you|$)",
            r"📋 **Proposed Approach:**\n\1\n",
            content,
            flags=re.DOTALL
        )

        return content

    def _make_reasoning_conversational(self, reasoning: str) -> str:
        """Rewrite reasoning text into conversational, second-person prose.

        Splits on '.' (abbreviations such as "e.g." will therefore be split
        too — acceptable for typical LLM reasoning text) and prepends a
        softening phrase per sentence based on simple keyword heuristics.
        """
        # First, convert perspective from third-person to second-person
        reasoning = self._convert_perspective(reasoning)

        # Split into sentences for processing
        sentences = reasoning.strip().split('.')
        conversational_parts = []

        for i, sentence in enumerate(sentences):
            sentence = sentence.strip()
            if not sentence:
                continue

            # Make the first sentence more natural
            if i == 0:
                # Remove phrases like "is a classic example of" to be more direct
                sentence = re.sub(r"is a (?:classic|typical|clear) example of", "seems to be", sentence)
                sentence = "Looking at what you've shared, " + sentence.lower()
            # For sentences about what's missing
            elif any(word in sentence.lower() for word in ["lacks", "missing", "doesn't have", "without"]):
                if not sentence.lower().startswith(("i ", "it ", "this ")):
                    sentence = "I notice that it " + sentence.lower()
            # For sentences about what's not clear
            elif "not" in sentence.lower() and any(word in sentence.lower() for word in ["specific", "clear", "measurable"]):
                sentence = re.sub(r"it is not", "it isn't quite", sentence, flags=re.IGNORECASE)
                if not sentence.lower().startswith(("i ", "this ")):
                    sentence = "I can see that " + sentence.lower()
            # For requirement sentences
            elif any(word in sentence.lower() for word in ["requires", "needs", "must"]):
                # NOTE(review): "the request" has usually already been rewritten
                # to "your request" by _convert_perspective, so this .replace is
                # normally a no-op — confirm before removing.
                sentence = "To help you effectively, " + sentence.lower().replace("the request", "we'll")
            # Default: make it conversational
            else:
                if len(sentence) > 20 and not sentence.lower().startswith(("i ", "this ", "that ", "we ")):
                    sentence = "Additionally, " + sentence.lower()

            conversational_parts.append(sentence)

        result = '. '.join(conversational_parts)
        if result and not result.endswith('.'):
            result += '.'

        return result

    def _convert_perspective(self, text: str) -> str:
        """Convert text from third-person to second-person perspective."""
        # Replace "the user" references with "you"
        text = re.sub(r"the user'?s?\s+", "your ", text, flags=re.IGNORECASE)
        text = re.sub(r"user'?s?\s+", "your ", text, flags=re.IGNORECASE)

        # Replace "the request" with "your request" or "what you're asking"
        text = re.sub(r"the request", "your request", text, flags=re.IGNORECASE)

        # Replace "the business" with "your business"
        text = re.sub(r"the business", "your business", text, flags=re.IGNORECASE)

        # Replace impersonal constructions
        text = re.sub(r"it is (?:a|an)", "this is", text, flags=re.IGNORECASE)
        text = re.sub(r"this is (?:a|an) vague", "this seems vague", text, flags=re.IGNORECASE)

        return text

    def _format_clarification_questions(self, content: str) -> str:
        """Reformat the 📝 questions section into a numbered, bolded list."""
        # Find questions section (from the 📝 header up to the next section/closing)
        questions_match = re.search(r"📝.*?:(.*?)(?=💭|✨|$)", content, re.DOTALL)
        if not questions_match:
            return content

        questions_text = questions_match.group(1)
        questions = self._extract_questions(questions_text)

        if questions:
            formatted = "📝 Questions to Help Me Understand Better:\n\n"
            for i, q in enumerate(questions, 1):
                # Clean up the question (strip leading/trailing bullet chars)
                q = q.strip().strip('-*•')
                if not q.endswith('?'):
                    q += '?'
                formatted += f"**{i}.** {q}\n\n"

            # Replace the original questions section
            content = content.replace(questions_match.group(0), formatted)

        return content

    def _extract_questions(self, text: str) -> List[str]:
        """Extract individual questions from text (split on ';' or newlines)."""
        parts = re.split(r'[;\n]', text)
        questions = []

        for part in parts:
            part = part.strip()
            # '*Reason:' lines are explanatory metadata, not questions
            if part and not part.startswith('*Reason:'):
                questions.append(part)

        return questions

    def _format_json_structures(self, content: str) -> str:
        """Pretty-print simple single-level ``{...}`` fragments in `content`."""
        def format_dict(match):
            # Best-effort reformat: quotes stripped, one entry per line.
            # On any failure, return the fragment untouched.
            try:
                dict_str = match.group(0)
                formatted = dict_str.replace("'", "")
                formatted = formatted.replace("{", "{\n ")
                formatted = formatted.replace(",", ",\n ")
                formatted = formatted.replace("}", "\n}")
                return formatted
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                return match.group(0)

        # Apply to simple (non-nested) dictionaries only
        content = re.sub(r'\{[^{}]+\}', format_dict, content)

        return content

    def _enhance_bullet_points(self, content: str) -> str:
        """Normalize bullet markers and bold numbered-list prefixes."""
        # Replace various bullet point styles with a consistent one
        content = re.sub(r'^[-*•]\s*', '• ', content, flags=re.MULTILINE)
        # NOTE: the bolded prefix keeps the trailing whitespace of the match
        # (renders as "**1. **"); preserved as original behavior.
        content = re.sub(r'^\d+\.\s*', lambda m: f"**{m.group(0)}**", content, flags=re.MULTILINE)

        return content

    def _improve_spacing(self, content: str) -> str:
        """Improve spacing for better readability."""
        # Add space before emoji headers
        content = re.sub(r'(?<!\n)(💭|📝|🔍|💡|🔧|📊|🎯|📈)', r'\n\n\1', content)

        # Ensure proper spacing after headers
        content = re.sub(r'(💭|📝|🔍|💡|🔧|📊|🎯|📈)(.*?):', r'\1\2:\n', content)

        # Clean up excessive newlines
        content = re.sub(r'\n{3,}', '\n\n', content)

        return content.strip()
331
+
332
+
333
+ # Convenience functions
334
def format_clarification_message(
    questions: List[str],
    round_number: int = 1,
    reasoning: Optional[str] = None
) -> str:
    """
    Build a reader-friendly clarification message from raw questions.

    Args:
        questions: List of clarification questions
        round_number: Current clarification round number
        reasoning: Optional reasoning text to preserve in the message

    Returns:
        The questions (and reasoning, if given) run through
        LLMOutputTransformer with 'clarification' styling applied.
    """
    segments = [f"Clarification needed (Round {round_number}): " + "; ".join(questions)]
    if reasoning:
        segments.append(f"Reasoning: {reasoning}")

    raw = "\n\n".join(segments)
    return LLMOutputTransformer().transform_message(raw, 'clarification')
360
+
361
+
362
def format_confirmation_message(
    content: Union[str, Dict[str, Any]],
    confirmation_type: str = 'strategy'
) -> str:
    """
    Build a reader-friendly confirmation message, preserving technical detail.

    Args:
        content: Confirmation content; dicts are serialized as indented JSON
        confirmation_type: Type of confirmation (currently unused — kept for
            interface compatibility)

    Returns:
        The content run through LLMOutputTransformer with 'confirmation' styling.
    """
    text = content if isinstance(content, str) else json.dumps(content, indent=2)
    return LLMOutputTransformer().transform_message(text, 'confirmation')
382
+
383
+
384
def enhance_reasoning(reasoning: str) -> str:
    """
    Rewrite raw reasoning text into a more conversational form.

    Args:
        reasoning: Raw reasoning text

    Returns:
        The reasoning rewritten in second-person, conversational style.
    """
    return LLMOutputTransformer()._make_reasoning_conversational(reasoning)
396
+
397
+
398
def clean_technical_terms(content: str) -> str:
    """
    Swap technical jargon in `content` for user-friendly alternatives.

    Args:
        content: Content possibly containing technical terms

    Returns:
        The content with friendly terms, headers and formatting applied.
    """
    return LLMOutputTransformer()._enhance_readability(content)
@@ -0,0 +1,23 @@
1
"""
Utils module for the Python middleware application.

This module provides utility functions including:
- Prompt loading functionality
- Token usage tracking
- Execution utilities
"""

from .prompt_loader import get_prompt
from .token_usage_repository import TokenUsageRepository
from .execution_utils import ExecutionUtils

# Public API re-exported at package level.
__all__ = [
    'get_prompt',
    'TokenUsageRepository',
    'ExecutionUtils',
]

# Version information
__version__ = "1.0.0"
__author__ = "Python Middleware Team"
__description__ = "Utility functions for the middleware application"
@@ -0,0 +1,50 @@
1
+ from typing import Any, List
2
+ import logging
3
+ from abc import ABC, abstractmethod
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
+
8
class CustomAsyncCallbackHandler(ABC):
    """Abstract interface for asynchronous LLM lifecycle callbacks.

    Concrete handlers subclass this and implement all three hooks, which are
    awaited at the start, successful end, and failure of an LLM call.

    Payloads are plain dictionaries (``Dict[str, Any]``-style) rather than
    provider-specific LLM types, which keeps this module free of circular
    imports and decoupled from any particular client implementation.
    """

    @abstractmethod
    async def on_llm_start(self, messages: List[dict], **kwargs: Any) -> None:
        """Invoked just before the LLM call is made.

        Args:
            messages: Message dicts, each with 'role' and 'content' keys.
            **kwargs: Extra context such as provider, model, etc.
        """

    @abstractmethod
    async def on_llm_end(self, response: dict, **kwargs: Any) -> None:
        """Invoked after the LLM call completes successfully.

        Args:
            response: Response dict with keys like 'content', 'tokens_used',
                'model', etc.
            **kwargs: Extra context such as provider, model, etc.
        """

    @abstractmethod
    async def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
        """Invoked when the LLM call raises.

        Args:
            error: The exception raised during the LLM call.
            **kwargs: Extra context such as provider, model, etc.
        """
@@ -0,0 +1,158 @@
1
import asyncio
import hashlib
import json
import logging
import threading
import time
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Optional, Tuple

from cachetools import LRUCache
from tenacity import retry, stop_after_attempt, wait_exponential, after_log
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
class ExecutionUtils:
    """
    Common utility set for the execution layer: context-aware result caching
    with TTL support, plus retry and timeout helpers built on ``tenacity``.
    """

    def __init__(self, cache_size: int = 100, cache_ttl: int = 3600, retry_attempts: int = 3, retry_backoff: float = 1.0):
        """
        Initialize execution utility class.

        Args:
            cache_size (int): Maximum number of cache entries (<= 0 disables caching)
            cache_ttl (int): Default cache time-to-live (seconds)
            retry_attempts (int): Number of retry attempts
            retry_backoff (float): Retry backoff multiplier for exponential wait
        """
        self.cache_size = cache_size
        self.cache_ttl = cache_ttl
        self.retry_attempts = retry_attempts
        self.retry_backoff = retry_backoff
        self._cache = LRUCache(maxsize=self.cache_size) if cache_size > 0 else None
        # Guards both the LRU cache and the TTL bookkeeping dict below.
        self._cache_lock = threading.Lock()
        # Expiry timestamps (epoch seconds) keyed by cache key.
        self._cache_ttl_dict: Dict[str, float] = {}

    def generate_cache_key(self, func_name: str, user_id: str, task_id: str, args: tuple, kwargs: Dict[str, Any]) -> str:
        """
        Generate a deterministic, context-based cache key from the user ID,
        task ID, function name and call arguments.

        Args:
            func_name (str): Function name
            user_id (str): User ID
            task_id (str): Task ID
            args (tuple): Positional arguments
            kwargs (Dict[str, Any]): Keyword arguments ('self' is excluded)

        Returns:
            str: Hex-digest cache key, stable across processes and restarts
        """
        key_dict = {
            'func': func_name,
            'user_id': user_id,
            'task_id': task_id,
            'args': args,
            'kwargs': {k: v for k, v in kwargs.items() if k != 'self'}
        }
        try:
            key_str = json.dumps(key_dict, sort_keys=True)
        except (TypeError, ValueError):
            # Non-JSON-serializable arguments: fall back to repr-style keying.
            key_str = str(key_dict)
        # BUG FIX: previously returned str(hash(key_str)). Python's built-in
        # hash() for str is salted per process (PYTHONHASHSEED), so the same
        # call produced different keys in different workers/restarts, defeating
        # any shared or persistent cache. SHA-256 is deterministic everywhere.
        return hashlib.sha256(key_str.encode("utf-8")).hexdigest()

    def get_from_cache(self, cache_key: str) -> Optional[Any]:
        """
        Get result from cache if it exists and is not expired.

        Args:
            cache_key (str): Cache key

        Returns:
            Optional[Any]: Cached result, or None if absent/expired/disabled
        """
        if not self._cache:
            return None
        with self._cache_lock:
            if cache_key in self._cache:
                # Lazy expiry: evict on read once the TTL deadline has passed.
                if cache_key in self._cache_ttl_dict and time.time() > self._cache_ttl_dict[cache_key]:
                    del self._cache[cache_key]
                    del self._cache_ttl_dict[cache_key]
                    return None
                return self._cache[cache_key]
            return None

    def add_to_cache(self, cache_key: str, result: Any, ttl: Optional[int] = None) -> None:
        """
        Add result to cache with optional time-to-live setting.

        Args:
            cache_key (str): Cache key
            result (Any): Result to cache
            ttl (Optional[int]): Time-to-live in seconds; defaults to the
                instance's cache_ttl. A ttl <= 0 means no expiry.
        """
        if not self._cache:
            return
        with self._cache_lock:
            self._cache[cache_key] = result
            ttl = ttl if ttl is not None else self.cache_ttl
            if ttl > 0:
                self._cache_ttl_dict[cache_key] = time.time() + ttl
            # BUG FIX: the LRU cache evicts entries silently, but their expiry
            # timestamps used to remain in _cache_ttl_dict forever, leaking
            # memory. Prune timestamps for keys no longer in the cache.
            for stale in [k for k in self._cache_ttl_dict if k not in self._cache]:
                del self._cache_ttl_dict[stale]

    def create_retry_strategy(self, metric_name: Optional[str] = None) -> Callable:
        """
        Create a tenacity retry decorator for execution operations.

        Args:
            metric_name (Optional[str]): Operation name used in retry logs

        Returns:
            Callable: Retry decorator (exponential backoff, bounded attempts)
        """
        def after_retry(retry_state):
            # Logged after each failed attempt; idle_for is the backoff slept so far.
            logger.warning(f"Retry {retry_state.attempt_number}/{self.retry_attempts} for {metric_name or 'operation'} after {retry_state.idle_for}s: {retry_state.outcome.exception()}")

        return retry(
            stop=stop_after_attempt(self.retry_attempts),
            wait=wait_exponential(multiplier=self.retry_backoff, min=1, max=10),
            after=after_retry
        )

    @contextmanager
    def timeout_context(self, seconds: int):
        """
        Context manager yielding a future that fails after `seconds`.

        The timeout only takes effect if the caller awaits or races against
        the yielded future; the managed block itself is not interrupted.

        NOTE(review): asyncio.get_event_loop() is deprecated outside a running
        loop (Python 3.10+) — confirm callers always use this inside a running
        event loop, or switch to asyncio.get_running_loop().

        Args:
            seconds (int): Timeout duration (seconds)

        Raises:
            TimeoutError: Set on the yielded future when the deadline passes
        """
        loop = asyncio.get_event_loop()
        future = asyncio.Future()
        handle = loop.call_later(seconds, lambda: future.set_exception(TimeoutError(f"Operation timed out after {seconds}s")))
        try:
            yield future
        finally:
            # Always cancel the timer so it cannot fire after the block exits.
            handle.cancel()

    async def execute_with_retry_and_timeout(self, func: Callable, timeout: int, *args, **kwargs) -> Any:
        """
        Execute an async operation with retry and an overall timeout.

        The timeout bounds the *total* time including all retry attempts.

        Args:
            func (Callable): Async function to execute
            timeout (int): Overall timeout duration (seconds)
            *args: Positional arguments forwarded to func
            **kwargs: Keyword arguments forwarded to func

        Returns:
            Any: Operation result

        Raises:
            TimeoutError: If the operation (including retries) exceeds `timeout`
            Exception: The last underlying error if all retry attempts fail
        """
        retry_strategy = self.create_retry_strategy(func.__name__)
        try:
            return await asyncio.wait_for(retry_strategy(func)(*args, **kwargs), timeout=timeout)
        except asyncio.TimeoutError:
            raise TimeoutError(f"Operation timed out after {timeout}s")
aiecs/utils/logging.py ADDED
@@ -0,0 +1 @@
1
+ # Placeholder for logging.py
@@ -0,0 +1,13 @@
1
+ import yaml
2
+ import os
3
+
4
def get_prompt(mode: str, service: str) -> str:
    """
    Load the prompt for the specified service from services/{mode}/prompts.yaml.

    Args:
        mode: Service mode; selects the prompts file directory
        service: Key of the desired prompt inside the YAML mapping

    Returns:
        The prompt string, "[Default prompt]" when the file does not exist,
        or "[No specific prompt found]" when the key (or a usable mapping)
        is missing.
    """
    # NOTE(review): path is relative to the process CWD — confirm callers
    # always run from the project root.
    path = f"app/services/{mode}/prompts.yaml"
    if not os.path.exists(path):
        return "[Default prompt]"
    with open(path, "r", encoding="utf-8") as f:
        data = yaml.safe_load(f)
    # BUG FIX: safe_load returns None for an empty file (and may return a
    # non-dict for scalar/list documents); calling .get on those raised
    # AttributeError. Treat anything that is not a mapping as "not found".
    if not isinstance(data, dict):
        return "[No specific prompt found]"
    return data.get(service, "[No specific prompt found]")