aiecs 1.7.6__py3-none-any.whl → 1.7.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aiecs might be problematic. Consult the advisory details on the package registry for more information.

aiecs/__init__.py CHANGED
@@ -5,7 +5,7 @@ A powerful Python middleware framework for building AI-powered applications
5
5
  with tool orchestration, task execution, and multi-provider LLM support.
6
6
  """
7
7
 
8
- __version__ = "1.7.6"
8
+ __version__ = "1.7.17"
9
9
  __author__ = "AIECS Team"
10
10
  __email__ = "iretbl@gmail.com"
11
11
 
@@ -212,21 +212,51 @@ class ToolConfigLoader:
212
212
  logger.warning(f"Failed to load {global_config_path}: {e}. Skipping.")
213
213
 
214
214
  # Load tool-specific config (higher precedence)
215
+ # Try multiple locations:
216
+ # 1. config/tools/{tool_name}.yaml (standard location)
217
+ # 2. config/{tool_name}.yaml (direct in config_dir, for custom paths)
215
218
  tools_dir = config_dir / "tools"
219
+ search_dirs = []
216
220
  if tools_dir.exists() and tools_dir.is_dir():
217
- tool_config_path = tools_dir / f"{tool_name}.yaml"
218
- if tool_config_path.exists():
219
- try:
220
- with open(tool_config_path, "r", encoding="utf-8") as f:
221
- tool_data = yaml.safe_load(f)
222
- if tool_data:
223
- # Merge tool-specific config (overrides global)
224
- merged_config.update(tool_data)
225
- logger.debug(f"Loaded tool-specific config from {tool_config_path}")
226
- except yaml.YAMLError as e:
227
- logger.warning(f"Invalid YAML in {tool_config_path}: {e}. Skipping.")
228
- except Exception as e:
229
- logger.warning(f"Failed to load {tool_config_path}: {e}. Skipping.")
221
+ search_dirs.append(tools_dir)
222
+ # Also search directly in config_dir for custom path structures
223
+ search_dirs.append(config_dir)
224
+
225
+ # Try multiple naming conventions for tool config files
226
+ # 1. {tool_name}.yaml (e.g., image.yaml)
227
+ # 2. {tool_name}_tool.yaml (e.g., image_tool.yaml)
228
+ # 3. {ToolName}.yaml (e.g., ImageTool.yaml)
229
+ possible_names = [
230
+ f"{tool_name}.yaml",
231
+ f"{tool_name}_tool.yaml",
232
+ ]
233
+ # Also try with capitalized class name if tool_name is lowercase
234
+ if tool_name.islower():
235
+ class_name = tool_name.replace("_", "").title() + "Tool"
236
+ possible_names.append(f"{class_name}.yaml")
237
+
238
+ tool_config_path = None
239
+ for search_dir in search_dirs:
240
+ for name in possible_names:
241
+ candidate_path = search_dir / name
242
+ if candidate_path.exists():
243
+ tool_config_path = candidate_path
244
+ break
245
+ if tool_config_path:
246
+ break
247
+
248
+ if tool_config_path:
249
+ try:
250
+ with open(tool_config_path, "r", encoding="utf-8") as f:
251
+ tool_data = yaml.safe_load(f)
252
+ if tool_data:
253
+ # Merge tool-specific config (overrides global)
254
+ merged_config.update(tool_data)
255
+ logger.debug(f"Loaded tool-specific config from {tool_config_path}")
256
+ except yaml.YAMLError as e:
257
+ logger.warning(f"Invalid YAML in {tool_config_path}: {e}. Skipping.")
258
+ except Exception as e:
259
+ logger.warning(f"Failed to load {tool_config_path}: {e}. Skipping.")
230
260
 
231
261
  return merged_config
232
262
 
@@ -362,17 +392,23 @@ class ToolConfigLoader:
362
392
  logger.debug(f"Loaded config for {tool_name}: {len(merged_config)} keys")
363
393
  return merged_config
364
394
 
365
- def set_config_path(self, path: Union[str, Path]) -> None:
395
+ def set_config_path(self, path: Optional[Union[str, Path]] = None) -> None:
366
396
  """
367
397
  Set custom config path.
368
398
 
369
399
  Args:
370
- path: Path to config directory or file
400
+ path: Path to config directory or file. If None, resets to auto-discovery.
371
401
  """
372
- self._config_path = Path(path)
373
- # Clear cached config directory to force re-discovery
374
- self._cached_config_dir = None
375
- logger.info(f"Set custom config path: {self._config_path}")
402
+ if path is None:
403
+ self._config_path = None
404
+ # Clear cached config directory to force re-discovery
405
+ self._cached_config_dir = None
406
+ logger.info("Reset config path to auto-discovery")
407
+ else:
408
+ self._config_path = Path(path)
409
+ # Clear cached config directory to force re-discovery
410
+ self._cached_config_dir = None
411
+ logger.info(f"Set custom config path: {self._config_path}")
376
412
 
377
413
  def get_config_path(self) -> Optional[Path]:
378
414
  """
@@ -1296,6 +1296,67 @@ class BaseAIAgent(ABC):
1296
1296
 
1297
1297
  self._metrics.updated_at = datetime.utcnow()
1298
1298
 
1299
+ def update_cache_metrics(
1300
+ self,
1301
+ cache_read_tokens: Optional[int] = None,
1302
+ cache_creation_tokens: Optional[int] = None,
1303
+ cache_hit: Optional[bool] = None,
1304
+ ) -> None:
1305
+ """
1306
+ Update prompt cache metrics from LLM response.
1307
+
1308
+ This method tracks provider-level prompt caching statistics to monitor
1309
+ cache hit rates and token savings.
1310
+
1311
+ Args:
1312
+ cache_read_tokens: Tokens read from cache (indicates cache hit)
1313
+ cache_creation_tokens: Tokens used to create a new cache entry
1314
+ cache_hit: Whether the request hit a cached prompt prefix
1315
+
1316
+ Example:
1317
+ # After receiving LLM response
1318
+ agent.update_cache_metrics(
1319
+ cache_read_tokens=response.cache_read_tokens,
1320
+ cache_creation_tokens=response.cache_creation_tokens,
1321
+ cache_hit=response.cache_hit
1322
+ )
1323
+ """
1324
+ # Track LLM request count
1325
+ self._metrics.total_llm_requests += 1
1326
+
1327
+ # Track cache hit/miss
1328
+ if cache_hit is True:
1329
+ self._metrics.cache_hits += 1
1330
+ elif cache_hit is False:
1331
+ self._metrics.cache_misses += 1
1332
+ elif cache_read_tokens is not None and cache_read_tokens > 0:
1333
+ # Infer cache hit from tokens
1334
+ self._metrics.cache_hits += 1
1335
+ elif cache_creation_tokens is not None and cache_creation_tokens > 0:
1336
+ # Infer cache miss from creation tokens
1337
+ self._metrics.cache_misses += 1
1338
+
1339
+ # Update cache hit rate
1340
+ total_cache_requests = self._metrics.cache_hits + self._metrics.cache_misses
1341
+ if total_cache_requests > 0:
1342
+ self._metrics.cache_hit_rate = self._metrics.cache_hits / total_cache_requests
1343
+
1344
+ # Track cache tokens
1345
+ if cache_read_tokens is not None and cache_read_tokens > 0:
1346
+ self._metrics.total_cache_read_tokens += cache_read_tokens
1347
+ # Provider-level caching saves ~90% of token cost for cached tokens
1348
+ self._metrics.estimated_cache_savings_tokens += int(cache_read_tokens * 0.9)
1349
+
1350
+ if cache_creation_tokens is not None and cache_creation_tokens > 0:
1351
+ self._metrics.total_cache_creation_tokens += cache_creation_tokens
1352
+
1353
+ self._metrics.updated_at = datetime.utcnow()
1354
+ logger.debug(
1355
+ f"Agent {self.agent_id} cache metrics updated: "
1356
+ f"hit_rate={self._metrics.cache_hit_rate:.2%}, "
1357
+ f"read_tokens={cache_read_tokens}, creation_tokens={cache_creation_tokens}"
1358
+ )
1359
+
1299
1360
  def update_session_metrics(
1300
1361
  self,
1301
1362
  session_status: str,
@@ -1518,6 +1579,18 @@ class BaseAIAgent(ABC):
1518
1579
  "p95_operation_time": self._metrics.p95_operation_time,
1519
1580
  "p99_operation_time": self._metrics.p99_operation_time,
1520
1581
  "recent_operations": self._metrics.operation_history[-10:], # Last 10 operations
1582
+ # Prompt cache metrics
1583
+ "prompt_cache": {
1584
+ "total_llm_requests": self._metrics.total_llm_requests,
1585
+ "cache_hits": self._metrics.cache_hits,
1586
+ "cache_misses": self._metrics.cache_misses,
1587
+ "cache_hit_rate": self._metrics.cache_hit_rate,
1588
+ "cache_hit_rate_pct": f"{self._metrics.cache_hit_rate * 100:.1f}%",
1589
+ "total_cache_read_tokens": self._metrics.total_cache_read_tokens,
1590
+ "total_cache_creation_tokens": self._metrics.total_cache_creation_tokens,
1591
+ "estimated_cache_savings_tokens": self._metrics.estimated_cache_savings_tokens,
1592
+ "estimated_cache_savings_cost": self._metrics.estimated_cache_savings_cost,
1593
+ },
1521
1594
  }
1522
1595
 
1523
1596
  def get_health_status(self) -> Dict[str, Any]:
@@ -1658,6 +1731,12 @@ class BaseAIAgent(ABC):
1658
1731
  # Error tracking
1659
1732
  "error_count": self._metrics.error_count,
1660
1733
  "error_types": self._metrics.error_types,
1734
+ # Prompt cache metrics
1735
+ "cache_hit_rate": self._metrics.cache_hit_rate,
1736
+ "cache_hits": self._metrics.cache_hits,
1737
+ "cache_misses": self._metrics.cache_misses,
1738
+ "total_cache_read_tokens": self._metrics.total_cache_read_tokens,
1739
+ "estimated_cache_savings_tokens": self._metrics.estimated_cache_savings_tokens,
1661
1740
  },
1662
1741
  "capabilities": [cap.capability_type for cap in self.get_capabilities()],
1663
1742
  "active_goals": len([g for g in self._goals.values() if g.status == GoalStatus.IN_PROGRESS]),