aiecs 1.7.6__py3-none-any.whl → 1.7.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiecs might be problematic.
- aiecs/__init__.py +1 -1
- aiecs/config/tool_config.py +55 -19
- aiecs/domain/agent/base_agent.py +79 -0
- aiecs/domain/agent/hybrid_agent.py +468 -172
- aiecs/domain/agent/models.py +10 -0
- aiecs/domain/agent/tools/schema_generator.py +17 -4
- aiecs/llm/client_factory.py +6 -1
- aiecs/llm/clients/base_client.py +5 -1
- aiecs/llm/clients/google_function_calling_mixin.py +46 -88
- aiecs/llm/clients/googleai_client.py +79 -6
- aiecs/llm/clients/vertex_client.py +310 -21
- aiecs/main.py +2 -2
- aiecs/tools/docs/document_creator_tool.py +143 -2
- aiecs/tools/docs/document_parser_tool.py +9 -4
- aiecs/tools/docs/document_writer_tool.py +179 -0
- aiecs/tools/task_tools/image_tool.py +49 -14
- {aiecs-1.7.6.dist-info → aiecs-1.7.17.dist-info}/METADATA +1 -1
- {aiecs-1.7.6.dist-info → aiecs-1.7.17.dist-info}/RECORD +22 -22
- {aiecs-1.7.6.dist-info → aiecs-1.7.17.dist-info}/WHEEL +0 -0
- {aiecs-1.7.6.dist-info → aiecs-1.7.17.dist-info}/entry_points.txt +0 -0
- {aiecs-1.7.6.dist-info → aiecs-1.7.17.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.7.6.dist-info → aiecs-1.7.17.dist-info}/top_level.txt +0 -0
aiecs/domain/agent/models.py
CHANGED
@@ -381,6 +381,16 @@ class AgentMetrics(BaseModel):
     p95_operation_time: Optional[float] = Field(None, ge=0, description="95th percentile operation time in seconds")
     p99_operation_time: Optional[float] = Field(None, ge=0, description="99th percentile operation time in seconds")
 
+    # Prompt cache metrics (for LLM provider-level caching observability)
+    total_llm_requests: int = Field(default=0, ge=0, description="Total number of LLM requests made")
+    cache_hits: int = Field(default=0, ge=0, description="Number of LLM requests with cache hits")
+    cache_misses: int = Field(default=0, ge=0, description="Number of LLM requests without cache hits (cache creation)")
+    cache_hit_rate: float = Field(default=0.0, ge=0.0, le=1.0, description="Prompt cache hit rate (0-1)")
+    total_cache_read_tokens: int = Field(default=0, ge=0, description="Total tokens read from prompt cache")
+    total_cache_creation_tokens: int = Field(default=0, ge=0, description="Total tokens used to create cache entries")
+    estimated_cache_savings_tokens: int = Field(default=0, ge=0, description="Estimated tokens saved from cache (cache_read_tokens * 0.9)")
+    estimated_cache_savings_cost: float = Field(default=0.0, ge=0, description="Estimated cost saved from cache in USD")
+
     # Timestamps
     last_reset_at: Optional[datetime] = Field(None, description="When metrics were last reset")
     updated_at: datetime = Field(default_factory=datetime.utcnow, description="Last metrics update")
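The derived fields follow directly from the raw counters. A minimal bookkeeping sketch of how a caller could keep them consistent (the record_llm_request helper is hypothetical, not part of this release; the 0.9 factor mirrors the estimated_cache_savings_tokens field description):

def record_llm_request(metrics, cache_read_tokens: int, cache_creation_tokens: int) -> None:
    # Count the request and classify it as a prompt-cache hit or miss.
    metrics.total_llm_requests += 1
    if cache_read_tokens > 0:
        metrics.cache_hits += 1
    else:
        metrics.cache_misses += 1
    # Accumulate token totals, then recompute the derived metrics.
    metrics.total_cache_read_tokens += cache_read_tokens
    metrics.total_cache_creation_tokens += cache_creation_tokens
    metrics.cache_hit_rate = metrics.cache_hits / metrics.total_llm_requests
    metrics.estimated_cache_savings_tokens = int(metrics.total_cache_read_tokens * 0.9)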
aiecs/domain/agent/tools/schema_generator.py
CHANGED
@@ -207,6 +207,12 @@ class ToolSchemaGenerator:
         if not hasattr(schema_class, "model_fields"):
             return properties, required
 
+        # Import PydanticUndefined for v2 compatibility
+        try:
+            from pydantic_core import PydanticUndefined
+        except ImportError:
+            PydanticUndefined = type(None)  # Fallback for Pydantic v1
+
         for field_name, field_info in schema_class.model_fields.items():
             # Build property schema
             prop_schema: Dict[str, Any] = {}
@@ -219,11 +225,18 @@ class ToolSchemaGenerator:
             if hasattr(field_info, "description") and field_info.description:
                 prop_schema["description"] = field_info.description
 
-            #
-            if field_info
-
+            # Check if required using Pydantic v2 API (preferred)
+            if hasattr(field_info, "is_required") and callable(field_info.is_required):
+                if field_info.is_required():
+                    required.append(field_name)
+                elif field_info.default is not None and field_info.default is not PydanticUndefined:
+                    prop_schema["default"] = field_info.default
             else:
-
+                # Fallback for Pydantic v1
+                if field_info.default is None or field_info.default == inspect.Parameter.empty:
+                    required.append(field_name)
+                else:
+                    prop_schema["default"] = field_info.default
 
             properties[field_name] = prop_schema
 
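The point of the change: in Pydantic v2, FieldInfo.is_required() and the PydanticUndefined sentinel distinguish "no default" from an explicit None default, which the old default-is-None check conflated. A small illustration, assuming Pydantic v2 is installed (the model is made up):

from pydantic import BaseModel, Field

class SearchParams(BaseModel):
    query: str = Field(description="Search query")              # no default -> required
    limit: int = Field(default=10, description="Max results")   # has default -> optional

for name, info in SearchParams.model_fields.items():
    print(name, info.is_required(), repr(info.default))
# query True PydanticUndefined
# limit False 10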
aiecs/llm/client_factory.py
CHANGED
@@ -7,6 +7,7 @@ from .clients.openai_client import OpenAIClient
 from .clients.vertex_client import VertexAIClient
 from .clients.googleai_client import GoogleAIClient
 from .clients.xai_client import XAIClient
+from .clients.openai_compatible_mixin import StreamChunk
 from .callbacks.custom_callbacks import CustomAsyncCallbackHandler
 
 if TYPE_CHECKING:
@@ -407,7 +408,11 @@ class LLMClientManager:
             max_tokens=max_tokens,
             **kwargs,
         ):
-
+            # Handle StreamChunk objects (when return_chunks=True or function calling)
+            if hasattr(chunk, 'content') and chunk.content:
+                collected_content += chunk.content
+            elif isinstance(chunk, str):
+                collected_content += chunk
             yield chunk
 
         # Create a response object for callbacks (streaming doesn't return LLMResponse directly)
aiecs/llm/clients/base_client.py
CHANGED
@@ -139,7 +139,11 @@ class SafetyBlockError(LLMClientError):
         if self.block_type:
             msg += f" (Block type: {self.block_type})"
         if self.safety_ratings:
-
+            # Safely extract categories, handling potential non-dict elements
+            categories = []
+            for r in self.safety_ratings:
+                if isinstance(r, dict) and r.get("blocked"):
+                    categories.append(r.get("category", "UNKNOWN"))
             if categories:
                 msg += f" (Categories: {', '.join(categories)})"
         return msg
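The rewritten extraction skips entries that are not dicts instead of assuming every rating supports .get(). For example, with deliberately mixed-type ratings (made-up data):

safety_ratings = [
    {"category": "HARM_CATEGORY_HATE_SPEECH", "blocked": True},
    "unexpected string entry",  # would previously raise AttributeError
    {"category": "HARM_CATEGORY_HARASSMENT", "blocked": False},
]
categories = []
for r in safety_ratings:
    if isinstance(r, dict) and r.get("blocked"):
        categories.append(r.get("category", "UNKNOWN"))
print(categories)  # ['HARM_CATEGORY_HATE_SPEECH']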
aiecs/llm/clients/google_function_calling_mixin.py
CHANGED
@@ -5,6 +5,7 @@ Provides shared implementation for Google providers (Vertex AI, Google AI)
 that use FunctionDeclaration format for Function Calling.
 """
 
+import json
 import logging
 from typing import Dict, Any, Optional, List, Union, AsyncGenerator
 from dataclasses import dataclass
@@ -12,8 +13,6 @@ from vertexai.generative_models import (
     FunctionDeclaration,
     Tool,
 )
-from google.genai.types import Schema, Type
-
 from .base_client import LLMMessage, LLMResponse
 
 logger = logging.getLogger(__name__)
@@ -32,13 +31,46 @@ except ImportError:
     tool_calls: Optional[List[Dict[str, Any]]] = None
 
 
+def _serialize_function_args(args) -> str:
+    """
+    Safely serialize function call arguments to JSON string.
+
+    Handles MapComposite/protobuf objects from Vertex AI by converting
+    them to regular dicts before JSON serialization.
+
+    Args:
+        args: Function call arguments (may be MapComposite, dict, or other)
+
+    Returns:
+        JSON string representation of the arguments
+    """
+    if args is None:
+        return "{}"
+
+    # Handle MapComposite/protobuf objects (they have items() method)
+    if hasattr(args, 'items'):
+        # Convert to regular dict
+        args_dict = dict(args)
+    elif isinstance(args, dict):
+        args_dict = args
+    else:
+        # Try to convert to dict if possible
+        try:
+            args_dict = dict(args)
+        except (TypeError, ValueError):
+            # Last resort: use str() but this should rarely happen
+            return str(args)
+
+    return json.dumps(args_dict, ensure_ascii=False)
+
+
 class GoogleFunctionCallingMixin:
     """
     Mixin class providing Google Function Calling implementation.
-
+
     This mixin can be used by Google providers (Vertex AI, Google AI)
     that use FunctionDeclaration format for Function Calling.
-
+
     Usage:
         class VertexAIClient(BaseLLMClient, GoogleFunctionCallingMixin):
             async def generate_text(self, messages, tools=None, ...):
@@ -71,15 +103,13 @@ class GoogleFunctionCallingMixin:
                 if not func_name:
                     logger.warning(f"Skipping tool without name: {tool}")
                     continue
-
-                #
-
-
-                # Create FunctionDeclaration
+
+                # Create FunctionDeclaration with raw dict parameters
+                # Let Vertex SDK handle the schema conversion internally
                 function_declaration = FunctionDeclaration(
                     name=func_name,
                     description=func_description,
-                    parameters=
+                    parameters=func_parameters,
                 )
 
                 function_declarations.append(function_declaration)
@@ -91,78 +121,6 @@ class GoogleFunctionCallingMixin:
             return [Tool(function_declarations=function_declarations)]
         return []
 
-    def _convert_json_schema_to_google_schema(
-        self, json_schema: Dict[str, Any]
-    ) -> Schema:
-        """
-        Convert JSON Schema to Google Schema format.
-
-        Args:
-            json_schema: JSON Schema dictionary
-
-        Returns:
-            Google Schema object
-        """
-        schema_type = json_schema.get("type", "object")
-        properties = json_schema.get("properties", {})
-        required = json_schema.get("required", [])
-
-        # Convert type
-        google_type = self._convert_json_type_to_google_type(schema_type)
-
-        # Convert properties (only for object types)
-        google_properties = None
-        if schema_type == "object" and properties:
-            google_properties = {}
-            for prop_name, prop_schema in properties.items():
-                google_properties[prop_name] = self._convert_json_schema_to_google_schema(
-                    prop_schema
-                )
-
-        # Handle array items
-        items = None
-        if schema_type == "array" and "items" in json_schema:
-            items = self._convert_json_schema_to_google_schema(json_schema["items"])
-
-        # Create Schema
-        schema_kwargs = {
-            "type": google_type,
-        }
-
-        if google_properties is not None:
-            schema_kwargs["properties"] = google_properties
-
-        if required:
-            schema_kwargs["required"] = required
-
-        if items is not None:
-            schema_kwargs["items"] = items
-
-        schema = Schema(**schema_kwargs)
-
-        return schema
-
-    def _convert_json_type_to_google_type(self, json_type: str) -> Type:
-        """
-        Convert JSON Schema type to Google Type enum.
-
-        Args:
-            json_type: JSON Schema type string
-
-        Returns:
-            Google Type enum value
-        """
-        type_mapping = {
-            "string": Type.STRING,
-            "number": Type.NUMBER,
-            "integer": Type.NUMBER,  # Google uses NUMBER for both
-            "boolean": Type.BOOLEAN,
-            "array": Type.ARRAY,
-            "object": Type.OBJECT,
-        }
-
-        return type_mapping.get(json_type.lower(), Type.OBJECT)
-
     def _extract_function_calls_from_google_response(
         self, response: Any
     ) -> Optional[List[Dict[str, Any]]]:
@@ -191,10 +149,10 @@ class GoogleFunctionCallingMixin:
                         "type": "function",
                         "function": {
                             "name": func_call.name,
-                            "arguments":
+                            "arguments": _serialize_function_args(func_call.args) if hasattr(func_call, "args") else "{}",
                         },
                     })
-
+
                 # Check for content.parts with function_call (newer API)
                 elif hasattr(candidate, "content") and hasattr(candidate.content, "parts"):
                     for part in candidate.content.parts:
@@ -205,7 +163,7 @@ class GoogleFunctionCallingMixin:
                                 "type": "function",
                                 "function": {
                                     "name": func_call.name,
-                                    "arguments":
+                                    "arguments": _serialize_function_args(func_call.args) if hasattr(func_call, "args") else "{}",
                                 },
                             })
 
@@ -300,10 +258,10 @@ class GoogleFunctionCallingMixin:
                         "type": "function",
                         "function": {
                             "name": func_call.name,
-                            "arguments":
+                            "arguments": _serialize_function_args(func_call.args) if hasattr(func_call, "args") else "{}",
                         },
                     })
-
+
                 # Check for function_call attribute directly on candidate
                 elif hasattr(candidate, "function_call") and candidate.function_call:
                     func_call = candidate.function_call
@@ -312,7 +270,7 @@ class GoogleFunctionCallingMixin:
                         "type": "function",
                         "function": {
                             "name": func_call.name,
-                            "arguments":
+                            "arguments": _serialize_function_args(func_call.args) if hasattr(func_call, "args") else "{}",
                         },
                     })
 
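The key property _serialize_function_args relies on is that Vertex AI's MapComposite is Mapping-like, so dict(args) flattens it before json.dumps. A stand-in that reproduces the behavior without the protobuf dependency (FakeMapComposite is illustrative only):

import json

class FakeMapComposite:
    """Mapping-like object mimicking proto.marshal's MapComposite."""
    def __init__(self, data):
        self._data = data
    def items(self):               # the hasattr(args, 'items') check hits this
        return self._data.items()
    def keys(self):                # keys() + __getitem__ let dict(obj) work
        return self._data.keys()
    def __getitem__(self, key):
        return self._data[key]

args = FakeMapComposite({"city": "Paris", "days": 3})
print(json.dumps(dict(args), ensure_ascii=False))  # {"city": "Paris", "days": 3}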
aiecs/llm/clients/googleai_client.py
CHANGED
@@ -1,3 +1,4 @@
+import json
 import logging
 import os
 from typing import Optional, List, AsyncGenerator
@@ -45,14 +46,86 @@ class GoogleAIClient(BaseLLMClient):
     def _convert_messages_to_contents(
         self, messages: List[LLMMessage]
     ) -> List[types.Content]:
-        """
+        """
+        Convert LLMMessage list to Google GenAI Content objects.
+
+        This properly handles multi-turn conversations including
+        function/tool responses for Google AI Function Calling.
+
+        Args:
+            messages: List of LLMMessage objects (system messages should be filtered out)
+
+        Returns:
+            List of Content objects for Google AI API
+        """
         contents = []
+
         for msg in messages:
-            #
-
-
-
-
+            # Handle tool/function responses (role="tool")
+            if msg.role == "tool":
+                # Google AI expects function responses as user messages with FunctionResponse parts
+                func_name = msg.tool_call_id or "unknown_function"
+
+                # Parse content as the function response
+                try:
+                    if msg.content and msg.content.strip().startswith('{'):
+                        response_data = json.loads(msg.content)
+                    else:
+                        response_data = {"result": msg.content}
+                except json.JSONDecodeError:
+                    response_data = {"result": msg.content}
+
+                # Create FunctionResponse part
+                func_response_part = types.Part.from_function_response(
+                    name=func_name,
+                    response=response_data
+                )
+
+                contents.append(types.Content(
+                    role="user",  # Function responses are sent as "user" role
+                    parts=[func_response_part]
+                ))
+
+            # Handle assistant messages with tool calls
+            elif msg.role == "assistant" and msg.tool_calls:
+                parts = []
+                if msg.content:
+                    parts.append(types.Part(text=msg.content))
+
+                for tool_call in msg.tool_calls:
+                    func = tool_call.get("function", {})
+                    func_name = func.get("name", "")
+                    func_args = func.get("arguments", "{}")
+
+                    # Parse arguments
+                    try:
+                        args_dict = json.loads(func_args) if isinstance(func_args, str) else func_args
+                    except json.JSONDecodeError:
+                        args_dict = {}
+
+                    # Create FunctionCall part using types.FunctionCall
+                    # Note: types.Part.from_function_call() may not exist in google.genai
+                    # Use FunctionCall type directly
+                    function_call = types.FunctionCall(
+                        name=func_name,
+                        args=args_dict
+                    )
+                    parts.append(types.Part(function_call=function_call))
+
+                contents.append(types.Content(
+                    role="model",
+                    parts=parts
+                ))
+
+            # Handle regular messages (user, assistant without tool_calls)
+            else:
+                role = "model" if msg.role == "assistant" else msg.role
+                if msg.content:
+                    contents.append(types.Content(
+                        role=role,
+                        parts=[types.Part(text=msg.content)]
+                    ))
+
         return contents
 
     async def generate_text(
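Taken together, the new conversion maps an OpenAI-style tool round trip onto Google AI's two-role scheme: assistant tool calls become role="model" contents carrying FunctionCall parts, and tool results come back as role="user" contents carrying FunctionResponse parts. An illustrative three-message turn (data only, no API call; values are made up):

messages = [
    {"role": "user", "content": "What's the weather in Paris?"},
    {"role": "assistant", "content": None, "tool_calls": [
        {"function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}},
    ]},
    {"role": "tool", "tool_call_id": "get_weather", "content": '{"temp_c": 18}'},
]
# Expected mapping after conversion:
#   user message      -> Content(role="user",  parts=[Part(text="What's the weather in Paris?")])
#   assistant message -> Content(role="model", parts=[Part(function_call=FunctionCall(name="get_weather", args={"city": "Paris"}))])
#   tool message      -> Content(role="user",  parts=[Part.from_function_response(name="get_weather", response={"temp_c": 18})])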
|