euriai 1.0.17__tar.gz → 1.0.19__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {euriai-1.0.17 → euriai-1.0.19}/PKG-INFO +1 -1
- {euriai-1.0.17 → euriai-1.0.19}/euriai/__init__.py +1 -1
- {euriai-1.0.17 → euriai-1.0.19}/euriai/langgraph.py +168 -52
- {euriai-1.0.17 → euriai-1.0.19}/euriai.egg-info/PKG-INFO +1 -1
- {euriai-1.0.17 → euriai-1.0.19}/setup.py +1 -1
- {euriai-1.0.17 → euriai-1.0.19}/README.md +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/autogen.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/cli.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/client.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/crewai.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/direct.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/embedding.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/euri_chat.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/euri_embed.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/langchain.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/llamaindex.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/n8n.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai/smolagents.py +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai.egg-info/SOURCES.txt +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai.egg-info/dependency_links.txt +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai.egg-info/entry_points.txt +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai.egg-info/requires.txt +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/euriai.egg-info/top_level.txt +0 -0
- {euriai-1.0.17 → euriai-1.0.19}/setup.cfg +0 -0
--- euriai-1.0.17/euriai/langgraph.py
+++ euriai-1.0.19/euriai/langgraph.py
@@ -112,7 +112,9 @@ class EuriaiAINode:
         max_tokens: int = 1000,
         system_message: Optional[str] = None,
         output_parser: Optional[Callable[[str], Any]] = None,
-        error_handler: Optional[Callable[[Exception], Any]] = None
+        error_handler: Optional[Callable[[Exception], Any]] = None,
+        max_retries: int = 0,
+        retry_delay: float = 1.0
     ):
         """
         Initialize an AI node.
@@ -127,6 +129,8 @@ class EuriaiAINode:
             system_message: Optional system message
             output_parser: Function to parse AI output
             error_handler: Function to handle errors
+            max_retries: Maximum number of retries on error (default: 0)
+            retry_delay: Delay between retries in seconds (default: 1.0)
         """
         self.name = name
         self.prompt_template = prompt_template
@@ -136,6 +140,8 @@ class EuriaiAINode:
         self.system_message = system_message
         self.output_parser = output_parser
         self.error_handler = error_handler
+        self.max_retries = max_retries
+        self.retry_delay = retry_delay
 
         # Initialize client
         self.client = EuriaiClient(api_key=api_key, model=model)
@@ -149,58 +155,79 @@ class EuriaiAINode:
         }
 
     def __call__(self, state: Dict[str, Any]) -> Dict[str, Any]:
-        """Execute the AI node."""
+        """Execute the AI node with retry logic."""
         start_time = time.time()
+        last_exception = None
 
-        [old lines 155-194: previous single-attempt request body, not rendered in this diff view]
-        except Exception as e:
-            self.usage_stats["errors"] += 1
-
-            if self.error_handler:
-                return self.error_handler(e)
-            else:
-                logging.error(f"Error in AI node {self.name}: {e}")
-                state[f"{self.name}_error"] = str(e)
+        for attempt in range(self.max_retries + 1):  # +1 for initial attempt
+            try:
+                # Format prompt with state variables
+                formatted_prompt = self.prompt_template.format(**state)
+
+                # Prepare user prompt (combine system message if provided)
+                user_prompt = formatted_prompt
+                if self.system_message:
+                    # If there's a system message, combine it with the user prompt
+                    user_prompt = f"System: {self.system_message}\n\nUser: {formatted_prompt}"
+
+                # Make API call
+                response = self.client.generate_completion(
+                    prompt=user_prompt,
+                    temperature=self.temperature,
+                    max_tokens=self.max_tokens
+                )
+
+                # Extract content
+                content = response.get("choices", [{}])[0].get("message", {}).get("content", "")
+
+                # Parse output if parser provided
+                if self.output_parser:
+                    parsed_output = self.output_parser(content)
+                else:
+                    parsed_output = content
+
+                # Update usage stats
+                self.usage_stats["total_calls"] += 1
+                response_time = time.time() - start_time
+                self.usage_stats["avg_response_time"] = (
+                    (self.usage_stats["avg_response_time"] * (self.usage_stats["total_calls"] - 1) + response_time)
+                    / self.usage_stats["total_calls"]
+                )
+
+                # Update state
+                state[f"{self.name}_output"] = parsed_output
+                state[f"{self.name}_raw_response"] = content
+
                 return state
+
+            except Exception as e:
+                last_exception = e
+                self.usage_stats["errors"] += 1
+
+                # If we haven't exhausted retries, wait and try again
+                if attempt < self.max_retries:
+                    logging.warning(f"AI node {self.name} failed (attempt {attempt + 1}/{self.max_retries + 1}): {e}")
+                    time.sleep(self.retry_delay)
+                    continue
+
+                # All retries exhausted, handle error
+                if self.error_handler:
+                    error_result = self.error_handler(e)
+                    if isinstance(error_result, dict):
+                        # Merge error handler result with state
+                        state.update(error_result)
+                        return state
+                    else:
+                        state[f"{self.name}_error_handler_result"] = error_result
+                        return state
+                else:
+                    logging.error(f"Error in AI node {self.name} after {self.max_retries + 1} attempts: {e}")
+                    state[f"{self.name}_error"] = str(e)
+                    state["error_handled"] = False
+                    return state
+
+        # Should never reach here, but just in case
+        return state
 
     async def acall(self, state: Dict[str, Any]) -> Dict[str, Any]:
         """Async version of the AI node execution."""
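For context, a minimal sketch of how the new retry knobs could be exercised on a standalone node. The API key, model id, and state contents below are placeholders, not values from the package:

    from euriai.langgraph import EuriaiAINode

    node = EuriaiAINode(
        name="summarizer",
        prompt_template="Summarize briefly: {text}",
        api_key="YOUR_EURI_API_KEY",   # placeholder
        model="some-model-id",         # placeholder
        max_retries=2,                 # retry failed API calls up to twice
        retry_delay=0.5,               # wait 0.5 s between attempts
    )

    # On success the node writes summarizer_output / summarizer_raw_response;
    # after exhausting retries it writes summarizer_error instead.
    state = node({"text": "LangGraph adds stateful orchestration to LLM apps."})
    print(state.get("summarizer_output") or state.get("summarizer_error"))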
@@ -318,7 +345,9 @@ class EuriaiLangGraph:
         max_tokens: Optional[int] = None,
         system_message: Optional[str] = None,
         output_parser: Optional[Callable[[str], Any]] = None,
-        error_handler: Optional[Callable[[Exception], Any]] = None
+        error_handler: Optional[Callable[[Exception], Any]] = None,
+        max_retries: int = 0,
+        retry_delay: float = 1.0
     ) -> None:
         """
         Add an AI-powered node to the graph.
@@ -332,6 +361,8 @@ class EuriaiLangGraph:
             system_message: System message for the node
             output_parser: Function to parse AI output
             error_handler: Function to handle errors
+            max_retries: Maximum number of retries on error (default: 0)
+            retry_delay: Delay between retries in seconds (default: 1.0)
         """
         ai_node = EuriaiAINode(
             name=name,
@@ -342,7 +373,9 @@ class EuriaiLangGraph:
             max_tokens=max_tokens or self.default_max_tokens,
             system_message=system_message,
             output_parser=output_parser,
-            error_handler=error_handler
+            error_handler=error_handler,
+            max_retries=max_retries,
+            retry_delay=retry_delay
         )
 
         self.ai_nodes[name] = ai_node
@@ -413,6 +446,24 @@ class EuriaiLangGraph:
         if self.verbose:
             print(f"Added condition node: {name} with routes: {routes}")
 
+    def add_conditional_node(
+        self,
+        name: str,
+        condition_func: Callable[[Dict[str, Any]], str],
+        routes: Dict[str, str]
+    ) -> None:
+        """
+        Alias for add_condition_node for more intuitive naming.
+
+        Add a conditional node that routes based on state.
+
+        Args:
+            name: Node name
+            condition_func: Function that returns route key based on state
+            routes: Mapping of route keys to target nodes
+        """
+        return self.add_condition_node(name, condition_func, routes)
+
     def add_embedding_node(
         self,
         name: str,
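The new add_conditional_node is a pure alias, so existing add_condition_node call sites keep working. A hypothetical call, assuming a built EuriaiLangGraph named graph (node names invented):

    graph.add_conditional_node(
        name="triage",
        condition_func=lambda state: "escalate" if state.get("score", 0) > 0.8 else "archive",
        routes={"escalate": "human_review", "archive": "store_result"},
    )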
@@ -493,6 +544,40 @@ class EuriaiLangGraph:
         if self.verbose:
             print(f"Set finish point: {node_name}")
 
+    def add_conditional_edge(
+        self,
+        from_node: str,
+        condition_func: Callable[[Dict[str, Any]], str],
+        condition_map: Optional[Dict[str, str]] = None
+    ) -> None:
+        """
+        Add a conditional edge that routes based on state.
+
+        Args:
+            from_node: Source node name
+            condition_func: Function that evaluates state and returns next node name
+            condition_map: Optional mapping of condition results to node names
+        """
+        if condition_map:
+            # Use condition map for routing
+            def router(state: Dict[str, Any]) -> str:
+                result = condition_func(state)
+                return condition_map.get(result, END)
+
+            self.graph.add_conditional_edges(from_node, router, condition_map)
+        else:
+            # Direct function routing
+            self.graph.add_conditional_edges(from_node, condition_func)
+
+        self.conditional_edges.append({
+            "source": from_node,
+            "condition": condition_func,
+            "condition_map": condition_map
+        })
+
+        if self.verbose:
+            print(f"Added conditional edge from: {from_node}")
+
     def compile_graph(self) -> CompiledStateGraph:
         """
         Compile the graph for execution.
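A sketch of the new edge-level routing, again assuming a graph instance (node names invented). With a condition_map, any condition result missing from the map falls back to END via the wrapped router:

    graph.add_conditional_edge(
        from_node="classifier",
        condition_func=lambda state: state.get("classifier_output", ""),
        condition_map={"positive": "praise_node", "negative": "complaint_node"},
    )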
@@ -1018,6 +1103,37 @@ class EuriaiLangGraph:
 
         return stats
 
+    def get_error_stats(self) -> Dict[str, Any]:
+        """Get error statistics for the workflow."""
+        error_stats = {
+            "total_errors": 0,
+            "ai_node_errors": {},
+            "error_rate": 0.0
+        }
+
+        total_calls = 0
+        total_errors = 0
+
+        # Collect error stats from AI nodes
+        for name, node in self.ai_nodes.items():
+            node_errors = node.usage_stats.get("errors", 0)
+            node_calls = node.usage_stats.get("total_calls", 0)
+
+            error_stats["ai_node_errors"][name] = {
+                "errors": node_errors,
+                "total_calls": node_calls,
+                "error_rate": node_errors / max(node_calls, 1)
+            }
+
+            total_errors += node_errors
+            total_calls += node_calls
+
+        error_stats["total_errors"] = total_errors
+        error_stats["total_calls"] = total_calls
+        error_stats["error_rate"] = total_errors / max(total_calls, 1)
+
+        return error_stats
+
     def get_graph_structure(self) -> Dict[str, Any]:
         """Get the structure of the graph."""
         return {
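After a run, the new accessor rolls per-node counters up into one report; the keys below all come from the implementation above, while the graph variable is assumed:

    stats = graph.get_error_stats()
    print(f"{stats['total_errors']} errors over {stats['total_calls']} calls "
          f"({stats['error_rate']:.1%})")
    for node_name, node_stats in stats["ai_node_errors"].items():
        print(node_name, node_stats["errors"], f"{node_stats['error_rate']:.1%}")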
--- euriai-1.0.17/setup.py
+++ euriai-1.0.19/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="euriai",
-    version="1.0.17",
+    version="1.0.19",
     description="Python client for Euri API (euron.one) with CLI, LangChain, and LlamaIndex integration",
     long_description=open("README.md", encoding="utf-8").read(),
     long_description_content_type="text/markdown",