euriai 0.3.30__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
euriai/langgraph.py ADDED
@@ -0,0 +1,1012 @@
"""
Enhanced LangGraph Integration for Euri API
===========================================

This module provides a comprehensive LangGraph integration with the Euri API,
including AI-powered workflows, async operations, multi-model support, and
pre-built workflow patterns for common use cases.

Usage:
    from euriai.langgraph import EuriaiLangGraph, EuriaiAINode

    # Create an enhanced LangGraph with AI capabilities
    graph = EuriaiLangGraph(
        api_key="your_api_key",
        default_model="gpt-4.1-nano"
    )

    # Add AI-powered nodes
    graph.add_ai_node("analyzer", "Analyze the input and extract key insights")
    graph.add_ai_node("generator", "Generate a response based on the analysis")

    # Create the workflow
    graph.add_edge("analyzer", "generator")
    graph.set_entry_point("analyzer")
    graph.set_finish_point("generator")

    # Run the workflow
    result = graph.run({"input": "Your text here"})
    print(result)
"""

import asyncio
import json
import logging
from typing import (
    Any, Dict, List, Optional, Iterator, AsyncIterator,
    Union, Callable, Sequence, TypeVar, Generic, Tuple
)
from concurrent.futures import ThreadPoolExecutor
import time
from functools import wraps
from enum import Enum

try:
    from langgraph.graph import StateGraph, START, END
    from langgraph.graph.state import CompiledStateGraph
    from langgraph.constants import Send
    from langgraph.checkpoint.memory import MemorySaver
    from pydantic import BaseModel, Field
    LANGGRAPH_AVAILABLE = True
except ImportError:
    LANGGRAPH_AVAILABLE = False

    # Fallback classes so the module can still be imported without langgraph
    class StateGraph:
        pass

    class CompiledStateGraph:
        pass

    class Send:
        pass

    class MemorySaver:
        pass

    class BaseModel:
        pass

    class Field:
        pass

    START = "START"
    END = "END"

from euriai.client import EuriaiClient
from euriai.embedding import EuriaiEmbeddingClient

# Type definitions
StateType = TypeVar('StateType', bound=Dict[str, Any])
NodeOutput = Union[Dict[str, Any], List[Dict[str, Any]]]

class WorkflowType(Enum):
    """Predefined workflow types."""
    SEQUENTIAL = "sequential"
    PARALLEL = "parallel"
    CONDITIONAL = "conditional"
    AGENT_WORKFLOW = "agent_workflow"
    RAG_WORKFLOW = "rag_workflow"
    MULTI_AGENT = "multi_agent"


class NodeType(Enum):
    """Types of nodes in the workflow."""
    AI_NODE = "ai_node"
    FUNCTION_NODE = "function_node"
    CONDITION_NODE = "condition_node"
    AGGREGATOR_NODE = "aggregator_node"
    ROUTER_NODE = "router_node"
    EMBEDDING_NODE = "embedding_node"


class EuriaiAINode:
    """
    AI-powered node that uses the Euri API for processing.

    This node can perform various AI tasks like text generation, analysis,
    summarization, and more using the Euri API.
    """

    def __init__(
        self,
        name: str,
        prompt_template: str,
        api_key: str,
        model: str = "gpt-4.1-nano",
        temperature: float = 0.7,
        max_tokens: int = 1000,
        system_message: Optional[str] = None,
        output_parser: Optional[Callable[[str], Any]] = None,
        error_handler: Optional[Callable[[Exception], Any]] = None
    ):
        """
        Initialize an AI node.

        Args:
            name: Node name
            prompt_template: Template for generating prompts (can use {variable} placeholders)
            api_key: Euri API key
            model: Model to use for this node
            temperature: Model temperature
            max_tokens: Maximum tokens to generate
            system_message: Optional system message
            output_parser: Function to parse AI output
            error_handler: Function to handle errors
        """
        self.name = name
        self.prompt_template = prompt_template
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.system_message = system_message
        self.output_parser = output_parser
        self.error_handler = error_handler

        # Initialize client
        self.client = EuriaiClient(api_key=api_key, model=model)

        # Usage tracking
        self.usage_stats = {
            "total_calls": 0,
            "total_tokens": 0,
            "errors": 0,
            "avg_response_time": 0.0
        }

    def __call__(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Execute the AI node."""
        start_time = time.time()

        try:
            # Format prompt with state variables
            formatted_prompt = self.prompt_template.format(**state)

            # Prepare messages
            messages = []
            if self.system_message:
                messages.append({"role": "system", "content": self.system_message})
            messages.append({"role": "user", "content": formatted_prompt})

            # Make API call
            response = self.client.generate_completion(
                messages=messages,
                temperature=self.temperature,
                max_tokens=self.max_tokens
            )

            # Extract content
            content = response.get("choices", [{}])[0].get("message", {}).get("content", "")

            # Parse output if a parser was provided
            if self.output_parser:
                parsed_output = self.output_parser(content)
            else:
                parsed_output = content

            # Update usage stats (running average of response time)
            self.usage_stats["total_calls"] += 1
            response_time = time.time() - start_time
            self.usage_stats["avg_response_time"] = (
                (self.usage_stats["avg_response_time"] * (self.usage_stats["total_calls"] - 1) + response_time)
                / self.usage_stats["total_calls"]
            )

            # Update state
            state[f"{self.name}_output"] = parsed_output
            state[f"{self.name}_raw_response"] = content

            return state

        except Exception as e:
            self.usage_stats["errors"] += 1

            if self.error_handler:
                return self.error_handler(e)
            else:
                logging.error(f"Error in AI node {self.name}: {e}")
                state[f"{self.name}_error"] = str(e)
                return state

    async def acall(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Async version of the AI node execution."""
        # get_running_loop() is the correct call inside a coroutine;
        # get_event_loop() is deprecated in this context.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self.__call__, state)

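# Usage sketch (hypothetical values): an AI node with a custom output parser
# and error handler; json.loads assumes the model was instructed to return JSON.
#
#   classifier = EuriaiAINode(
#       name="classifier",
#       prompt_template='Classify the sentiment of: {input}. '
#                       'Reply as JSON {{"label": "...", "score": 0.0}}',
#       api_key="your_api_key",
#       output_parser=json.loads,
#       error_handler=lambda e: {"classifier_error": str(e)},
#   )
#   state = classifier({"input": "I love this library!"})
#   print(state["classifier_output"])  # e.g. {"label": "positive", "score": 0.98}
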
class EuriaiLangGraph:
    """
    Enhanced LangGraph integration that uses the Euri API for AI-powered workflows.

    This implementation provides:
    - AI-powered nodes with Euri API integration
    - Pre-built workflow patterns
    - Async operations
    - Multi-model support
    - Usage tracking and monitoring
    - Error handling and recovery
    - Workflow visualization and debugging

    Example:
        graph = EuriaiLangGraph(
            api_key="your_api_key",
            default_model="gpt-4.1-nano"
        )

        # Add AI nodes
        graph.add_ai_node("analyzer", "Analyze this text: {input}")
        graph.add_ai_node("summarizer", "Summarize: {analyzer_output}")

        # Create the workflow
        graph.add_edge("analyzer", "summarizer")
        graph.set_entry_point("analyzer")
        graph.set_finish_point("summarizer")

        # Run the workflow
        result = graph.run({"input": "Your text here"})
    """

    def __init__(
        self,
        api_key: str,
        name: str = "EuriaiLangGraph",
        default_model: str = "gpt-4.1-nano",
        default_temperature: float = 0.7,
        default_max_tokens: int = 1000,
        enable_checkpointing: bool = True,
        verbose: bool = True
    ):
        """
        Initialize the enhanced LangGraph.

        Args:
            api_key: Euri API key
            name: Graph name
            default_model: Default model for AI nodes
            default_temperature: Default temperature
            default_max_tokens: Default max tokens
            enable_checkpointing: Enable workflow checkpointing
            verbose: Enable verbose logging
        """
        if not LANGGRAPH_AVAILABLE:
            raise ImportError(
                "LangGraph is not installed. Please install with: "
                "pip install langgraph"
            )

        self.api_key = api_key
        self.name = name
        self.default_model = default_model
        self.default_temperature = default_temperature
        self.default_max_tokens = default_max_tokens
        self.verbose = verbose

        # Initialize graph
        self.graph = StateGraph(dict)
        self.compiled_graph: Optional[CompiledStateGraph] = None

        # Checkpointing
        self.checkpointer = MemorySaver() if enable_checkpointing else None

        # Node management
        self.nodes: Dict[str, Any] = {}
        self.ai_nodes: Dict[str, EuriaiAINode] = {}
        self.edges: List[Tuple[str, str]] = []
        self.conditional_edges: List[Dict[str, Any]] = []

        # Workflow state
        self.entry_point: Optional[str] = None
        self.finish_point: Optional[str] = None

        # Usage tracking
        self.usage_stats = {
            "total_runs": 0,
            "total_nodes_executed": 0,
            "avg_execution_time": 0.0,
            "errors": 0,
            "successful_runs": 0
        }

        # Thread pool for async operations
        self._executor = ThreadPoolExecutor(max_workers=4)

    def add_ai_node(
        self,
        name: str,
        prompt_template: str,
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        system_message: Optional[str] = None,
        output_parser: Optional[Callable[[str], Any]] = None,
        error_handler: Optional[Callable[[Exception], Any]] = None
    ) -> None:
        """
        Add an AI-powered node to the graph.

        Args:
            name: Node name
            prompt_template: Prompt template with {variable} placeholders
            model: Model to use (defaults to graph default)
            temperature: Temperature (defaults to graph default)
            max_tokens: Max tokens (defaults to graph default)
            system_message: System message for the node
            output_parser: Function to parse AI output
            error_handler: Function to handle errors
        """
        # Use explicit None checks so falsy overrides like temperature=0.0 are honored
        ai_node = EuriaiAINode(
            name=name,
            prompt_template=prompt_template,
            api_key=self.api_key,
            model=model or self.default_model,
            temperature=temperature if temperature is not None else self.default_temperature,
            max_tokens=max_tokens if max_tokens is not None else self.default_max_tokens,
            system_message=system_message,
            output_parser=output_parser,
            error_handler=error_handler
        )

        self.ai_nodes[name] = ai_node
        self.nodes[name] = ai_node
        self.graph.add_node(name, ai_node)

        if self.verbose:
            print(f"Added AI node: {name} (model: {ai_node.model})")

    def add_function_node(self, name: str, func: Callable[[Dict[str, Any]], Dict[str, Any]]) -> None:
        """
        Add a function node to the graph.

        Args:
            name: Node name
            func: Function to execute (takes a state dict, returns a state dict)
        """
        self.nodes[name] = func
        self.graph.add_node(name, func)

        if self.verbose:
            print(f"Added function node: {name}")

    def add_condition_node(
        self,
        name: str,
        condition_func: Callable[[Dict[str, Any]], str],
        routes: Dict[str, str]
    ) -> None:
        """
        Add a conditional node that routes based on state.

        Args:
            name: Node name
            condition_func: Function that returns a route key based on state
            routes: Mapping of route keys to target nodes
        """
        def condition_wrapper(state: Dict[str, Any]) -> str:
            route_key = condition_func(state)
            return routes.get(route_key, END)

        # The node itself just passes state through; routing happens on its
        # outgoing conditional edges (nodes must return state, not a route).
        def passthrough(state: Dict[str, Any]) -> Dict[str, Any]:
            return state

        self.nodes[name] = passthrough
        self.graph.add_node(name, passthrough)

        # Register all routes in a single conditional-edges call. The wrapper
        # already returns target node names, so no path map is needed.
        self.graph.add_conditional_edges(name, condition_wrapper)

        self.conditional_edges.append({
            "source": name,
            "condition": condition_func,
            "routes": routes
        })

        if self.verbose:
            print(f"Added condition node: {name} with routes: {routes}")

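    # Usage sketch (hypothetical node names): route on a label computed from
    # state; the targets must be nodes that exist in the graph.
    #
    #   graph.add_condition_node(
    #       "router",
    #       lambda state: "positive" if "good" in state.get("input", "") else "negative",
    #       routes={"positive": "praise_handler", "negative": "complaint_handler"},
    #   )
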
    def add_embedding_node(
        self,
        name: str,
        embedding_model: str = "text-embedding-3-small",
        batch_size: int = 100
    ) -> None:
        """
        Add an embedding node that generates embeddings for text.

        Args:
            name: Node name
            embedding_model: Embedding model to use
            batch_size: Batch size for processing
        """
        embedding_client = EuriaiEmbeddingClient(
            api_key=self.api_key,
            model=embedding_model
        )

        def embedding_func(state: Dict[str, Any]) -> Dict[str, Any]:
            # Get text to embed (can be a string or a list of strings)
            text_input = state.get(f"{name}_input", state.get("input", ""))

            if isinstance(text_input, str):
                embedding = embedding_client.embed(text_input)
                state[f"{name}_output"] = embedding.tolist()
            elif isinstance(text_input, list):
                embeddings = embedding_client.embed_batch(text_input)
                state[f"{name}_output"] = [emb.tolist() for emb in embeddings]
            else:
                state[f"{name}_error"] = "Invalid input type for embedding"

            return state

        self.nodes[name] = embedding_func
        self.graph.add_node(name, embedding_func)

        if self.verbose:
            print(f"Added embedding node: {name} (model: {embedding_model})")

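    # Usage sketch: the node reads "<name>_input" (falling back to "input"),
    # so a list of strings is embedded as a batch. Entry/finish wiring is
    # still the caller's job.
    #
    #   graph.add_embedding_node("embedder")
    #   graph.set_entry_point("embedder")
    #   graph.set_finish_point("embedder")
    #   result = graph.run({"embedder_input": ["first doc", "second doc"]})
    #   vectors = result["embedder_output"]  # list of embedding lists
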
    def add_edge(self, from_node: str, to_node: str) -> None:
        """
        Add an edge between two nodes.

        Args:
            from_node: Source node name
            to_node: Target node name
        """
        self.graph.add_edge(from_node, to_node)
        self.edges.append((from_node, to_node))

        if self.verbose:
            print(f"Added edge: {from_node} -> {to_node}")

    def set_entry_point(self, node_name: str) -> None:
        """
        Set the entry point for the workflow.

        Args:
            node_name: Name of the starting node
        """
        self.entry_point = node_name
        self.graph.add_edge(START, node_name)

        if self.verbose:
            print(f"Set entry point: {node_name}")

    def set_finish_point(self, node_name: str) -> None:
        """
        Set the finish point for the workflow.

        Args:
            node_name: Name of the ending node
        """
        self.finish_point = node_name
        self.graph.add_edge(node_name, END)

        if self.verbose:
            print(f"Set finish point: {node_name}")

    def compile_graph(self) -> CompiledStateGraph:
        """
        Compile the graph for execution.

        Returns:
            Compiled graph ready for execution
        """
        self.compiled_graph = self.graph.compile(
            checkpointer=self.checkpointer,
            debug=self.verbose
        )

        if self.verbose:
            print("Graph compiled successfully")

        return self.compiled_graph

    def _prepare_config(self, config: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """When a checkpointer is set, LangGraph requires a thread_id in the
        run config; fall back to a default thread if the caller gave none."""
        if self.checkpointer is not None and not config:
            return {"configurable": {"thread_id": "default"}}
        return config

    def run(
        self,
        input_state: Dict[str, Any],
        config: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Run the workflow with the given input state.

        Args:
            input_state: Initial state for the workflow
            config: Optional configuration for the run

        Returns:
            Final state after workflow execution
        """
        start_time = time.time()

        try:
            # Compile the graph if not already compiled
            if self.compiled_graph is None:
                self.compile_graph()

            # Execute workflow
            result = self.compiled_graph.invoke(input_state, config=self._prepare_config(config))

            # Update usage stats (running average of execution time)
            self.usage_stats["total_runs"] += 1
            self.usage_stats["successful_runs"] += 1
            execution_time = time.time() - start_time
            self.usage_stats["avg_execution_time"] = (
                (self.usage_stats["avg_execution_time"] * (self.usage_stats["total_runs"] - 1) + execution_time)
                / self.usage_stats["total_runs"]
            )

            if self.verbose:
                print(f"Workflow completed in {execution_time:.2f}s")

            return result

        except Exception as e:
            self.usage_stats["errors"] += 1
            logging.error(f"Error running workflow: {e}")
            raise

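    # Usage sketch: with enable_checkpointing=True (the default), runs are
    # checkpointed per thread; pass a thread_id to keep sessions separate
    # (otherwise the "default" thread from _prepare_config is used).
    #
    #   result = graph.run(
    #       {"input": "Summarize this."},
    #       config={"configurable": {"thread_id": "session-42"}},
    #   )
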
    async def arun(
        self,
        input_state: Dict[str, Any],
        config: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Async version of workflow execution.

        Args:
            input_state: Initial state for the workflow
            config: Optional configuration for the run

        Returns:
            Final state after workflow execution
        """
        start_time = time.time()

        try:
            # Compile the graph if not already compiled
            if self.compiled_graph is None:
                self.compile_graph()

            # Execute the workflow asynchronously
            result = await self.compiled_graph.ainvoke(input_state, config=self._prepare_config(config))

            # Update usage stats (running average of execution time)
            self.usage_stats["total_runs"] += 1
            self.usage_stats["successful_runs"] += 1
            execution_time = time.time() - start_time
            self.usage_stats["avg_execution_time"] = (
                (self.usage_stats["avg_execution_time"] * (self.usage_stats["total_runs"] - 1) + execution_time)
                / self.usage_stats["total_runs"]
            )

            if self.verbose:
                print(f"Async workflow completed in {execution_time:.2f}s")

            return result

        except Exception as e:
            self.usage_stats["errors"] += 1
            logging.error(f"Error running async workflow: {e}")
            raise

    def stream(
        self,
        input_state: Dict[str, Any],
        config: Optional[Dict[str, Any]] = None
    ) -> Iterator[Dict[str, Any]]:
        """
        Stream workflow execution results.

        Args:
            input_state: Initial state for the workflow
            config: Optional configuration for the run

        Yields:
            Intermediate states during workflow execution
        """
        if self.compiled_graph is None:
            self.compile_graph()

        for chunk in self.compiled_graph.stream(input_state, config=self._prepare_config(config)):
            yield chunk

    async def astream(
        self,
        input_state: Dict[str, Any],
        config: Optional[Dict[str, Any]] = None
    ) -> AsyncIterator[Dict[str, Any]]:
        """
        Async-stream workflow execution results.

        Args:
            input_state: Initial state for the workflow
            config: Optional configuration for the run

        Yields:
            Intermediate states during workflow execution
        """
        if self.compiled_graph is None:
            self.compile_graph()

        async for chunk in self.compiled_graph.astream(input_state, config=self._prepare_config(config)):
            yield chunk

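    # Usage sketch: stream() typically yields one {node_name: state_update}
    # chunk per executed node, which is handy for progress reporting.
    #
    #   for chunk in graph.stream({"input": "Analyze this."}):
    #       for node_name, update in chunk.items():
    #           print(f"{node_name} finished")
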
    def create_workflow_pattern(self, pattern_type: WorkflowType, **kwargs) -> None:
        """
        Create a pre-defined workflow pattern.

        Args:
            pattern_type: Type of workflow pattern to create
            **kwargs: Pattern-specific arguments
        """
        if pattern_type == WorkflowType.SEQUENTIAL:
            self._create_sequential_workflow(**kwargs)
        elif pattern_type == WorkflowType.PARALLEL:
            self._create_parallel_workflow(**kwargs)
        elif pattern_type == WorkflowType.CONDITIONAL:
            self._create_conditional_workflow(**kwargs)
        elif pattern_type == WorkflowType.AGENT_WORKFLOW:
            self._create_agent_workflow(**kwargs)
        elif pattern_type == WorkflowType.RAG_WORKFLOW:
            self._create_rag_workflow(**kwargs)
        elif pattern_type == WorkflowType.MULTI_AGENT:
            self._create_multi_agent_workflow(**kwargs)
        else:
            raise ValueError(f"Unknown workflow pattern: {pattern_type}")

    def _create_sequential_workflow(self, steps: List[Dict[str, Any]]) -> None:
        """Create a sequential workflow pattern."""
        previous_node = None

        for i, step in enumerate(steps):
            node_name = step.get("name", f"step_{i}")

            if step["type"] == "ai":
                self.add_ai_node(
                    node_name,
                    step["prompt_template"],
                    model=step.get("model"),
                    temperature=step.get("temperature"),
                    max_tokens=step.get("max_tokens")
                )
            elif step["type"] == "function":
                self.add_function_node(node_name, step["function"])

            if i == 0:
                self.set_entry_point(node_name)

            if previous_node:
                self.add_edge(previous_node, node_name)

            previous_node = node_name

        if previous_node:
            self.set_finish_point(previous_node)

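    # Usage sketch: the steps schema consumed by _create_sequential_workflow,
    # mixing an AI step with a plain function step.
    #
    #   graph.create_workflow_pattern(WorkflowType.SEQUENTIAL, steps=[
    #       {"type": "ai", "name": "draft", "prompt_template": "Draft a reply to: {input}"},
    #       {"type": "function", "name": "trim",
    #        "function": lambda state: {**state, "final": state["draft_output"].strip()}},
    #   ])
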
    def _create_parallel_workflow(self, parallel_nodes: List[Dict[str, Any]], aggregator: Dict[str, Any]) -> None:
        """Create a parallel workflow pattern."""
        # Create the dispatcher node. The node itself passes state through;
        # the fan-out happens in the conditional edge below, since Send
        # objects must be returned from an edge function, not from a node.
        self.add_function_node("dispatcher", lambda state: state)
        self.set_entry_point("dispatcher")
        self.graph.add_conditional_edges(
            "dispatcher",
            lambda state: [Send(node["name"], state) for node in parallel_nodes]
        )

        # Create parallel nodes
        for node in parallel_nodes:
            if node["type"] == "ai":
                self.add_ai_node(
                    node["name"],
                    node["prompt_template"],
                    model=node.get("model"),
                    temperature=node.get("temperature")
                )
            elif node["type"] == "function":
                self.add_function_node(node["name"], node["function"])

            # Connect to aggregator
            self.add_edge(node["name"], aggregator["name"])

        # Create aggregator node
        if aggregator["type"] == "ai":
            self.add_ai_node(
                aggregator["name"],
                aggregator["prompt_template"],
                model=aggregator.get("model")
            )
        elif aggregator["type"] == "function":
            self.add_function_node(aggregator["name"], aggregator["function"])

        self.set_finish_point(aggregator["name"])

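    # Usage sketch: fan one input out to two AI reviewers, then aggregate.
    #
    #   graph.create_workflow_pattern(
    #       WorkflowType.PARALLEL,
    #       parallel_nodes=[
    #           {"type": "ai", "name": "tone_review", "prompt_template": "Review tone: {input}"},
    #           {"type": "ai", "name": "fact_review", "prompt_template": "Review facts: {input}"},
    #       ],
    #       aggregator={"type": "ai", "name": "merge",
    #                   "prompt_template": "Merge: {tone_review_output} | {fact_review_output}"},
    #   )
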
    def _create_conditional_workflow(self, condition: Dict[str, Any], branches: Dict[str, List[Dict[str, Any]]]) -> None:
        """Create a conditional workflow pattern."""
        # Create the condition node, routing each branch key to that branch's start node
        self.add_condition_node(
            condition["name"],
            condition["function"],
            {key: f"{key}_start" for key in branches.keys()}
        )
        self.set_entry_point(condition["name"])

        # Create branches
        for branch_name, steps in branches.items():
            previous_node = None

            for i, step in enumerate(steps):
                node_name = f"{branch_name}_{step.get('name', f'step_{i}')}"

                if step["type"] == "ai":
                    self.add_ai_node(
                        node_name,
                        step["prompt_template"],
                        model=step.get("model")
                    )
                elif step["type"] == "function":
                    self.add_function_node(node_name, step["function"])

                if i == 0:
                    # This is the start of the branch
                    self.add_function_node(f"{branch_name}_start", lambda state: state)
                    self.add_edge(f"{branch_name}_start", node_name)

                if previous_node:
                    self.add_edge(previous_node, node_name)

                previous_node = node_name

            # Connect the last node of the branch to END
            if previous_node:
                self.set_finish_point(previous_node)

    def _create_agent_workflow(self, agent_config: Dict[str, Any]) -> None:
        """Create an agent workflow pattern."""
        # Planning node
        self.add_ai_node(
            "planner",
            agent_config.get("planning_prompt", "Create a plan to solve: {input}"),
            model=agent_config.get("planning_model", self.default_model)
        )

        # Execution node
        self.add_ai_node(
            "executor",
            agent_config.get("execution_prompt", "Execute this plan: {planner_output}"),
            model=agent_config.get("execution_model", self.default_model)
        )

        # Evaluation node
        self.add_ai_node(
            "evaluator",
            agent_config.get("evaluation_prompt", "Evaluate the result: {executor_output}"),
            model=agent_config.get("evaluation_model", self.default_model)
        )

        # Create the plan -> execute -> evaluate workflow
        self.set_entry_point("planner")
        self.add_edge("planner", "executor")
        self.add_edge("executor", "evaluator")
        self.set_finish_point("evaluator")

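    # Usage sketch: every agent_config key is an optional override; the
    # defaults produce the plan -> execute -> evaluate chain above.
    #
    #   graph.create_workflow_pattern(WorkflowType.AGENT_WORKFLOW, agent_config={
    #       "planning_prompt": "Break this task into numbered steps: {input}",
    #       "execution_model": "gpt-4.1-nano",
    #   })
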
    def _create_rag_workflow(self, rag_config: Dict[str, Any]) -> None:
        """Create a RAG (Retrieval-Augmented Generation) workflow pattern."""
        # Embedding node for the query
        self.add_embedding_node(
            "query_embedder",
            embedding_model=rag_config.get("embedding_model", "text-embedding-3-small")
        )

        # Retrieval node (a function that uses embeddings to find relevant docs)
        def retrieval_func(state: Dict[str, Any]) -> Dict[str, Any]:
            # This would typically query a vector database with the embedding;
            # for now, a simple placeholder returns the configured sample docs.
            query_embedding = state.get("query_embedder_output", [])  # available to a real retriever
            state["retrieved_docs"] = rag_config.get("sample_docs", ["Sample document content"])
            return state

        self.add_function_node("retriever", retrieval_func)

        # Generation node with retrieved context
        self.add_ai_node(
            "generator",
            rag_config.get(
                "generation_prompt",
                "Based on these documents: {retrieved_docs}\n\nAnswer the question: {input}"
            ),
            model=rag_config.get("generation_model", self.default_model)
        )

        # Create the embed -> retrieve -> generate workflow
        self.set_entry_point("query_embedder")
        self.add_edge("query_embedder", "retriever")
        self.add_edge("retriever", "generator")
        self.set_finish_point("generator")

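    # Usage sketch: override sample_docs for a quick test, or replace the
    # "retriever" node with a real vector-store lookup for production use.
    #
    #   graph.create_workflow_pattern(WorkflowType.RAG_WORKFLOW, rag_config={
    #       "sample_docs": ["Euri API supports multiple models."],
    #       "generation_model": "gpt-4.1-nano",
    #   })
    #   answer = graph.run({"input": "Which models are supported?"})
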
    def _create_multi_agent_workflow(self, agents: List[Dict[str, Any]]) -> None:
        """Create a multi-agent workflow pattern."""
        # Create agent nodes
        for agent in agents:
            self.add_ai_node(
                agent["name"],
                agent["prompt_template"],
                model=agent.get("model", self.default_model),
                system_message=agent.get("system_message")
            )

        # Create the orchestrator
        def orchestrator(state: Dict[str, Any]) -> str:
            # Simple round-robin orchestration. In practice, this would be
            # more sophisticated and would include a termination condition
            # (as written, agents cycle back to the orchestrator indefinitely).
            current_agent = state.get("current_agent", 0)
            next_agent = (current_agent + 1) % len(agents)
            state["current_agent"] = next_agent
            return agents[next_agent]["name"]

        self.add_condition_node(
            "orchestrator",
            orchestrator,
            {agent["name"]: agent["name"] for agent in agents}
        )

        # Connect agents back to the orchestrator
        for agent in agents:
            self.add_edge(agent["name"], "orchestrator")

        self.set_entry_point("orchestrator")

    def get_usage_stats(self) -> Dict[str, Any]:
        """Get usage statistics for the workflow."""
        stats = self.usage_stats.copy()

        # Add per-node stats for AI nodes
        stats["ai_nodes"] = {}
        for name, node in self.ai_nodes.items():
            stats["ai_nodes"][name] = node.usage_stats.copy()

        return stats

    def get_graph_structure(self) -> Dict[str, Any]:
        """Get the structure of the graph."""
        return {
            "nodes": list(self.nodes.keys()),
            "ai_nodes": list(self.ai_nodes.keys()),
            "edges": self.edges,
            "conditional_edges": self.conditional_edges,
            "entry_point": self.entry_point,
            "finish_point": self.finish_point
        }

    def visualize_graph(self) -> str:
        """
        Generate a simple text visualization of the graph.

        Returns:
            Text representation of the graph structure
        """
        lines = []
        lines.append(f"Graph: {self.name}")
        lines.append("=" * 50)
        lines.append(f"Entry Point: {self.entry_point}")
        lines.append(f"Finish Point: {self.finish_point}")
        lines.append("")

        lines.append("Nodes:")
        for name in self.nodes:
            node_type = "AI" if name in self.ai_nodes else "Function"
            lines.append(f"  - {name} ({node_type})")

        lines.append("")
        lines.append("Edges:")
        for from_node, to_node in self.edges:
            lines.append(f"  {from_node} -> {to_node}")

        if self.conditional_edges:
            lines.append("")
            lines.append("Conditional Edges:")
            for edge in self.conditional_edges:
                lines.append(f"  {edge['source']} -> {edge['routes']}")

        return "\n".join(lines)

    def reset(self) -> None:
        """Reset the graph to its initial state."""
        self.graph = StateGraph(dict)
        self.compiled_graph = None
        self.nodes = {}
        self.ai_nodes = {}
        self.edges = []
        self.conditional_edges = []
        self.entry_point = None
        self.finish_point = None

        # Reset usage stats
        self.usage_stats = {
            "total_runs": 0,
            "total_nodes_executed": 0,
            "avg_execution_time": 0.0,
            "errors": 0,
            "successful_runs": 0
        }

        if self.verbose:
            print("Graph reset")

    def update_model(self, node_name: str, model: str) -> None:
        """
        Update the model for a specific AI node.

        Args:
            node_name: Name of the AI node
            model: New model to use
        """
        if node_name in self.ai_nodes:
            self.ai_nodes[node_name].model = model
            self.ai_nodes[node_name].client = EuriaiClient(
                api_key=self.api_key,
                model=model
            )

            if self.verbose:
                print(f"Updated model for {node_name}: {model}")
        else:
            raise ValueError(f"AI node {node_name} not found")


# Helper functions for common patterns
def create_simple_workflow(
    api_key: str,
    steps: List[Dict[str, Any]],
    name: str = "SimpleWorkflow"
) -> EuriaiLangGraph:
    """
    Create a simple sequential workflow.

    Args:
        api_key: Euri API key
        steps: List of step configurations
        name: Workflow name

    Returns:
        Configured EuriaiLangGraph
    """
    graph = EuriaiLangGraph(api_key=api_key, name=name)
    graph.create_workflow_pattern(WorkflowType.SEQUENTIAL, steps=steps)
    return graph


def create_agent_workflow(
    api_key: str,
    agent_config: Dict[str, Any],
    name: str = "AgentWorkflow"
) -> EuriaiLangGraph:
    """
    Create an agent-based workflow.

    Args:
        api_key: Euri API key
        agent_config: Agent configuration
        name: Workflow name

    Returns:
        Configured EuriaiLangGraph
    """
    graph = EuriaiLangGraph(api_key=api_key, name=name)
    graph.create_workflow_pattern(WorkflowType.AGENT_WORKFLOW, agent_config=agent_config)
    return graph

def create_rag_workflow(
    api_key: str,
    rag_config: Dict[str, Any],
    name: str = "RAGWorkflow"
) -> EuriaiLangGraph:
    """
    Create a RAG (Retrieval-Augmented Generation) workflow.

    Args:
        api_key: Euri API key
        rag_config: RAG configuration
        name: Workflow name

    Returns:
        Configured EuriaiLangGraph
    """
    graph = EuriaiLangGraph(api_key=api_key, name=name)
    graph.create_workflow_pattern(WorkflowType.RAG_WORKFLOW, rag_config=rag_config)
    return graph
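

# Usage sketch for the helpers above (hypothetical key and prompts):
#
#   workflow = create_simple_workflow(
#       api_key="your_api_key",
#       steps=[
#           {"type": "ai", "name": "analyze", "prompt_template": "Analyze: {input}"},
#           {"type": "ai", "name": "reply", "prompt_template": "Reply using: {analyze_output}"},
#       ],
#   )
#   print(workflow.run({"input": "Customer feedback text"}))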