genxai-framework 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. cli/commands/__init__.py +3 -1
  2. cli/commands/connector.py +309 -0
  3. cli/commands/workflow.py +80 -0
  4. cli/main.py +3 -1
  5. genxai/__init__.py +33 -0
  6. genxai/agents/__init__.py +8 -0
  7. genxai/agents/presets.py +53 -0
  8. genxai/connectors/__init__.py +10 -0
  9. genxai/connectors/config_store.py +106 -0
  10. genxai/connectors/github.py +117 -0
  11. genxai/connectors/google_workspace.py +124 -0
  12. genxai/connectors/jira.py +108 -0
  13. genxai/connectors/notion.py +97 -0
  14. genxai/connectors/slack.py +121 -0
  15. genxai/core/agent/config_io.py +32 -1
  16. genxai/core/agent/runtime.py +41 -4
  17. genxai/core/graph/__init__.py +3 -0
  18. genxai/core/graph/engine.py +218 -11
  19. genxai/core/graph/executor.py +103 -10
  20. genxai/core/graph/nodes.py +28 -0
  21. genxai/core/graph/workflow_io.py +199 -0
  22. genxai/flows/__init__.py +33 -0
  23. genxai/flows/auction.py +66 -0
  24. genxai/flows/base.py +134 -0
  25. genxai/flows/conditional.py +45 -0
  26. genxai/flows/coordinator_worker.py +62 -0
  27. genxai/flows/critic_review.py +62 -0
  28. genxai/flows/ensemble_voting.py +49 -0
  29. genxai/flows/loop.py +42 -0
  30. genxai/flows/map_reduce.py +61 -0
  31. genxai/flows/p2p.py +146 -0
  32. genxai/flows/parallel.py +27 -0
  33. genxai/flows/round_robin.py +24 -0
  34. genxai/flows/router.py +45 -0
  35. genxai/flows/selector.py +63 -0
  36. genxai/flows/subworkflow.py +35 -0
  37. genxai/llm/factory.py +17 -10
  38. genxai/llm/providers/anthropic.py +116 -1
  39. genxai/tools/builtin/__init__.py +3 -0
  40. genxai/tools/builtin/communication/human_input.py +32 -0
  41. genxai/tools/custom/test-2.py +19 -0
  42. genxai/tools/custom/test_tool_ui.py +9 -0
  43. genxai/tools/persistence/service.py +3 -3
  44. genxai/utils/tokens.py +6 -0
  45. {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.1.dist-info}/METADATA +63 -12
  46. {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.1.dist-info}/RECORD +50 -21
  47. {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.1.dist-info}/WHEEL +0 -0
  48. {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.1.dist-info}/entry_points.txt +0 -0
  49. {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.1.dist-info}/licenses/LICENSE +0 -0
  50. {genxai_framework-0.1.0.dist-info → genxai_framework-0.1.1.dist-info}/top_level.txt +0 -0
genxai/flows/router.py ADDED
@@ -0,0 +1,45 @@
+ """Rule-based routing flow orchestrator."""
+
+ from typing import Callable, List
+
+ from genxai.core.graph.engine import Graph
+ from genxai.core.graph.edges import Edge
+ from genxai.core.graph.nodes import AgentNode, InputNode, OutputNode
+ from genxai.flows.base import FlowOrchestrator
+
+
+ class RouterFlow(FlowOrchestrator):
+     """Route to an agent based on deterministic routing rules."""
+
+     def __init__(
+         self,
+         agents: List,
+         router: Callable[[dict], str],
+         name: str = "router_flow",
+         llm_provider=None,
+     ) -> None:
+         super().__init__(agents=agents, name=name, llm_provider=llm_provider)
+         self.router = router
+
+     def build_graph(self) -> Graph:
+         graph = Graph(name=self.name)
+         start = InputNode(id="input")
+         graph.add_node(start)
+
+         end = OutputNode(id="output")
+         graph.add_node(end)
+         nodes = self._agent_nodes()
+         for node in nodes:
+             graph.add_node(node)
+             graph.add_edge(
+                 Edge(
+                     source=start.id,
+                     target=node.id,
+                     condition=lambda state, agent_id=node.id: self.router(state) == agent_id,
+                 )
+             )
+             graph.add_edge(Edge(source=node.id, target=end.id))
+
+         graph.add_edge(Edge(source=start.id, target=end.id))
+
+         return graph
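RouterFlow places every agent behind a conditional edge keyed on the router's return value, plus an unconditional input→output edge as a fallback. A minimal usage sketch; the `AgentFactory.create_agent` keyword arguments come from the README examples elsewhere in this diff, and the base-class `run(input_data=...)` signature is assumed to match the other flows in this release:

```python
# Usage sketch only: AgentFactory.create_agent kwargs and FlowOrchestrator.run
# are taken from other parts of this diff; the "input" state key mirrors
# SelectorFlow.run below.
import asyncio

from genxai import AgentFactory
from genxai.flows.router import RouterFlow

agents = [
    AgentFactory.create_agent(id="billing", role="Billing", goal="Handle invoices"),
    AgentFactory.create_agent(id="support", role="Support", goal="Handle everything else"),
]

def route(state: dict) -> str:
    # Deterministic rule: the returned id decides which conditional edge fires.
    text = str(state.get("input", "")).lower()
    return "billing" if "invoice" in text else "support"

flow = RouterFlow(agents, router=route)
result = asyncio.run(flow.run(input_data="Please resend my invoice"))
```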
genxai/flows/selector.py ADDED
@@ -0,0 +1,63 @@
+ """Selector-based flow orchestrator."""
+
+ from __future__ import annotations
+
+ from typing import Any, Callable, Dict, List, Optional
+
+ from genxai.core.graph.engine import Graph
+ from genxai.core.graph.nodes import AgentNode
+ from genxai.flows.base import FlowOrchestrator
+
+
+ class SelectorFlow(FlowOrchestrator):
+     """Route to the next agent using a selector function.
+
+     The selector function returns the agent ID to execute next.
+     """
+
+     def __init__(
+         self,
+         agents: List[Any],
+         selector: Callable[[Dict[str, Any], List[str]], str],
+         name: str = "selector_flow",
+         llm_provider: Any = None,
+         max_hops: int = 1,
+     ) -> None:
+         super().__init__(agents=agents, name=name, llm_provider=llm_provider)
+         self.selector = selector
+         self.max_hops = max_hops
+
+     def build_graph(self) -> Graph:
+         graph = Graph(name=self.name)
+         nodes: List[AgentNode] = self._agent_nodes()
+
+         for node in nodes:
+             graph.add_node(node)
+
+         return graph
+
+     async def run(
+         self,
+         input_data: Any,
+         state: Optional[Dict[str, Any]] = None,
+         max_iterations: int = 100,
+     ) -> Dict[str, Any]:
+         graph = self.build_graph()
+         if state is None:
+             state = {}
+
+         state["input"] = input_data
+         agent_ids = [agent.id for agent in self.agents]
+
+         for hop in range(self.max_hops):
+             selected = self.selector(state, agent_ids)
+             if selected not in agent_ids:
+                 raise ValueError(
+                     f"SelectorFlow returned unknown agent id '{selected}'."
+                 )
+
+             state["next_agent"] = selected
+             state["selector_hop"] = hop + 1
+             await graph._execute_node(selected, state, max_iterations)
+
+         return state
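Note that `SelectorFlow.run` bypasses edge traversal entirely and calls `graph._execute_node` directly, so exactly one agent executes per hop. A short sketch, again assuming the `AgentFactory.create_agent` call shape from the README in this diff:

```python
# Sketch: pick() alternates agents using the "selector_hop" counter that
# SelectorFlow writes into state AFTER each hop (hop 0 sees no key, hop 1
# sees 1, and so on), so the two agents take turns.
import asyncio

from genxai import AgentFactory
from genxai.flows.selector import SelectorFlow

agents = [
    AgentFactory.create_agent(id="drafter", role="Drafter", goal="Draft text"),
    AgentFactory.create_agent(id="reviewer", role="Reviewer", goal="Review text"),
]

def pick(state, agent_ids):
    return agent_ids[state.get("selector_hop", 0) % len(agent_ids)]

flow = SelectorFlow(agents, selector=pick, max_hops=4)
state = asyncio.run(flow.run(input_data="Write a release note"))
print(state["next_agent"])  # id of the last agent executed
```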
genxai/flows/subworkflow.py ADDED
@@ -0,0 +1,35 @@
+ """Subworkflow flow orchestrator."""
+
+ from typing import Any, Dict, Optional
+
+ from genxai.core.graph.engine import Graph
+ from genxai.flows.base import FlowOrchestrator
+
+
+ class SubworkflowFlow(FlowOrchestrator):
+     """Execute a pre-built subgraph as a flow."""
+
+     def __init__(
+         self,
+         graph: Graph,
+         name: str = "subworkflow_flow",
+         llm_provider: Any = None,
+     ) -> None:
+         super().__init__(agents=[], name=name, llm_provider=llm_provider, allow_empty_agents=True)
+         self.graph = graph
+
+     def build_graph(self) -> Graph:
+         return self.graph
+
+     async def run(
+         self,
+         input_data: Any,
+         state: Optional[Dict[str, Any]] = None,
+         max_iterations: int = 100,
+     ) -> Dict[str, Any]:
+         return await self.graph.run(
+             input_data=input_data,
+             state=state,
+             max_iterations=max_iterations,
+             llm_provider=self.llm_provider,
+         )
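SubworkflowFlow simply delegates `run` to the wrapped graph. A sketch of reusing a hand-built `Graph` as a flow; the `Graph`/`Edge`/node API mirrors router.py above, and the trivial input→output wiring is illustrative only:

```python
# Sketch, assuming the Graph/Edge/InputNode/OutputNode API shown in
# genxai/flows/router.py above; the inner graph here does nothing useful.
import asyncio

from genxai.core.graph.edges import Edge
from genxai.core.graph.engine import Graph
from genxai.core.graph.nodes import InputNode, OutputNode
from genxai.flows.subworkflow import SubworkflowFlow

inner = Graph(name="inner")
inner.add_node(InputNode(id="input"))
inner.add_node(OutputNode(id="output"))
inner.add_edge(Edge(source="input", target="output"))

flow = SubworkflowFlow(graph=inner, name="wrapped_inner")
result = asyncio.run(flow.run(input_data={"task": "ping"}))
```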
genxai/llm/factory.py CHANGED
@@ -37,6 +37,11 @@ class LLMProviderFactory:
      "claude-3-opus": "genxai.llm.providers.anthropic.AnthropicProvider",
      "claude-3-sonnet": "genxai.llm.providers.anthropic.AnthropicProvider",
      "claude-3-haiku": "genxai.llm.providers.anthropic.AnthropicProvider",
+     "claude-3-5-sonnet-20241022": "genxai.llm.providers.anthropic.AnthropicProvider",
+     "claude-3-5-sonnet-20240620": "genxai.llm.providers.anthropic.AnthropicProvider",
+     "claude-3-opus-20240229": "genxai.llm.providers.anthropic.AnthropicProvider",
+     "claude-3-sonnet-20240229": "genxai.llm.providers.anthropic.AnthropicProvider",
+     "claude-3-haiku-20240307": "genxai.llm.providers.anthropic.AnthropicProvider",
      "google": "genxai.llm.providers.google.GoogleProvider",
      "gemini-pro": "genxai.llm.providers.google.GoogleProvider",
      "gemini-ultra": "genxai.llm.providers.google.GoogleProvider",
@@ -235,41 +240,43 @@ class LLMProviderFactory:
          Returns:
              Provider class or None
          """
+         model_key = model.lower()
+
          # Direct match in pre-loaded providers
-         if model in cls._providers:
-             return cls._providers[model]
+         if model_key in cls._providers:
+             return cls._providers[model_key]

          # Check lazy-loaded providers
-         if model in cls._provider_modules:
-             provider_class = cls._load_provider_class(cls._provider_modules[model])
+         if model_key in cls._provider_modules:
+             provider_class = cls._load_provider_class(cls._provider_modules[model_key])
              if provider_class:
                  # Cache it for future use
-                 cls._providers[model] = provider_class
+                 cls._providers[model_key] = provider_class
                  return provider_class

          # Check if model starts with known provider prefix
-         model_lower = model.lower()
+         model_lower = model_key
          if model_lower.startswith("gpt"):
              return OpenAIProvider
          elif model_lower.startswith("claude"):
              provider_class = cls._load_provider_class("genxai.llm.providers.anthropic.AnthropicProvider")
              if provider_class:
-                 cls._providers[model] = provider_class
+                 cls._providers[model_key] = provider_class
                  return provider_class
          elif model_lower.startswith("gemini"):
              provider_class = cls._load_provider_class("genxai.llm.providers.google.GoogleProvider")
              if provider_class:
-                 cls._providers[model] = provider_class
+                 cls._providers[model_key] = provider_class
                  return provider_class
          elif model_lower.startswith("command"):
              provider_class = cls._load_provider_class("genxai.llm.providers.cohere.CohereProvider")
              if provider_class:
-                 cls._providers[model] = provider_class
+                 cls._providers[model_key] = provider_class
                  return provider_class
          elif model_lower.startswith("llama") or model_lower.startswith("mistral") or model_lower.startswith("phi"):
              provider_class = cls._load_provider_class("genxai.llm.providers.ollama.OllamaProvider")
              if provider_class:
-                 cls._providers[model] = provider_class
+                 cls._providers[model_key] = provider_class
                  return provider_class

          return None
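The change normalizes the model id once up front and reuses the lowercased key for every registry lookup and cache write, where previously mixed-case ids were cached under their original spelling. A standalone sketch of the pattern; the tables here are illustrative stand-ins, not the factory's real registries:

```python
# Standalone sketch of the case-insensitive lookup introduced above.
_providers: dict = {}  # cache, keyed by lowercased model id
_provider_modules = {"claude-3-opus": "genxai.llm.providers.anthropic.AnthropicProvider"}

def resolve(model: str):
    model_key = model.lower()           # normalize once, up front
    if model_key in _providers:         # cache hit
        return _providers[model_key]
    if model_key in _provider_modules:  # lazy table hit; cache under the same key
        _providers[model_key] = _provider_modules[model_key]
        return _providers[model_key]
    if model_key.startswith("claude"):  # prefix fallback, also lowercased
        return "genxai.llm.providers.anthropic.AnthropicProvider"
    return None

assert resolve("Claude-3-Opus") == resolve("claude-3-opus")
```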
genxai/llm/providers/anthropic.py CHANGED
@@ -12,6 +12,23 @@ logger = logging.getLogger(__name__)
  class AnthropicProvider(LLMProvider):
      """Anthropic Claude LLM provider."""

+     _MODEL_ALIASES = {
+         # Claude 4.5 models
+         "claude-sonnet-4-5": "claude-sonnet-4-5-20250929",
+         "claude-haiku-4-5": "claude-haiku-4-5-20251001",
+         "claude-opus-4-5": "claude-opus-4-5-20251101",
+         # Claude 4 models
+         "claude-sonnet-4": "claude-sonnet-4-20250514",
+         "claude-opus-4": "claude-opus-4-20250514",
+         "claude-opus-4-1": "claude-opus-4-1-20250805",
+         # Claude 3.5 models
+         "claude-3-5-sonnet": "claude-3-5-sonnet-20241022",
+         # Claude 3 models
+         "claude-3-opus": "claude-3-opus-20240229",
+         "claude-3-sonnet": "claude-3-sonnet-20240229",
+         "claude-3-haiku": "claude-3-haiku-20240307",
+     }
+
      def __init__(
          self,
          model: str = "claude-3-opus-20240229",
@@ -29,7 +46,9 @@ class AnthropicProvider(LLMProvider):
              max_tokens: Maximum tokens to generate
              **kwargs: Additional Anthropic-specific parameters
          """
-         super().__init__(model, temperature, max_tokens, **kwargs)
+         resolved_model = self._normalize_model(model)
+         super().__init__(resolved_model, temperature, max_tokens, **kwargs)
+         self.requested_model = model

          self.api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
          if not self.api_key:
@@ -126,6 +145,35 @@
              )

          except Exception as e:
+             if self._is_model_not_found_error(e):
+                 fallback_model = self._fallback_model(self.model)
+                 if fallback_model and fallback_model != self.model:
+                     logger.warning(
+                         "Anthropic model '%s' not found. Falling back to '%s'.",
+                         self.model,
+                         fallback_model,
+                     )
+                     self.model = fallback_model
+                     params["model"] = fallback_model
+                     response = await self._client.messages.create(**params)
+                     content = response.content[0].text if response.content else ""
+                     finish_reason = response.stop_reason
+                     usage = {
+                         "prompt_tokens": response.usage.input_tokens if response.usage else 0,
+                         "completion_tokens": response.usage.output_tokens if response.usage else 0,
+                         "total_tokens": (
+                             (response.usage.input_tokens + response.usage.output_tokens)
+                             if response.usage else 0
+                         ),
+                     }
+                     self._update_stats(usage)
+                     return LLMResponse(
+                         content=content,
+                         model=response.model,
+                         usage=usage,
+                         finish_reason=finish_reason,
+                         metadata={"response_id": response.id, "type": response.type},
+                     )
              logger.error(f"Anthropic API call failed: {e}")
              raise

@@ -245,5 +293,72 @@ class AnthropicProvider(LLMProvider):
              )

          except Exception as e:
+             if self._is_model_not_found_error(e):
+                 fallback_model = self._fallback_model(self.model)
+                 if fallback_model and fallback_model != self.model:
+                     logger.warning(
+                         "Anthropic model '%s' not found. Falling back to '%s'.",
+                         self.model,
+                         fallback_model,
+                     )
+                     self.model = fallback_model
+                     params["model"] = fallback_model
+                     response = await self._client.messages.create(**params)
+                     content = response.content[0].text if response.content else ""
+                     finish_reason = response.stop_reason
+                     usage = {
+                         "prompt_tokens": response.usage.input_tokens if response.usage else 0,
+                         "completion_tokens": response.usage.output_tokens if response.usage else 0,
+                         "total_tokens": (
+                             (response.usage.input_tokens + response.usage.output_tokens)
+                             if response.usage else 0
+                         ),
+                     }
+                     self._update_stats(usage)
+                     return LLMResponse(
+                         content=content,
+                         model=response.model,
+                         usage=usage,
+                         finish_reason=finish_reason,
+                         metadata={"response_id": response.id, "type": response.type},
+                     )
              logger.error(f"Anthropic chat API call failed: {e}")
              raise
+
+     @classmethod
+     def _normalize_model(cls, model: str) -> str:
+         model_key = model.strip().lower()
+         return cls._MODEL_ALIASES.get(model_key, model)
+
+     @staticmethod
+     def _is_model_not_found_error(error: Exception) -> bool:
+         message = str(error).lower()
+         return "not_found_error" in message or "model:" in message
+
+     @staticmethod
+     def _fallback_model(model: str) -> Optional[str]:
+         model_lower = model.lower()
+         # Claude 4.5 fallbacks
+         if model_lower.startswith("claude-sonnet-4-5") or model_lower.startswith("claude-opus-4-5"):
+             return "claude-sonnet-4-20250514"
+         if model_lower.startswith("claude-haiku-4-5"):
+             return "claude-haiku-4-5-20251001"
+         # Claude 4 fallbacks
+         if model_lower.startswith("claude-opus-4"):
+             return "claude-sonnet-4-20250514"
+         if model_lower.startswith("claude-sonnet-4"):
+             return "claude-3-5-sonnet-20241022"
+         # Claude 3.5 fallbacks
+         if model_lower.startswith("claude-3-5"):
+             return "claude-3-sonnet-20240229"
+         # Claude 3 fallbacks
+         if model_lower.startswith("claude-3-opus"):
+             return "claude-3-sonnet-20240229"
+         if model_lower.startswith("claude-3-sonnet"):
+             return "claude-3-haiku-20240307"
+         if model_lower.startswith("claude-3-haiku"):
+             return "claude-3-haiku-20240307"
+         # Generic Claude fallback
+         if model_lower.startswith("claude"):
+             return "claude-3-haiku-20240307"
+         return None
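The provider now applies two layers: at construction time, bare aliases are pinned to dated ids via `_MODEL_ALIASES`; at call time, a not-found error walks down one capability tier via `_fallback_model` and retries once. A standalone sketch of both mechanisms with a subset of the tables; `AnthropicProvider` itself is not imported, so this runs without the SDK:

```python
# Standalone sketch of the alias + fallback behavior added above.
_ALIASES = {
    "claude-3-5-sonnet": "claude-3-5-sonnet-20241022",
    "claude-3-opus": "claude-3-opus-20240229",
}

def normalize(model: str) -> str:
    # Constructor-time step: map a bare alias to a pinned, dated id.
    return _ALIASES.get(model.strip().lower(), model)

def fallback(model: str):
    # Call-time step: on a not_found error, drop to a lower tier and retry.
    m = model.lower()
    if m.startswith("claude-3-5"):
        return "claude-3-sonnet-20240229"
    if m.startswith("claude-3-opus"):
        return "claude-3-sonnet-20240229"
    if m.startswith("claude"):
        return "claude-3-haiku-20240307"
    return None

assert normalize("Claude-3-Opus") == "claude-3-opus-20240229"
assert fallback(normalize("claude-3-5-sonnet")) == "claude-3-sonnet-20240229"
```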
genxai/tools/builtin/__init__.py CHANGED
@@ -34,6 +34,7 @@ from genxai.tools.builtin.database.database_inspector import DatabaseInspectorTo

  # Communication tools
  from genxai.tools.builtin.communication.email_sender import EmailSenderTool
+ from genxai.tools.builtin.communication.human_input import HumanInputTool
  from genxai.tools.builtin.communication.slack_notifier import SlackNotifierTool
  from genxai.tools.builtin.communication.sms_sender import SMSSenderTool
  from genxai.tools.builtin.communication.webhook_caller import WebhookCallerTool
@@ -75,6 +76,7 @@ _tools_to_register = [
      DatabaseInspectorTool(),
      # Communication
      EmailSenderTool(),
+     HumanInputTool(),
      SlackNotifierTool(),
      SMSSenderTool(),
      WebhookCallerTool(),
@@ -119,6 +121,7 @@ __all__ = [
      "VectorSearchTool",
      "DatabaseInspectorTool",
      "EmailSenderTool",
+     "HumanInputTool",
      "SlackNotifierTool",
      "SMSSenderTool",
      "WebhookCallerTool",
genxai/tools/builtin/communication/human_input.py ADDED
@@ -0,0 +1,32 @@
+ """Human input tool for interactive workflows."""
+
+ from typing import Any, Dict
+
+ from genxai.tools.base import Tool, ToolMetadata, ToolParameter, ToolCategory
+
+
+ class HumanInputTool(Tool):
+     """Collect input from a human (stdin)."""
+
+     def __init__(self) -> None:
+         from genxai.tools.registry import ToolRegistry
+         super().__init__(
+             metadata=ToolMetadata(
+                 name="human_input",
+                 description="Collects human input from the console",
+                 category=ToolCategory.CUSTOM,
+             ),
+             parameters=[
+                 ToolParameter(
+                     name="prompt",
+                     type="string",
+                     description="Prompt to show the user",
+                 )
+             ],
+         )
+         if ToolRegistry.get(self.metadata.name) is None:
+             ToolRegistry.register(self)
+
+     async def _execute(self, **kwargs: Any) -> Dict[str, Any]:
+         prompt = kwargs.get("prompt", "Your response:")
+         return {"response": input(f"{prompt} ")}
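A sketch of driving the tool directly via the private `_execute` shown above; the public wrapper (presumably `Tool.execute` in `genxai.tools.base`) is not part of this diff, so its name is an assumption:

```python
# Sketch only: _execute is the coroutine defined in this diff; agents would
# normally invoke the tool through the registry rather than directly.
import asyncio

from genxai.tools.builtin.communication.human_input import HumanInputTool

tool = HumanInputTool()  # self-registers in ToolRegistry on construction
answer = asyncio.run(tool._execute(prompt="Approve this draft? (y/n)"))
print(answer["response"])
```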
genxai/tools/custom/test-2.py ADDED
@@ -0,0 +1,19 @@
+ """
+ Auto-generated tool: test-2
+ Description: testing
+ Category: custom
+ Created: 2026-02-03 02:05:46.667101
+ """
+
+ # Tool code
+ # Access parameters via 'params' dict
+ # Example: value = params.get('input_value')
+
+ # Your tool logic here
+ result = {
+     "message": "Hello from custom tool!",
+     "data": params
+ }
+
+ # Set 'result' variable with your output
+
genxai/tools/custom/test_tool_ui.py ADDED
@@ -0,0 +1,9 @@
+ """
+ Auto-generated tool: test_tool_ui
+ Description: test
+ Category: custom
+ Created: 2026-02-03 02:03:15.074097
+ """
+
+ # Tool code
+ result = {"message": "ok"}
genxai/tools/persistence/service.py CHANGED
@@ -94,7 +94,7 @@ class ToolService:
              logger.info(f"Saved tool to database: {name}")

              # Optionally export to file
-             ToolService._export_to_file(tool_model)
+             # ToolService._export_to_file(tool_model)

              return tool_model
          except Exception as e:
@@ -153,7 +153,7 @@ class ToolService:
              logger.info(f"Updated tool code in database: {name}")

              # Update file if exists
-             ToolService._export_to_file(tool)
+             # ToolService._export_to_file(tool)

              return True
          return False
@@ -183,7 +183,7 @@ class ToolService:
              logger.info(f"Deleted tool from database: {name}")

              # Delete file if exists
-             ToolService._delete_file(name)
+             # ToolService._delete_file(name)

              return True
          return False
genxai/utils/tokens.py CHANGED
@@ -17,8 +17,14 @@ MODEL_TOKEN_LIMITS: Dict[str, int] = {
      "gpt-3.5-turbo-16k": 16384,
      # Anthropic models
      "claude-3-opus": 200000,
+     "claude-3-opus-20240229": 200000,
      "claude-3-sonnet": 200000,
+     "claude-3-sonnet-20240229": 200000,
      "claude-3-haiku": 200000,
+     "claude-3-haiku-20240307": 200000,
+     "claude-3-5-sonnet": 200000,
+     "claude-3-5-sonnet-20241022": 200000,
+     "claude-3-5-sonnet-20240620": 200000,
      "claude-2.1": 200000,
      "claude-2": 100000,
      "claude-instant": 100000,
{genxai_framework-0.1.0.dist-info → genxai_framework-0.1.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: genxai-framework
- Version: 0.1.0
+ Version: 0.1.1
  Summary: Advanced Agentic AI Framework with Graph-Based Orchestration
  Author-email: GenXAI Team <team@genxai.dev>
  License: MIT
@@ -81,9 +81,9 @@ Dynamic: license-file

  # GenXAI - Advanced Agentic AI Framework

- **Version:** 1.0.0 (Design Phase)
- **Status:** Planning & Architecture
- **License:** MIT (Planned)
+ **Version:** 1.0.0
+ **Status:** Active Development
+ **License:** MIT

  ---

@@ -155,7 +155,7 @@ Comprehensive documentation is available in the following files:

  - **[ARCHITECTURE.md](./ARCHITECTURE.md)** - Complete system architecture and design principles
  - **[REQUIREMENTS.md](./REQUIREMENTS.md)** - Detailed functional and non-functional requirements
- - **[IMPLEMENTATION_PLAN.md](./IMPLEMENTATION_PLAN.md)** - 20-week development roadmap
+ - **[IMPLEMENTATION_PLAN.md](./IMPLEMENTATION_PLAN.md)** - Development roadmap
  - **[TOOLS_DESIGN.md](./TOOLS_DESIGN.md)** - Tool system architecture and 50+ built-in tools
  - **[MEMORY_DESIGN.md](./MEMORY_DESIGN.md)** - Multi-layered memory system design

@@ -229,7 +229,7 @@ See [ARCHITECTURE.md](./ARCHITECTURE.md) for complete details.
  - Beta testing
  - Official launch 🚀

- See [IMPLEMENTATION_PLAN.md](./IMPLEMENTATION_PLAN.md) for detailed timeline.
+ See [IMPLEMENTATION_PLAN.md](./IMPLEMENTATION_PLAN.md) for the timeline.

  ---

@@ -286,6 +286,48 @@ graph.add_edge(Edge(source="support", target="end"))
  result = await graph.run(input_data="My app crashed")
  ```

+ ### Flow Orchestrator Examples
+
+ GenXAI also ships with lightweight flow orchestrators for common patterns:
+
+ ```python
+ from genxai import AgentFactory, RoundRobinFlow, SelectorFlow, P2PFlow
+
+ agents = [
+     AgentFactory.create_agent(id="analyst", role="Analyst", goal="Analyze"),
+     AgentFactory.create_agent(id="writer", role="Writer", goal="Write"),
+ ]
+
+ # Round-robin flow
+ round_robin = RoundRobinFlow(agents)
+
+ # Selector flow
+ def choose_next(state, agent_ids):
+     return agent_ids[state.get("selector_hop", 0) % len(agent_ids)]
+
+ selector = SelectorFlow(agents, selector=choose_next, max_hops=3)
+
+ # P2P flow
+ p2p = P2PFlow(agents, max_rounds=4, consensus_threshold=0.7)
+ ```
+
+ See runnable examples in:
+ - `examples/code/flow_round_robin_example.py`
+ - `examples/code/flow_selector_example.py`
+ - `examples/code/flow_p2p_example.py`
+ - `examples/code/flow_parallel_example.py`
+ - `examples/code/flow_conditional_example.py`
+ - `examples/code/flow_loop_example.py`
+ - `examples/code/flow_router_example.py`
+ - `examples/code/flow_ensemble_voting_example.py`
+ - `examples/code/flow_critic_review_example.py`
+ - `examples/code/flow_coordinator_worker_example.py`
+ - `examples/code/flow_map_reduce_example.py`
+ - `examples/code/flow_subworkflow_example.py`
+ - `examples/code/flow_auction_example.py`
+
+ Full flow documentation: [docs/FLOWS.md](./docs/FLOWS.md)
+
  ### Trigger SDK Quick Start

  ```python
@@ -350,6 +392,16 @@ workflow:
      condition: "category == 'technical'"
  ```

+ Shared memory template:
+
+ ```bash
+ genxai workflow run examples/nocode/shared_memory_workflow.yaml \
+     --input '{"task": "Draft a short response"}'
+ ```
+
+ See no-code templates (including a shared memory example) in:
+ - `examples/nocode/README.md`
+
  ---

  ## 🛠️ Technology Stack
@@ -443,10 +495,9 @@ workflow:

  ## 🤝 Contributing

- We welcome contributions! This project is currently in the design phase. Once implementation begins, we'll provide:
+ We welcome contributions! This project is in active development. We provide:

  - Contributing guidelines
- - Code of conduct
  - Development setup instructions
  - Issue templates
  - Pull request templates
@@ -455,7 +506,7 @@ We welcome contributions! This project is currently in the design phase. Once im

  ## 📜 License

- MIT License (Planned)
+ MIT License

  ---

@@ -486,9 +537,9 @@ Inspired by:

  ## 📈 Project Status

- **Current Phase**: Design & Planning
- **Next Milestone**: Begin Phase 1 implementation
- **Expected Launch**: Week 20 (approximately 5 months from start)
+ **Current Phase**: Active Development
+ **Next Milestone**: Complete visual editor + studio polish
+ **Expected Launch**: TBD

  ---
