fairo 25.6.5.tar.gz → 25.7.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (51)
  1. {fairo-25.6.5 → fairo-25.7.1}/PKG-INFO +4 -2
  2. fairo-25.7.1/fairo/__init__.py +1 -0
  3. fairo-25.7.1/fairo/core/chat/__init__.py +1 -0
  4. fairo-25.7.1/fairo/core/chat/chat.py +54 -0
  5. fairo-25.7.1/fairo/core/execution/agent_serializer.py +288 -0
  6. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/execution/executor.py +50 -97
  7. fairo-25.7.1/fairo/core/execution/model_log_helper.py +409 -0
  8. fairo-25.7.1/fairo/core/workflow/utils.py +473 -0
  9. {fairo-25.6.5 → fairo-25.7.1}/fairo/settings.py +0 -20
  10. {fairo-25.6.5 → fairo-25.7.1}/fairo.egg-info/PKG-INFO +4 -2
  11. {fairo-25.6.5 → fairo-25.7.1}/fairo.egg-info/SOURCES.txt +2 -0
  12. {fairo-25.6.5 → fairo-25.7.1}/fairo.egg-info/requires.txt +3 -1
  13. {fairo-25.6.5 → fairo-25.7.1}/pyproject.toml +4 -2
  14. fairo-25.6.5/fairo/__init__.py +0 -1
  15. fairo-25.6.5/fairo/core/chat/chat.py +0 -23
  16. fairo-25.6.5/fairo/core/workflow/utils.py +0 -191
  17. fairo-25.6.5/fairo/tests/__init__.py +0 -0
  18. {fairo-25.6.5 → fairo-25.7.1}/README.md +0 -0
  19. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/__init__.py +0 -0
  20. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/__init__.py +0 -0
  21. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/base_agent.py +0 -0
  22. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/code_analysis_agent.py +0 -0
  23. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/output/__init__.py +0 -0
  24. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/output/base_output.py +0 -0
  25. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/output/google_drive.py +0 -0
  26. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/tools/__init__.py +0 -0
  27. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/tools/base_tools.py +0 -0
  28. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/tools/code_analysis.py +0 -0
  29. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/tools/utils.py +0 -0
  30. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/agent/utils.py +0 -0
  31. {fairo-25.6.5/fairo/core/chat → fairo-25.7.1/fairo/core/client}/__init__.py +0 -0
  32. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/client/client.py +0 -0
  33. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/exceptions.py +0 -0
  34. {fairo-25.6.5/fairo/core/client → fairo-25.7.1/fairo/core/execution}/__init__.py +0 -0
  35. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/execution/env_finder.py +0 -0
  36. {fairo-25.6.5/fairo/core/execution → fairo-25.7.1/fairo/core/models}/__init__.py +0 -0
  37. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/models/custom_field_value.py +0 -0
  38. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/models/resources.py +0 -0
  39. {fairo-25.6.5/fairo/core/models → fairo-25.7.1/fairo/core/runnable}/__init__.py +0 -0
  40. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/runnable/runnable.py +0 -0
  41. {fairo-25.6.5/fairo/core/runnable → fairo-25.7.1/fairo/core/workflow}/__init__.py +0 -0
  42. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/workflow/base_workflow.py +0 -0
  43. {fairo-25.6.5 → fairo-25.7.1}/fairo/core/workflow/dependency.py +0 -0
  44. {fairo-25.6.5/fairo/core/workflow → fairo-25.7.1/fairo/metrics}/__init__.py +0 -0
  45. {fairo-25.6.5 → fairo-25.7.1}/fairo/metrics/fairness_object.py +0 -0
  46. {fairo-25.6.5 → fairo-25.7.1}/fairo/metrics/metrics.py +0 -0
  47. {fairo-25.6.5/fairo/metrics → fairo-25.7.1/fairo/tests}/__init__.py +0 -0
  48. {fairo-25.6.5 → fairo-25.7.1}/fairo/tests/test_metrics.py +0 -0
  49. {fairo-25.6.5 → fairo-25.7.1}/fairo.egg-info/dependency_links.txt +0 -0
  50. {fairo-25.6.5 → fairo-25.7.1}/fairo.egg-info/top_level.txt +0 -0
  51. {fairo-25.6.5 → fairo-25.7.1}/setup.cfg +0 -0
{fairo-25.6.5 → fairo-25.7.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fairo
- Version: 25.6.5
+ Version: 25.7.1
  Summary: SDK for interfacing with Fairo SaaS platform.
  Author-email: "Fairo Systems, Inc." <support@fairo.ai>
  License: Apache-2.0
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.10
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent
  Description-Content-Type: text/markdown
- Requires-Dist: mlflow<3.0.0,>=2.21.0
+ Requires-Dist: mlflow<=3.1.1,>=3.1.0
  Requires-Dist: langchain<0.4.0,>=0.3.20
  Requires-Dist: langchain-aws<0.3.0,>=0.2.18
  Requires-Dist: langchain-community<0.4.0,>=0.3.20
@@ -19,6 +19,8 @@ Requires-Dist: langchain-core<0.4.0,>=0.3.49
  Requires-Dist: langchain-text-splitters<0.4.0,>=0.3.7
  Requires-Dist: psycopg2-binary<3.0.0,>=2.9.0
  Requires-Dist: langchain-postgres<0.1.0,>=0.0.14
+ Requires-Dist: setuptools>=79.0.0
+ Requires-Dist: pandas<3.0.0,>=2.0.0
  
  # Fairo SDK
  
fairo-25.7.1/fairo/__init__.py
@@ -0,0 +1 @@
+ __version__ = "25.7.1"
fairo-25.7.1/fairo/core/chat/__init__.py
@@ -0,0 +1 @@
+ from .chat import ChatFairo
fairo-25.7.1/fairo/core/chat/chat.py
@@ -0,0 +1,54 @@
+
+ from langchain_community.chat_models.mlflow import ChatMlflow
+ from mlflow.deployments import get_deploy_client
+ from fairo.settings import get_mlflow_gateway_chat_route, get_mlflow_gateway_uri, get_mlflow_user, get_mlflow_password
+ import os
+
+
+ class ChatFairo(ChatMlflow):
+
+     def __init__(self, **kwargs):
+
+         # # TODO <- see if this can be improved
+         # os.environ["MLFLOW_TRACKING_USERNAME"] = get_mlflow_user()
+         # os.environ["MLFLOW_TRACKING_PASSWORD"] = get_mlflow_password()
+
+         super().__init__(
+             target_uri=os.environ.get('MLFLOW_GATEWAY_URI', get_mlflow_gateway_uri()),
+             endpoint=os.environ.get('MLFLOW_GATEWAY_ROUTE', get_mlflow_gateway_chat_route()),
+             **kwargs
+         )
+
+     @property
+     def _target_uri(self):
+         return os.environ.get("MLFLOW_GATEWAY_URI", None)
+
+     @property
+     def _endpoint(self):
+         return os.environ.get("MLFLOW_GATEWAY_ROUTE", None)
+
+     def invoke(self, *args, **kwargs):
+         # Override invoke to use dynamic target_uri
+         self.target_uri = self._target_uri
+         self._client = get_deploy_client(self.target_uri)
+         return super().invoke(*args, **kwargs)
+
+
+ class FairoChat(ChatMlflow):
+     def __init__(self, endpoint, **kwargs):
+         super().__init__(
+             target_uri=os.environ.get('MLFLOW_GATEWAY_URI', None),
+             endpoint=endpoint,
+             # extra_params={"workflow_run_id": workflow_run_id},
+             **kwargs
+         )
+
+     @property
+     def _target_uri(self):
+         return os.environ.get("MLFLOW_GATEWAY_URI", None)
+
+     def invoke(self, *args, **kwargs):
+         # Override invoke to use dynamic target_uri
+         self.target_uri = self._target_uri
+         self._client = get_deploy_client(self.target_uri)
+         return super().invoke(*args, **kwargs)
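
A minimal usage sketch of the new ChatFairo class, assuming MLFLOW_GATEWAY_URI and MLFLOW_GATEWAY_ROUTE (or the corresponding fairo.settings values) point at a reachable MLflow AI Gateway chat endpoint; the URI and route names below are placeholders, not values shipped with the package.

```python
# Hypothetical usage sketch; gateway URI and route are illustrative only.
import os
from fairo.core.chat import ChatFairo

os.environ["MLFLOW_GATEWAY_URI"] = "http://localhost:5000"  # assumed gateway address
os.environ["MLFLOW_GATEWAY_ROUTE"] = "chat"                  # assumed chat route name

llm = ChatFairo(temperature=0.0)
# invoke() re-resolves the gateway URI from the environment on every call.
print(llm.invoke("Hello from the Fairo SDK"))
```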
fairo-25.7.1/fairo/core/execution/agent_serializer.py
@@ -0,0 +1,288 @@
+ from typing import Any, Dict
+ import mlflow
+ import cloudpickle
+ import os
+ import sys
+ from pathlib import Path
+ from langchain_core.runnables import RunnableLambda, Runnable
+ from langchain.chains import SimpleSequentialChain
+ import logging
+ import types
+ import threading
+ import pandas as pd
+ logger = logging.getLogger(__name__)
+
+ class CustomPythonModel(mlflow.pyfunc.PythonModel):
+     def __init__(self):
+         self.agent = None
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state.pop("lock", None)
+
+     def __setstate__(self, state):
+         self.__dict__.update(state)
+         self.lock = threading.Lock()
+
+     def load_context(self, context):
+         import sys
+         import os
+         import shutil
+
+         agent_code_path = context.model_config["agent_code"]
+         agent_code_dir = os.path.dirname(agent_code_path)
+
+         if agent_code_dir not in sys.path:
+             sys.path.insert(0, agent_code_dir)
+
+         for artifact_name, artifact_path in context.model_config.items():
+             if artifact_name.startswith("local_module_"):
+                 module_name = artifact_name.replace("local_module_", "")
+                 module_filename = f"{module_name}.py"
+                 dest_path = os.path.join(agent_code_dir, module_filename)
+
+                 if not os.path.exists(dest_path):
+                     shutil.copy2(artifact_path, dest_path)
+                     print(f"Restored local module: {module_name}")
+
+         try:
+             import agent_code
+             from agent_code import create_simple_agent
+             self.agent_func = create_simple_agent
+             self.agent = self.agent_func()
+         except ImportError as e:
+             raise ImportError(f"Failed to import agent_code: {e}")
+
+     def predict(self, context, model_input):
+         if isinstance(model_input, list):
+             return [self.agent.run(query) for query in model_input]
+         else:
+             return self.agent.run(model_input)
+
+ class AgentChainWrapper:
+     def __init__(self, chain_class = SimpleSequentialChain, agent_functions_list = []):
+         self.chain_class = chain_class
+         self.agents = [func() for func in agent_functions_list]
+         self.agent_functions = agent_functions_list
+
+     def _wrap_agent_runnable(self, agent) -> RunnableLambda:
+         """
+         Wraps the agent's .run() method into a RunnableLambda with a custom function name.
+         Properly propagates errors instead of continuing to the next agent.
+         """
+         def base_fn(inputs: Dict[str, Any]) -> Dict[str, Any]:
+             # Run the agent, but don't catch exceptions - let them propagate
+             # This will stop the entire pipeline on agent failure
+             return agent.invoke(inputs)
+
+         # Check if result starts with "An error occurred" which indicates agent failure
+         # if isinstance(result, str) and result.startswith("An error occurred during execution:"):
+         #     # Propagate the error by raising an exception to stop the execution
+         #     raise RuntimeError(f"Agent {agent.__class__.__name__} failed: {result}")
+
+         # return result
+
+         # Clone function and set custom name
+         fn_name = f"runnable_{agent.__class__.__name__.lower().replace(' ', '_')}"
+         runnable_fn = types.FunctionType(
+             base_fn.__code__,
+             base_fn.__globals__,
+             name=fn_name,
+             argdefs=base_fn.__defaults__,
+             closure=base_fn.__closure__,
+         )
+
+         return RunnableLambda(runnable_fn)
+
+     def run(self, query):
+         result = query
+         def is_dataframe(obj) -> bool:
+             try:
+                 return isinstance(obj, pd.DataFrame)
+             except Exception as e:
+                 return False
+         if is_dataframe(result):
+             result = result.to_dict(orient='records')[0]
+         runnables = []
+         for agent in self.agents:
+             if isinstance(agent, Runnable):
+                 runnables.append(agent)
+             else:
+                 runnables.append(
+                     self._wrap_agent_runnable(agent)
+                 )
+         if self.chain_class is SimpleSequentialChain:
+             pipeline = runnables[0]
+             for r in runnables[1:]:
+                 pipeline = pipeline | r
+             if is_dataframe(query):
+                 query = query.to_dict(orient='records')[0]
+             return pipeline.invoke(query)
+         chain = self.chain_class(
+             chains=runnables,
+         )
+         return chain.run(result)
+
+     def predict(self, context = "", model_input = ""):
+         return self.run(model_input)
+
+ class CustomChainModel(mlflow.pyfunc.PythonModel):
+     def __init__(self):
+         self.agent_chain = None
+         self.agents = []
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state.pop("lock", None)
+
+     def __setstate__(self, state):
+         self.__dict__.update(state)
+         self.lock = threading.Lock()
+
+     def load_context(self, context):
+         import sys
+         import os
+         import shutil
+         import importlib.util
+
+         # Get the directory where artifacts are stored
+         base_dir = os.path.dirname(list(context.artifacts.values())[0])
+
+         if base_dir not in sys.path:
+             sys.path.insert(0, base_dir)
+
+         # Restore local modules
+         for artifact_name, artifact_path in context.artifacts.items():
+             if artifact_name.startswith("local_module_"):
+                 module_name = artifact_name.replace("local_module_", "")
+                 module_filename = f"{module_name}.py"
+                 dest_path = os.path.join(base_dir, module_filename)
+
+                 if not os.path.exists(dest_path):
+                     shutil.copy2(artifact_path, dest_path)
+                     print(f"Restored local module: {module_name}")
+
+         # Load chain configuration
+         chain_config_path = context.artifacts["chain_config"]
+         spec = importlib.util.spec_from_file_location("chain_config", chain_config_path)
+         chain_config_module = importlib.util.module_from_spec(spec)
+         spec.loader.exec_module(chain_config_module)
+
+         chain_config = chain_config_module.CHAIN_CONFIG
+
+         # Load each agent
+         agent_functions = []
+         for agent_info in chain_config["agents"]:
+             agent_code_file = agent_info["agent_code_file"]
+             function_name = agent_info["function_name"]
+
+             # Load the agent module - handle the artifact key mapping
+             artifact_key = agent_code_file.replace(".py", "")
+             if artifact_key not in context.artifacts:
+                 # Try with agent_code_ prefix for consistency
+                 artifact_key = f"agent_code_{agent_info['name'].split('_')[-1]}"
+             agent_code_path = context.artifacts[artifact_key]
+             spec = importlib.util.spec_from_file_location("agent_module", agent_code_path)
+             agent_module = importlib.util.module_from_spec(spec)
+             spec.loader.exec_module(agent_module)
+
+             # Get the agent function
+             agent_function = getattr(agent_module, function_name)
+             agent_functions.append(agent_function)
+
+         # Create the agent chain
+         self.agent_chain = AgentChainWrapper(agent_functions_list=agent_functions)
+
+     def predict(self, context, model_input):
+         if isinstance(model_input, list):
+             return [self.agent_chain.run(query) for query in model_input]
+         else:
+             return self.agent_chain.run(model_input)
+
+ class CrewAgentWrapper:
+     def __init__(self, agent_func=None):
+         if agent_func is not None:
+             # During logging phase
+             try:
+                 from crew_agent import create_crew_agent
+                 self.base_agent = create_crew_agent()
+             except ImportError:
+                 raise ImportError("Could not import CrewAI agent functions")
+         else:
+             # During model loading phase
+             try:
+                 from agent_code import create_crew_agent
+                 self.base_agent = create_crew_agent()
+             except ImportError:
+                 try:
+                     from crew_agent import create_crew_agent
+                     self.base_agent = create_crew_agent()
+                 except ImportError:
+                     raise ImportError("Could not import CrewAI agent")
+
+     def run(self, query):
+         try:
+             if hasattr(self, 'base_agent'):
+                 # Import create_crew_with_task function
+                 try:
+                     from agent_code import create_crew_with_task
+                 except ImportError:
+                     from crew_agent import create_crew_with_task
+
+                 crew = create_crew_with_task(query)
+                 result = crew.kickoff()
+                 return str(result)
+             else:
+                 return "Error: Agent not properly initialized"
+         except Exception as e:
+             print(f"Error running CrewAI crew: {e}")
+             return f"Error executing query '{query}': {str(e)}"
+
+     def predict(self, context, model_input):
+         return self.run(model_input)
+
+ class CustomCrewModel(mlflow.pyfunc.PythonModel):
+     def __init__(self):
+         self.agent = None
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state.pop("lock", None)
+
+     def __setstate__(self, state):
+         self.__dict__.update(state)
+         self.lock = threading.Lock()
+
+     def load_context(self, context):
+         import sys
+         import os
+         import shutil
+
+         agent_code_path = context.model_config["agent_code"]
+         agent_code_dir = os.path.dirname(agent_code_path)
+
+         if agent_code_dir not in sys.path:
+             sys.path.insert(0, agent_code_dir)
+
+         for artifact_name, artifact_path in context.model_config.items():
+             if artifact_name.startswith("local_module_"):
+                 module_name = artifact_name.replace("local_module_", "")
+                 module_filename = f"{module_name}.py"
+                 dest_path = os.path.join(agent_code_dir, module_filename)
+
+                 if not os.path.exists(dest_path):
+                     shutil.copy2(artifact_path, dest_path)
+                     print(f"Restored local module: {module_name}")
+
+         try:
+             import agent_code
+             from agent_code import CrewAgentWrapper
+             self.agent = CrewAgentWrapper()
+         except ImportError as e:
+             raise ImportError(f"Failed to import CrewAI agent_code: {e}")
+
+     def predict(self, context, model_input):
+         if isinstance(model_input, list):
+             return [self.agent.run(query) for query in model_input]
+         else:
+             return self.agent.run(model_input)
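
A rough sketch of how the new AgentChainWrapper appears intended to be driven, based only on the code above: it takes a list of zero-argument factory callables, builds each agent, wraps non-Runnable agents in RunnableLambda, and composes everything into one LangChain pipeline. The two factory functions below are hypothetical stand-ins, not part of the package.

```python
from langchain_core.runnables import RunnableLambda
from fairo.core.execution.agent_serializer import AgentChainWrapper

# Hypothetical agent factories; each returns a Runnable the wrapper can chain.
def make_upper_agent():
    return RunnableLambda(lambda text: text.upper())

def make_exclaim_agent():
    return RunnableLambda(lambda text: f"{text}!")

# With the default SimpleSequentialChain class the wrapper pipes the runnables
# together with | and invokes the composed pipeline on the query.
chain = AgentChainWrapper(agent_functions_list=[make_upper_agent, make_exclaim_agent])
print(chain.run("hello fairo"))  # -> "HELLO FAIRO!"
```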
{fairo-25.6.5 → fairo-25.7.1}/fairo/core/execution/executor.py
@@ -1,51 +1,57 @@
  import json
  import os
- import types
  from typing import List, Any, Callable, Dict, Union
  from langchain_core.runnables import RunnableLambda, RunnableSequence
+ from langchain.chains import SimpleSequentialChain
  import logging
  
  import mlflow
  
  from mlflow.models.signature import ModelSignature
  from mlflow.types.schema import Schema, ColSpec
- from fairo.core.agent.base_agent import SimpleAgent
  from fairo.core.client.client import BaseClient
+ from fairo.core.execution.agent_serializer import AgentChainWrapper
  from fairo.core.execution.env_finder import read_variables
+ from fairo.core.execution.model_log_helper import ModelLogHelper
  from fairo.core.runnable.runnable import Runnable
- from fairo.core.workflow.utils import output_workflow_process_graph
+ from fairo.core.workflow.utils import output_langchain_process_graph
  from fairo.settings import get_fairo_api_key, get_fairo_api_secret, get_mlflow_experiment_name, get_mlflow_server, get_fairo_base_url
  
- # Optional interfaces/types
- class LLMAgentOutput:
-     pass
  
  logger = logging.getLogger(__name__)
  
  class FairoExecutor:
      def __init__(
          self,
+         agent_type: str = "Langchain",
          agents: List[Any] = [],
          verbose: bool = False,
-         patch_run_output_json: Callable[[LLMAgentOutput], None] = None,
+         patch_run_output_json: Callable[[Any], None] = None,
          workflow_run_id: str = "",
          runnable: Runnable = None,
-         experiment_name: str = None
+         experiment_name: str = None,
+         chain_class = SimpleSequentialChain,
+         input_fields: List[str] = [],
      ):
          if agents and runnable:
              raise ValueError("FairoExecutor cannot be initialized with both 'agents' and 'runnable'. Please provide only one.")
+         if not input_fields:
+             raise ValueError("Missing input_fields")
          self.agents = agents
+         self.agent_type = agent_type
          self.verbose = verbose
          self.patch_run_output_json = patch_run_output_json
          self.workflow_run_id = workflow_run_id
          self.runnable = runnable
          self.experiment_name = experiment_name if experiment_name else get_mlflow_experiment_name()
          self.setup_mlflow()
+         self.chain_class = chain_class
          self.client = BaseClient(
              base_url=get_fairo_base_url(),
              password=get_fairo_api_secret(),
              username=get_fairo_api_key()
          )
+         self.input_fields = input_fields
          # Inject shared attributes into agents
          for agent in self.agents:
              if hasattr(agent, 'set_client'):
@@ -53,81 +59,31 @@ class FairoExecutor:
          if hasattr(agent, 'verbose'):
              agent.verbose = self.verbose
  
-         self.pipeline = self._build_pipeline()
- 
-     def _wrap_agent_runnable(self, agent, input_key: str, output_key: str) -> RunnableLambda:
-         """
-         Wraps the agent's .run() method into a RunnableLambda with a custom function name.
-         Properly propagates errors instead of continuing to the next agent.
-         """
-         def base_fn(inputs: Dict[str, Any]) -> Dict[str, Any]:
-             if self.verbose:
-                 logger.info(f"[{agent.__class__.__name__}] received input: {inputs}")
- 
-             # Run the agent, but don't catch exceptions - let them propagate
-             # This will stop the entire pipeline on agent failure
-             result = agent.run(inputs[input_key])
- 
-             # Check if result starts with "An error occurred" which indicates agent failure
-             if isinstance(result, str) and result.startswith("An error occurred during execution:"):
-                 # Propagate the error by raising an exception to stop the execution
-                 raise RuntimeError(f"Agent {agent.__class__.__name__} failed: {result}")
- 
-             return {output_key: result}
- 
-         # Clone function and set custom name
-         fn_name = f"runnable_{agent.__class__.__name__.lower().replace(' ', '_')}"
-         runnable_fn = types.FunctionType(
-             base_fn.__code__,
-             base_fn.__globals__,
-             name=fn_name,
-             argdefs=base_fn.__defaults__,
-             closure=base_fn.__closure__,
-         )
- 
-         return RunnableLambda(runnable_fn)
- 
      def _build_pipeline(self) -> RunnableSequence:
          if not self.agents and not self.runnable:
              raise ValueError("At least one agent or runnable must be provided.")
  
          if self.runnable:
-             pipeline = mlflow.langchain.load_model(self.runnable.artifact_path)
- 
-         # Assign input/output keys
-         if len(self.agents) > 0:
-             for i, agent in enumerate(self.agents):
-                 agent.input_key = "input" if i == 0 else f"output_{i - 1}"
-                 agent.output_key = f"output_{i}"
- 
-             runnables = []
-             for agent in self.agents:
-                 runnables.append(
-                     self._wrap_agent_runnable(agent, agent.input_key, agent.output_key)
-                 )
-             first_input_key = self.agents[0].input_key
-             last_output_key = self.agents[-1].output_key
-             # Build RunnableSequence from all steps
-             pipeline = runnables[0]
-             for r in runnables[1:]:
-                 pipeline = pipeline | r  # chaining
- 
-         input_schema = Schema([
-             ColSpec(type="string", name=first_input_key),
-         ])
+             pipeline = mlflow.pyfunc.load_model(self.runnable.artifact_path)
+         else:
+             pipeline = AgentChainWrapper(chain_class=self.chain_class, agent_functions_list=self.agents)
+         cols = [ColSpec(type="string", name=field) for field in self.input_fields]
+         input_schema = Schema(cols)
  
          output_schema = Schema([
-             ColSpec(type="string", name=last_output_key),
+             ColSpec(type="string", name="output"),
          ])
-         model_info = mlflow.langchain.log_model(
-             pipeline,
-             artifact_path="",
-             signature=ModelSignature(inputs=input_schema, outputs=output_schema)
-         )
+         current_run = mlflow.active_run()
+         # Log Model
+         ModelLogHelper(
+             agent_type=self.agent_type,
+             signature=ModelSignature(inputs=input_schema, outputs=output_schema),
+             agents=self.agents,
+         ).log_model()
  
          def save_process_graph():
              if len(self.agents) > 0:
-                 process_graph = output_workflow_process_graph(self.agents)
+                 process_graph = output_langchain_process_graph([ag() for ag in self.agents])
                  if len(self.agents) > 1:
                      type = "Workflow"
                  else:
@@ -141,11 +97,16 @@ class FairoExecutor:
              fairo_settings = {
                  "type": type,
                  "process_graph": process_graph,
+                 "input_schema": self.input_fields
              }
              if process_graph:
                  mlflow.log_text(json.dumps(fairo_settings, ensure_ascii=False, indent=2), artifact_file="fairo_settings.txt")
+ 
          if self.agents:
-             save_process_graph()
+             try:
+                 save_process_graph()
+             except Exception as e:
+                 logger.warning("It wasn't possible to generate and save process graph")
          try:
              # Find environment variables used in the project
              all_env_vars = read_variables()
@@ -173,30 +134,28 @@
          Execute the pipeline using the provided input.
          Properly handles and propagates errors from agents.
          """
-         if self.agents:
-             first_input_key = getattr(self.agents[0], 'input_key', 'input')
-         else:
-             first_input_key = 'input'
- 
-         # Normalize input
-         if isinstance(input_data, str):
-             input_data = {first_input_key: input_data}
-         elif self.agents and (first_input_key not in input_data):
-             raise ValueError(f"Missing required input key: '{first_input_key}'")
- 
          if self.verbose:
              logger.info("Running agent pipeline...")
              logger.info(f"Initial input: {input_data}")
  
          try:
              # Run the pipeline but don't catch exceptions
-             result = self.pipeline.invoke(input_data)
- 
-             if self.verbose:
-                 logger.info("Pipeline execution completed")
-                 logger.info(f"Final output: {result}")
+             with mlflow.start_run() as run:
+                 mlflow.autolog(
+                     log_traces=True,
+                     log_input_examples=True,
+                 )
+                 self.pipeline = self._build_pipeline()
+                 if self.runnable:
+                     result = self.pipeline.predict(input_data)
+                 else:
+                     result = self.pipeline.predict(model_input=input_data)
  
-             return result
+                 if self.verbose:
+                     logger.info("Pipeline execution completed")
+                     logger.info(f"Final output: {result}")
+ 
+                 return result
  
          except Exception as e:
              # Log the error
@@ -217,10 +176,4 @@
          mlflow.set_tracking_uri(get_mlflow_server())
          mlflow.set_experiment(experiment_name=self.experiment_name)
          _clean_mlflow_env_vars()
-         setup_mlflow_tracking_server()
-         with mlflow.start_run():
-             mlflow.langchain.autolog(
-                 log_traces=True,
-                 log_input_examples=True,
-             )
- 
+         setup_mlflow_tracking_server()
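
Taken together, FairoExecutor now accepts agent_type, chain_class, and a mandatory input_fields list, defers pipeline construction to execution time inside an active MLflow run, and logs the model through ModelLogHelper. A hedged sketch of the updated call pattern follows; the agent factory, the field name, and the run(...) entry point are assumptions inferred from the hunks above, and Fairo/MLflow credentials are expected to come from the usual settings and environment variables.

```python
from langchain_core.runnables import RunnableLambda
from fairo.core.execution.executor import FairoExecutor

# Hypothetical agent factory: the executor now expects callables that build the
# agents, since _build_pipeline hands them to AgentChainWrapper and
# save_process_graph calls them again via [ag() for ag in self.agents].
def build_echo_agent():
    return RunnableLambda(lambda payload: {"output": str(payload)})

executor = FairoExecutor(
    agent_type="Langchain",       # new in 25.7.1
    agents=[build_echo_agent],
    input_fields=["input"],       # now required; drives the model signature
    verbose=True,
)

# Assumed entry point: the method shown in the diff opens an MLflow run,
# enables autologging, logs the model, then calls predict() on the pipeline.
result = executor.run({"input": "What changed in 25.7.1?"})
```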