fairo 25.8.2__tar.gz → 25.9.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fairo might be problematic.

Files changed (52)
  1. {fairo-25.8.2 → fairo-25.9.2}/PKG-INFO +2 -4
  2. fairo-25.9.2/fairo/__init__.py +1 -0
  3. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/chat/chat.py +8 -12
  4. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/execution/executor.py +12 -7
  5. fairo-25.9.2/fairo/core/tools/__init__.py +2 -0
  6. fairo-25.9.2/fairo/core/tools/plot.py +229 -0
  7. fairo-25.9.2/fairo/core/tools/suggestion.py +43 -0
  8. fairo-25.9.2/fairo/core/utils.py +28 -0
  9. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/workflow/dependency.py +11 -146
  10. {fairo-25.8.2 → fairo-25.9.2}/fairo.egg-info/PKG-INFO +2 -4
  11. {fairo-25.8.2 → fairo-25.9.2}/fairo.egg-info/SOURCES.txt +4 -0
  12. {fairo-25.8.2 → fairo-25.9.2}/fairo.egg-info/requires.txt +1 -3
  13. {fairo-25.8.2 → fairo-25.9.2}/pyproject.toml +1 -3
  14. fairo-25.8.2/fairo/__init__.py +0 -1
  15. {fairo-25.8.2 → fairo-25.9.2}/README.md +0 -0
  16. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/__init__.py +0 -0
  17. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/__init__.py +0 -0
  18. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/base_agent.py +0 -0
  19. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/code_analysis_agent.py +0 -0
  20. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/output/__init__.py +0 -0
  21. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/output/base_output.py +0 -0
  22. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/output/google_drive.py +0 -0
  23. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/tools/__init__.py +0 -0
  24. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/tools/base_tools.py +0 -0
  25. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/tools/code_analysis.py +0 -0
  26. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/tools/utils.py +0 -0
  27. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/agent/utils.py +0 -0
  28. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/chat/__init__.py +0 -0
  29. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/client/__init__.py +0 -0
  30. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/client/client.py +0 -0
  31. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/exceptions.py +0 -0
  32. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/execution/__init__.py +0 -0
  33. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/execution/agent_serializer.py +0 -0
  34. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/execution/env_finder.py +0 -0
  35. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/execution/model_log_helper.py +0 -0
  36. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/models/__init__.py +0 -0
  37. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/models/custom_field_value.py +0 -0
  38. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/models/resources.py +0 -0
  39. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/runnable/__init__.py +0 -0
  40. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/runnable/runnable.py +0 -0
  41. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/workflow/__init__.py +0 -0
  42. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/workflow/base_workflow.py +0 -0
  43. {fairo-25.8.2 → fairo-25.9.2}/fairo/core/workflow/utils.py +0 -0
  44. {fairo-25.8.2 → fairo-25.9.2}/fairo/metrics/__init__.py +0 -0
  45. {fairo-25.8.2 → fairo-25.9.2}/fairo/metrics/fairness_object.py +0 -0
  46. {fairo-25.8.2 → fairo-25.9.2}/fairo/metrics/metrics.py +0 -0
  47. {fairo-25.8.2 → fairo-25.9.2}/fairo/settings.py +0 -0
  48. {fairo-25.8.2 → fairo-25.9.2}/fairo/tests/__init__.py +0 -0
  49. {fairo-25.8.2 → fairo-25.9.2}/fairo/tests/test_metrics.py +0 -0
  50. {fairo-25.8.2 → fairo-25.9.2}/fairo.egg-info/dependency_links.txt +0 -0
  51. {fairo-25.8.2 → fairo-25.9.2}/fairo.egg-info/top_level.txt +0 -0
  52. {fairo-25.8.2 → fairo-25.9.2}/setup.cfg +0 -0

{fairo-25.8.2 → fairo-25.9.2}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fairo
- Version: 25.8.2
+ Version: 25.9.2
  Summary: SDK for interfacing with Fairo SaaS platform.
  Author-email: "Fairo Systems, Inc." <support@fairo.ai>
  License: Apache-2.0
@@ -11,14 +11,12 @@ Classifier: Programming Language :: Python :: 3.10
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent
  Description-Content-Type: text/markdown
- Requires-Dist: mlflow<=3.2.0,>=3.1.0
+ Requires-Dist: mlflow<=3.1.1,>=3.1.0
  Requires-Dist: langchain<0.4.0,>=0.3.20
  Requires-Dist: langchain-aws<0.3.0,>=0.2.18
  Requires-Dist: langchain-community<0.4.0,>=0.3.20
  Requires-Dist: langchain-core<0.4.0,>=0.3.49
  Requires-Dist: langchain-text-splitters<0.4.0,>=0.3.7
- Requires-Dist: psycopg2-binary<3.0.0,>=2.9.0
- Requires-Dist: langchain-postgres<0.1.0,>=0.0.14
  Requires-Dist: setuptools>=79.0.0
  Requires-Dist: pandas<3.0.0,>=2.0.0

fairo-25.9.2/fairo/__init__.py

@@ -0,0 +1 @@
+ __version__ = "25.9.2"

{fairo-25.8.2 → fairo-25.9.2}/fairo/core/chat/chat.py

@@ -31,10 +31,10 @@ class FairoDeploymentClient(BaseDeploymentClient):

  # Add authentication if needed
  auth = None
- if os.environ.get('MLFLOW_TRACKING_USERNAME') and os.environ.get('MLFLOW_TRACKING_PASSWORD'):
+ if os.environ.get('FAIRO_API_ACCESS_KEY_ID') and os.environ.get('FAIRO_API_SECRET'):
  auth = HTTPBasicAuth(
- os.environ.get('MLFLOW_TRACKING_USERNAME'),
- os.environ.get('MLFLOW_TRACKING_PASSWORD')
+ os.environ.get('FAIRO_API_ACCESS_KEY_ID'),
+ os.environ.get('FAIRO_API_SECRET')
  )

  # Make streaming request
@@ -116,10 +116,10 @@ class FairoDeploymentClient(BaseDeploymentClient):

  # Add authentication if needed
  auth = None
- if os.environ.get('MLFLOW_TRACKING_USERNAME') and os.environ.get('MLFLOW_TRACKING_PASSWORD'):
+ if os.environ.get('FAIRO_API_ACCESS_KEY_ID') and os.environ.get('FAIRO_API_SECRET'):
  auth = HTTPBasicAuth(
- os.environ.get('MLFLOW_TRACKING_USERNAME'),
- os.environ.get('MLFLOW_TRACKING_PASSWORD')
+ os.environ.get('FAIRO_API_ACCESS_KEY_ID'),
+ os.environ.get('FAIRO_API_SECRET')
  )

  if os.environ.get('MLFLOW_TRACKING_TOKEN'):
@@ -183,10 +183,6 @@ class ChatFairo(ChatMlflow):

  def __init__(self, **kwargs):

- # # TODO <- see if this can be improved
- # os.environ["MLFLOW_TRACKING_USERNAME"] = get_mlflow_user()
- # os.environ["MLFLOW_TRACKING_PASSWORD"] = get_mlflow_password()
-
  super().__init__(
  target_uri=os.environ.get('MLFLOW_GATEWAY_URI', get_mlflow_gateway_uri()),
  endpoint=os.environ.get('MLFLOW_GATEWAY_ROUTE', get_mlflow_gateway_chat_route()),
@@ -197,11 +193,11 @@ class ChatFairo(ChatMlflow):

  @property
  def _target_uri(self):
- return os.environ.get("MLFLOW_GATEWAY_URI", None)
+ return os.environ.get("MLFLOW_GATEWAY_URI", get_mlflow_gateway_uri())

  @property
  def _endpoint(self):
- return os.environ.get("MLFLOW_GATEWAY_ROUTE", None)
+ return os.environ.get("MLFLOW_GATEWAY_ROUTE", get_mlflow_gateway_chat_route())

  def invoke(self, *args, **kwargs):
  # Override invoke to use dynamic target_uri
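
A minimal usage sketch for the authentication change above: the deployment client now reads FAIRO_API_ACCESS_KEY_ID / FAIRO_API_SECRET instead of the MLflow tracking variables. The import path follows the file list, but the placeholder values and the plain-string invoke are assumptions, not something this diff shows.

    import os
    from fairo.core.chat.chat import ChatFairo  # module path assumed from the file list above

    # Basic auth for the Fairo gateway is built from these two variables.
    os.environ["FAIRO_API_ACCESS_KEY_ID"] = "<your-access-key-id>"  # placeholder
    os.environ["FAIRO_API_SECRET"] = "<your-secret>"                # placeholder

    # target_uri/endpoint fall back to get_mlflow_gateway_uri() and
    # get_mlflow_gateway_chat_route() when the MLFLOW_GATEWAY_* variables are unset.
    chat = ChatFairo()
    print(chat.invoke("Hello"))  # standard LangChain chat-model call, assuming a reachable gateway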

{fairo-25.8.2 → fairo-25.9.2}/fairo/core/execution/executor.py

@@ -1,10 +1,12 @@
  import json
  import os
- from typing import List, Any, Callable, Dict, Union
+ from typing import List, Any, Callable, Dict, Optional, Type, Union
  from langchain_core.runnables import RunnableLambda, RunnableSequence
  from langchain.chains import SimpleSequentialChain
  import logging

+ from pydantic import BaseModel
+
  import mlflow

  from mlflow.models.signature import ModelSignature
@@ -16,14 +18,13 @@ from fairo.core.execution.model_log_helper import ModelLogHelper
  from fairo.core.runnable.runnable import Runnable
  from fairo.core.workflow.utils import output_langchain_process_graph
  from fairo.settings import get_fairo_api_key, get_fairo_api_secret, get_mlflow_experiment_name, get_mlflow_server, get_fairo_base_url
-
+ from fairo.core.tools import ChatSuggestions

  logger = logging.getLogger(__name__)

  class FairoExecutor:
  def __init__(
  self,
- input_schema,
  agent_type: str = "Langchain",
  agents: List[Any] = [],
  verbose: bool = False,
@@ -33,11 +34,13 @@ class FairoExecutor:
  experiment_name: str = None,
  chain_class = SimpleSequentialChain,
  input_fields: List[str] = [],
+ input_schema: Optional[Type[BaseModel]] = None,
+ chat_suggestions: Optional[ChatSuggestions] = None
  ):
  if agents and runnable:
  raise ValueError("FairoExecutor cannot be initialized with both 'agents' and 'runnable'. Please provide only one.")
- if not input_fields:
- raise ValueError("Missing input_fields")
+ if not input_fields and not input_schema:
+ raise ValueError("Missing required parameters: please provide at least one of 'input_fields' or 'input_schema'")
  self.input_schema = input_schema
  self.agents = agents
  self.agent_type = agent_type
@@ -53,6 +56,7 @@ class FairoExecutor:
  password=get_fairo_api_secret(),
  username=get_fairo_api_key()
  )
+ self.chat_suggestions = chat_suggestions
  self.input_fields = input_fields
  # Inject shared attributes into agents
  for agent in self.agents:
@@ -117,8 +121,9 @@
  fairo_settings = {
  "type": type,
  "process_graph": process_graph,
- "schema": self.input_schema.model_json_schema(),
- "input_fields": list(self.input_schema.model_fields.keys()),
+ "schema": self.input_schema.model_json_schema() if self.input_schema else None,
+ "input_fields": list(self.input_schema.model_fields.keys()) if self.input_schema else self.input_fields,
+ "chat_suggestions": self.chat_suggestions.model_dump() if self.chat_suggestions else None,
  }
  if process_graph:
  mlflow.log_text(json.dumps(fairo_settings, ensure_ascii=False, indent=2), artifact_file="fairo_settings.txt")
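
A sketch of the new FairoExecutor parameters, assuming Fairo credentials are already configured in the environment and omitting the agents/runnable wiring the class otherwise expects; ChatInput is a hypothetical schema.

    from pydantic import BaseModel
    from fairo.core.execution.executor import FairoExecutor  # module path from the file list above
    from fairo.core.tools import ChatSuggestion, ChatSuggestions

    class ChatInput(BaseModel):
        question: str

    # Either input_fields or input_schema now satisfies the validation check;
    # chat_suggestions is serialized into fairo_settings via model_dump().
    executor = FairoExecutor(
        input_schema=ChatInput,
        chat_suggestions=ChatSuggestions(
            suggestions=[ChatSuggestion(prompt="What can you do?")]
        ),
    )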

fairo-25.9.2/fairo/core/tools/__init__.py

@@ -0,0 +1,2 @@
+ from .plot import generate_plot
+ from .suggestion import send_chat_suggestions, ChatSuggestions, ChatSuggestion

fairo-25.9.2/fairo/core/tools/plot.py

@@ -0,0 +1,229 @@
+ from typing import Dict, List, Literal, Optional, Union
+ from pydantic import BaseModel, Field, ConfigDict
+ from enum import Enum
+ from langchain_core.tools import tool
+ import matplotlib
+ matplotlib.use("Agg") # headless
+ import matplotlib.pyplot as plt
+ import io
+ import base64
+
+ class ChartTypeEnum(str, Enum):
+ line = "line"
+ bar = "bar"
+ scatter = "scatter"
+ hist = "hist"
+ area = "area"
+
+ EXAMPLES = [
+ {
+ "chart_type": "line",
+ "data": {"month": ["Jan","Feb","Mar"], "revenue": [10, 12, 15]},
+ "x": "month",
+ "y": "revenue",
+ "title": "Revenue by Month"
+ },
+ {
+ "chart_type": "bar",
+ "data": {"cat": ["A","B"], "m1": [1,2], "m2":[2,1]},
+ "x": "cat",
+ "y": ["m1","m2"],
+ "title": "Grouped Bars"
+ }
+ ]
+
+ class PlotSpec(BaseModel):
+ """
+ Either provide a 'spec' for a chart OR provide 'python_code' to execute.
+
+ If you provide 'spec', set:
+ - chart_type: one of line|bar|scatter|hist|area
+ - data: list of dicts (table rows) OR {"x":[...], "y":[...]} arrays
+ - x: x key (for list-of-dicts) or leave None if arrays
+ - y: y key or list of keys for multi-series
+ - title, xlabel, ylabel: optional labels
+ - width, height in inches (optional)
+ If you provide 'python_code', it must create a 'fig' and 'ax' variable.
+ """
+ model_config = ConfigDict(extra="allow", json_schema_extra={"examples": EXAMPLES})
+
+ # Option A: high-level specification
+ chart_type: Optional[ChartTypeEnum] = Field(None, description="Chart type.")
+ data: Optional[Union[List[Dict], Dict[str, List]]] = Field(
+ None, description="Either list of dict rows OR dict of arrays."
+ )
+ x: Optional[Union[str, List[str]]] = Field(None, description="X column or key(s).")
+ y: Optional[Union[str, List[str]]] = Field(None, description="Y column or key(s).")
+ title: Optional[str] = None
+ xlabel: Optional[str] = None
+ ylabel: Optional[str] = None
+ width: Optional[float] = Field(6.0, description="Figure width in inches.")
+ height: Optional[float] = Field(4.0, description="Figure height in inches.")
+ legend: Optional[bool] = True
+ grid: Optional[bool] = True
+ alpha: Optional[float] = 1.0
+
+ # Option B: low-level custom code (SAFE-ish)
+ python_code: Optional[str] = Field(
+ None,
+ description=(
+ "Matplotlib code that defines 'fig, ax'. Avoid imports; "
+ "numpy is available as 'np' and matplotlib.pyplot as 'plt'."
+ ),
+ )
+
+
+ def _png_base64_from_fig(fig) -> str:
+ buf = io.BytesIO()
+ fig.savefig(buf, format="png", dpi=150, bbox_inches="tight")
+ buf.seek(0)
+ b64 = base64.b64encode(buf.read()).decode("utf-8")
+ buf.close()
+ return b64
+
+
+ def _plot_from_spec(spec: PlotSpec) -> str:
+ # Build figure
+ fig, ax = plt.subplots(figsize=(spec.width or 6.0, spec.height or 4.0))
+
+ # Normalize data
+ data = spec.data or {}
+ is_rows = isinstance(data, list)
+ is_arrays = isinstance(data, dict) and all(isinstance(v, list) for v in data.values())
+
+ def extract_series(xkey, ykey):
+ if is_rows:
+ xs = [row.get(xkey) for row in data]
+ ys = [row.get(ykey) for row in data]
+ elif is_arrays:
+ xs = data.get(xkey) if xkey else range(len(data.get(ykey, [])))
+ ys = data.get(ykey, [])
+ else:
+ raise ValueError("Unsupported data format. Provide list[dict] or dict of arrays.")
+ return xs, ys
+
+ # Handle y being single or multiple
+ y_keys = [spec.y] if isinstance(spec.y, (str, type(None))) else list(spec.y or [])
+ if not y_keys: # auto-choose 'y' if present
+ y_keys = ["y"] if (is_arrays and "y" in data) else []
+ if not y_keys:
+ raise ValueError("Could not infer 'y' series; please set y.")
+
+ # Plot by chart_type
+ if spec.chart_type == "line":
+ for ykey in y_keys:
+ xs, ys = extract_series(spec.x, ykey)
+ ax.plot(xs, ys, alpha=spec.alpha, label=ykey if len(y_keys) > 1 else None)
+ elif spec.chart_type == "bar":
+ # Simple grouped bar if multiple y
+ import numpy as np
+ xs, _ = extract_series(spec.x, y_keys[0])
+ x_index = np.arange(len(xs))
+ n = len(y_keys)
+ width = 0.8 / n
+ for i, ykey in enumerate(y_keys):
+ _, ys = extract_series(spec.x, ykey)
+ ax.bar(x_index + i * width, ys, width=width, alpha=spec.alpha, label=ykey)
+ ax.set_xticks(x_index + width * (n - 1) / 2)
+ ax.set_xticklabels(xs, rotation=0)
+ elif spec.chart_type == "scatter":
+ for ykey in y_keys:
+ xs, ys = extract_series(spec.x, ykey)
+ ax.scatter(xs, ys, alpha=spec.alpha, label=ykey if len(y_keys) > 1 else None)
+ elif spec.chart_type == "hist":
+ # Expect a single series in y
+ xs, ys = extract_series(spec.x, y_keys[0])
+ values = ys if ys else xs
+ ax.hist(values, alpha=spec.alpha)
+ elif spec.chart_type == "area":
+ for ykey in y_keys:
+ xs, ys = extract_series(spec.x, ykey)
+ ax.fill_between(xs, ys, alpha=spec.alpha, label=ykey if len(y_keys) > 1 else None)
+ else:
+ raise ValueError("Unsupported chart_type. Use line|bar|scatter|hist|area.")
+
+ # Labels & cosmetics
+ if spec.title:
+ ax.set_title(spec.title)
+ if spec.xlabel:
+ ax.set_xlabel(spec.xlabel)
+ if spec.ylabel:
+ ax.set_ylabel(spec.ylabel)
+ if spec.grid:
+ ax.grid(True, linestyle="--", linewidth=0.5, alpha=0.5)
+ if spec.legend and len(y_keys) > 1:
+ ax.legend()
+
+ # Return base64
+ b64 = _png_base64_from_fig(fig)
+ plt.close(fig)
+ return b64
+
+
+ def _plot_from_code(code: str) -> str:
+ """
+ Execute minimal matplotlib code that must define 'fig' and 'ax'.
+ VERY LIMITED namespace to reduce risk.
+ """
+ import numpy as np
+
+ allowed_globals = {
+ "__builtins__": {
+ "len": len, "range": range, "min": min, "max": max, "sum": sum, "abs": abs
+ },
+ "np": np,
+ "plt": plt,
+ }
+ local_vars = {}
+
+ # Simple guardrails
+ forbidden = ["import os", "import sys", "open(", "subprocess", "socket", "eval(", "exec("]
+ if any(tok in code for tok in forbidden):
+ raise ValueError("Disallowed token in python_code.")
+
+ exec(code, allowed_globals, local_vars) # noqa: S102 (intentional, guarded)
+ fig = local_vars.get("fig")
+ ax = local_vars.get("ax")
+ if fig is None or ax is None:
+ raise ValueError("Your python_code must create variables 'fig' and 'ax'.")
+ b64 = _png_base64_from_fig(fig)
+ plt.close(fig)
+ return b64
+
+
+ class PlotReturn(BaseModel):
+ mime_type: Literal["image/png"] = "image/png"
+ data_base64: str
+ alt_text: Optional[str] = None
+ debug: Optional[str] = None
+
+
+ @tool(args_schema=PlotSpec)
+ def generate_plot(**kwargs) -> str:
+ """
+ Generate a plot image (PNG base64). Returns a JSON string with keys:
+ - mime_type: 'image/png'
+ - data_base64: <base64 string>
+ - alt_text: optional
+ """
+ spec = PlotSpec(**kwargs)
+ try:
+ if spec.python_code:
+ b64 = _plot_from_code(spec.python_code)
+ alt = spec.title or "Custom matplotlib figure"
+ else:
+ b64 = _plot_from_spec(spec)
+ alt = spec.title or (
+ f"{spec.chart_type} plot" if spec.chart_type else "Plot"
+ )
+ result = PlotReturn(data_base64=b64, alt_text=alt)
+ # Return a stringified JSON so the LLM can pass it through easily
+ return f"""data:{result.mime_type};base64,{result.data_base64}"""
+ except Exception as e:
+ # Return an error payload the orchestrator can handle
+ err = PlotReturn(
+ data_base64="",
+ alt_text="Plot generation failed.",
+ debug=str(e),
+ )
+ return err.model_dump_json()
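
A usage sketch for the new generate_plot tool, assuming the standard LangChain structured-tool convention of calling .invoke with a PlotSpec-shaped dict; the sample data mirrors the EXAMPLES block above.

    from fairo.core.tools import generate_plot

    # On success the tool returns a data URI ("data:image/png;base64,..."),
    # on failure a PlotReturn JSON payload whose 'debug' field carries the error.
    data_uri = generate_plot.invoke({
        "chart_type": "line",
        "data": {"month": ["Jan", "Feb", "Mar"], "revenue": [10, 12, 15]},
        "x": "month",
        "y": "revenue",
        "title": "Revenue by Month",
    })
    print(data_uri[:30])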

fairo-25.9.2/fairo/core/tools/suggestion.py

@@ -0,0 +1,43 @@
+ import json
+ from typing import List, Literal, Optional
+ from pydantic import BaseModel, Field
+ from langchain_core.tools import tool
+
+ class ChatSuggestion(BaseModel):
+ action: Optional[Literal["enable_chat"]] = Field(default=None, description="Custom actions available to be dispatched by the chat interface")
+ prompt: str = Field(..., description="Suggestion text to show in UI")
+ model_config = {
+ "extra": "ignore"
+ }
+
+ class ChatSuggestions(BaseModel):
+ chat_enabled: Optional[bool] = Field(default=True, description="This will let user only answer using the suggestions provided")
+ suggestions: List[ChatSuggestion] = Field(..., description="List of suggestions that will be available for the user")
+
+ @tool(args_schema=ChatSuggestions)
+ def send_chat_suggestions(chat_enabled: Optional[bool] = True,
+ suggestions: List[ChatSuggestion] = None):
+ """
+ This tool can be used to provide user predefined prompts and help during the user experience
+ Example input:
+ {
+ "suggestions": [
+ {
+ "action": null,
+ "prompt": "Suggestion 1 Prompt"
+ },
+ {
+ "action": null,
+ "prompt": "Suggestion 2 Prompt"
+ },
+ {
+ "action": "enable_chat",
+ "prompt": "Other"
+ }
+ ],
+ }
+ """
+ # No side-effects needed; return the same payload so it's accessible
+ if not suggestions:
+ return []
+ return {"chat_enabled": chat_enabled, "suggestions": [s.model_dump() for s in suggestions]}

fairo-25.9.2/fairo/core/utils.py

@@ -0,0 +1,28 @@
+ def parse_chat_interface_output(agent_executor_result):
+ """
+ Parses agent executor result into chat interface response
+ return_intermediate_steps must be set as true on the AgentExecutor in order to properly parse plot and suggestions
+ """
+ messages = [{"role": "assistant", "content": [
+ {
+ "type": "text",
+ "text": agent_executor_result["output"]
+ }
+ ]}]
+ suggestions = []
+ intermediate_steps = agent_executor_result.get('intermediate_steps', [])
+ for step, output in intermediate_steps:
+ if step.tool == "generate_plot":
+ messages.append({"role": "assistant", "content": [
+ {
+ "type": "image",
+ "image": output
+ }
+ ]})
+ if step.tool == "send_chat_suggestions":
+ suggestions = output
+
+ return {
+ "messages": messages,
+ "suggestions": suggestions
+ }
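
A self-contained sketch of what parse_chat_interface_output does with an AgentExecutor result; _Step is a hypothetical stand-in for LangChain's AgentAction (only its .tool attribute is read), and the base64 string is a placeholder.

    from fairo.core.utils import parse_chat_interface_output  # module path from the file list above

    class _Step:
        """Hypothetical stand-in for langchain_core.agents.AgentAction."""
        def __init__(self, tool):
            self.tool = tool

    # Shape of an AgentExecutor result when return_intermediate_steps=True.
    result = {
        "output": "Here is the revenue chart.",
        "intermediate_steps": [
            (_Step("generate_plot"), "data:image/png;base64,<placeholder>"),
            (_Step("send_chat_suggestions"), {"chat_enabled": True, "suggestions": []}),
        ],
    }

    response = parse_chat_interface_output(result)
    # response["messages"] holds a text block plus an image block;
    # response["suggestions"] carries whatever send_chat_suggestions returned.
    print(response)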

{fairo-25.8.2 → fairo-25.9.2}/fairo/core/workflow/dependency.py

@@ -1,155 +1,16 @@
  import os
  from typing import Any, Dict, List, Optional, Tuple
- from langchain_aws import BedrockEmbeddings
  from langchain_community.embeddings.mlflow import MlflowEmbeddings
  from langchain_core.documents import Document
- from langchain_postgres import PGVector
- from fairo.settings import get_mlflow_gateway_embeddings_route, get_mlflow_gateway_uri
+ from fairo.settings import get_mlflow_gateway_embeddings_route, get_mlflow_gateway_uri, get_fairo_api_key, get_fairo_api_secret, get_fairo_base_url
  from fairo.core.client.client import BaseClient
- AWS_AI_EMBEDDING_MODEL = 'cohere.embed-english-v3'
  import requests
- import uuid
+ AWS_AI_EMBEDDING_MODEL = 'cohere.embed-english-v3'


  class BaseVectorStore:
  pass

-
- class PostgresVectorStore(BaseVectorStore):
- """
- A PostgreSQL-based vector store using LangChain and pgvector
- """
-
- def __init__(
- self,
- collection_name: str,
- embedding_model_id: str = AWS_AI_EMBEDDING_MODEL,
- region_name: str = None,
- collection_metadata: dict = None,
- connection_string: str = "postgresql://postgres:postgres@localhost:5432/vectordb",
- pre_delete_collection: bool = False,
- default_k: int = 5
- ):
- """
- Args:
- collection_name: Name of the collection in PostgreSQL
- embedding_model_id: Bedrock embedding model ID
- region_name: AWS region for Bedrock
- collection_metadata: Dict for what metadata we want to add to collection
- connection_string: PostgreSQL connection string
- """
- self.collection_name = collection_name
- self.connection_string = connection_string
-
- # Set up embeddings
- self.embeddings = MlflowEmbeddings(
- target_uri=get_mlflow_gateway_uri(),
- endpoint=get_mlflow_gateway_embeddings_route(),
- )
-
- if collection_metadata is not None:
- self.collection_metadata = collection_metadata
-
- # Initialize the PGVector store
- self.db = PGVector(
- collection_name=collection_name,
- connection=connection_string,
- collection_metadata=self.collection_metadata,
- embeddings=self.embeddings,
- pre_delete_collection=pre_delete_collection
- )
-
- self.default_k = default_k
-
- def add_documents(self, documents: List[Document]) -> None:
- """
- Args:
- documents: List of Document objects to add
- """
- if not documents:
- return
-
- # Add documents to PGVector
- self.db.add_documents(documents)
-
- def add_texts(self, texts: List[str], metadatas: Optional[List[Dict[str, Any]]] = None) -> None:
- """
- Args:
- texts: List of text strings to add
- metadatas: Optional list of metadata dictionaries
- """
- if not texts:
- return
-
- # Convert to Document objects
- documents = []
- for i, text in enumerate(texts):
- metadata = metadatas[i] if metadatas and i < len(metadatas) else {}
- documents.append(Document(page_content=text, metadata=metadata))
-
- # Add to vector store
- self.add_documents(documents)
-
- @staticmethod
- def _format_query(query):
- # Temporary fix, need to consider model / do more than truncate
- return query[0:2048]
-
- def similarity_search(self, query: str, k: int = None) -> List[Document]:
- """
- Args:
- query: The search query
- k: Number of results to return
- """
- formatted_query = self._format_query(query)
- if k is None:
- k = self.default_k
- return self.db.similarity_search(formatted_query, k=k)
-
- def similarity_search_with_score(self, query: str, k: int = 4) -> List[tuple[Document, float]]:
- """
- Args:
- query: The search query
- k: Number of results to return
- """
- formatted_query = self._format_query(query)
- if k is None:
- k = self.default_k
- return self.db.similarity_search_with_score(formatted_query, k=k)
-
- def delete(self) -> None:
- """Delete the collection from PostgreSQL."""
- try:
- # Use the internal PGVector method to delete a collection
- self.db._client.delete_collection(self.collection_name)
- except Exception as e:
- print(f"Error deleting collection: {str(e)}")
-
- @classmethod
- def from_existing(cls,
- collection_name: str,
- embedding_model_id: str = AWS_AI_EMBEDDING_MODEL,
- region_name: str = None,
- connection_string: str = "postgresql://postgres:postgres@localhost:5432/vectordb"):
- """
- Load an existing collection from PostgreSQL.
-
- Args:
- collection_name: Name of the existing collection
- embedding_model_id: Bedrock embedding model ID
- region_name: AWS region for Bedrock
- connection_string: PostgreSQL connection string
-
- Returns:
- PostgresVectorStore instance connected to the existing collection
- """
- return cls(
- collection_name=collection_name,
- embedding_model_id=embedding_model_id,
- region_name=region_name,
- connection_string=connection_string
- )
-
  class FairoVectorStore(BaseVectorStore):
  """
  A vector store implementation using the Fairo API
@@ -182,14 +43,18 @@ class FairoVectorStore(BaseVectorStore):
  self.collection_name = collection_name

  # Get credentials from parameters or environment
- self.username = username or os.environ.get("FAIRO_API_ACCESS_KEY_ID")
- self.password = password or os.environ.get("FAIRO_API_SECRET")
- self.api_url = api_url or os.environ.get("FAIRO_BASE_URL", "https://api.fairo.ai")
+ self.username = username or get_fairo_api_key()
+ self.password = password or get_fairo_api_secret()
+ self.api_url = api_url or get_fairo_base_url()
  self.fairo_auth_token = os.environ.get("FAIRO_AUTH_TOKEN")
-
+ # Setup credentials
  if not self.fairo_auth_token and (not self.username or not self.password):
  raise ValueError("Fairo API credentials must be provided either as FAIRO_AUTH_TOKEN or as parameters or in the FAIRO_USERNAME and FAIRO_PASSWORD environment variables")

+ if(self.username and self.password):
+ os.environ["MLFLOW_TRACKING_USERNAME"] = self.username
+ os.environ["MLFLOW_TRACKING_PASSWORD"] = self.password
+
  # Initialize API client
  self.client = BaseClient(
  base_url=self.api_url.rstrip('/'),
@@ -429,7 +294,7 @@ class FairoVectorStore(BaseVectorStore):
  collection_name: str,
  username: str = None,
  password: str = None,
- api_url: str = "https://api.fairo.ai",
+ api_url: str = get_fairo_base_url(),
  embedding_model_id: str = AWS_AI_EMBEDDING_MODEL,
  region_name: str = None):
  """

{fairo-25.8.2 → fairo-25.9.2}/fairo.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fairo
- Version: 25.8.2
+ Version: 25.9.2
  Summary: SDK for interfacing with Fairo SaaS platform.
  Author-email: "Fairo Systems, Inc." <support@fairo.ai>
  License: Apache-2.0
@@ -11,14 +11,12 @@ Classifier: Programming Language :: Python :: 3.10
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent
  Description-Content-Type: text/markdown
- Requires-Dist: mlflow<=3.2.0,>=3.1.0
+ Requires-Dist: mlflow<=3.1.1,>=3.1.0
  Requires-Dist: langchain<0.4.0,>=0.3.20
  Requires-Dist: langchain-aws<0.3.0,>=0.2.18
  Requires-Dist: langchain-community<0.4.0,>=0.3.20
  Requires-Dist: langchain-core<0.4.0,>=0.3.49
  Requires-Dist: langchain-text-splitters<0.4.0,>=0.3.7
- Requires-Dist: psycopg2-binary<3.0.0,>=2.9.0
- Requires-Dist: langchain-postgres<0.1.0,>=0.0.14
  Requires-Dist: setuptools>=79.0.0
  Requires-Dist: pandas<3.0.0,>=2.0.0

{fairo-25.8.2 → fairo-25.9.2}/fairo.egg-info/SOURCES.txt

@@ -9,6 +9,7 @@ fairo.egg-info/requires.txt
  fairo.egg-info/top_level.txt
  fairo/core/__init__.py
  fairo/core/exceptions.py
+ fairo/core/utils.py
  fairo/core/agent/__init__.py
  fairo/core/agent/base_agent.py
  fairo/core/agent/code_analysis_agent.py
@@ -34,6 +35,9 @@ fairo/core/models/custom_field_value.py
  fairo/core/models/resources.py
  fairo/core/runnable/__init__.py
  fairo/core/runnable/runnable.py
+ fairo/core/tools/__init__.py
+ fairo/core/tools/plot.py
+ fairo/core/tools/suggestion.py
  fairo/core/workflow/__init__.py
  fairo/core/workflow/base_workflow.py
  fairo/core/workflow/dependency.py

{fairo-25.8.2 → fairo-25.9.2}/fairo.egg-info/requires.txt

@@ -1,10 +1,8 @@
- mlflow<=3.2.0,>=3.1.0
+ mlflow<=3.1.1,>=3.1.0
  langchain<0.4.0,>=0.3.20
  langchain-aws<0.3.0,>=0.2.18
  langchain-community<0.4.0,>=0.3.20
  langchain-core<0.4.0,>=0.3.49
  langchain-text-splitters<0.4.0,>=0.3.7
- psycopg2-binary<3.0.0,>=2.9.0
- langchain-postgres<0.1.0,>=0.0.14
  setuptools>=79.0.0
  pandas<3.0.0,>=2.0.0

{fairo-25.8.2 → fairo-25.9.2}/pyproject.toml

@@ -20,14 +20,12 @@ classifiers = [
  "Operating System :: OS Independent",
  ]
  dependencies = [
- "mlflow>=3.1.0,<=3.2.0",
+ "mlflow>=3.1.0,<=3.1.1",
  "langchain>=0.3.20,<0.4.0",
  "langchain-aws>=0.2.18,<0.3.0",
  "langchain-community>=0.3.20,<0.4.0",
  "langchain-core>=0.3.49,<0.4.0",
  "langchain-text-splitters>=0.3.7,<0.4.0",
- "psycopg2-binary>=2.9.0,<3.0.0",
- "langchain-postgres>=0.0.14,<0.1.0",
  "setuptools>=79.0.0",
  "pandas>=2.0.0,<3.0.0"
  ]

fairo-25.8.2/fairo/__init__.py

@@ -1 +0,0 @@
- __version__ = "25.8.2"