thoughtflow 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,137 @@
+ """
+ Record/replay functionality for ThoughtFlow.
+
+ Replay enables deterministic testing by recording agent runs and
+ replaying them with mocked responses.
+ """
+
+ from __future__ import annotations
+
+ import json
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any
+
+ if TYPE_CHECKING:
+     from thoughtflow.agent import Agent
+     from thoughtflow.trace.session import Session
+
+
+ @dataclass
+ class ReplayResult:
+     """Result of a replay run.
+
+     Attributes:
+         success: Whether the replay succeeded.
+         original_response: The recorded response.
+         replayed_response: The response from the replay.
+         differences: List of differences found.
+         metadata: Additional result metadata.
+     """
+
+     success: bool
+     original_response: str | None = None
+     replayed_response: str | None = None
+     differences: list[str] = field(default_factory=list)
+     metadata: dict[str, Any] = field(default_factory=dict)
+
+
+ class Replay:
+     """Replay recorded sessions for testing.
+
+     Replay allows you to:
+     - Record agent runs to files
+     - Replay them with mocked model responses
+     - Compare outputs for regression testing
+     - Test without hitting live APIs
+
+     Example:
+         >>> # Save a session for replay
+         >>> session = Session()
+         >>> response = agent.call(messages, session=session)
+         >>> Replay.save(session, "test_case.json")
+         >>>
+         >>> # Later: replay the session
+         >>> replay = Replay.load("test_case.json")
+         >>> result = replay.run(agent)
+         >>>
+         >>> assert result.success
+         >>> assert result.replayed_response == result.original_response
+     """
+
+     def __init__(self, session_data: dict[str, Any]) -> None:
+         """Initialize a Replay from session data.
+
+         Args:
+             session_data: Recorded session data.
+         """
+         self.session_data = session_data
+         self._inputs = self._extract_inputs()
+         self._expected_outputs = self._extract_outputs()
+
+     def _extract_inputs(self) -> list[list[dict[str, Any]]]:
+         """Extract input messages from session data.
+
+         Returns:
+             List of message lists, one per recorded call.
+         """
+         inputs = []
+         for event in self.session_data.get("events", []):
+             if event.get("event_type") == "call_start":
+                 inputs.append(event.get("data", {}).get("messages", []))
+         return inputs
+
+     def _extract_outputs(self) -> list[str]:
+         """Extract expected outputs from session data.
+
+         Returns:
+             List of expected response strings.
+         """
+         outputs = []
+         for event in self.session_data.get("events", []):
+             if event.get("event_type") == "call_end":
+                 outputs.append(event.get("data", {}).get("response", ""))
+         return outputs
+
+     def run(self, agent: Agent) -> ReplayResult:
+         """Run the replay against an agent.
+
+         Args:
+             agent: The agent to test.
+
+         Returns:
+             ReplayResult with comparison data.
+
+         Raises:
+             NotImplementedError: This is a placeholder implementation.
+         """
+         # TODO: Implement replay with mocked adapter responses
+         raise NotImplementedError(
+             "Replay.run() is not yet implemented. "
+             "This is a placeholder for the ThoughtFlow alpha release."
+         )
+
+     @classmethod
+     def load(cls, path: str | Path) -> Replay:
+         """Load a replay from a JSON file.
+
+         Args:
+             path: Path to the replay file.
+
+         Returns:
+             Replay instance.
+         """
+         path = Path(path)
+         data = json.loads(path.read_text())
+         return cls(data)
+
+     @staticmethod
+     def save(session: Session, path: str | Path) -> None:
+         """Save a session for replay.
+
+         Args:
+             session: The session to save.
+             path: Path to save to.
+         """
+         path = Path(path)
+         path.write_text(json.dumps(session.to_dict(), indent=2))
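
Editor's note: for reference, here is a minimal session dict that the extractors above would accept. The structure is inferred from `_extract_inputs`/`_extract_outputs`; the payload values are purely illustrative.

    session_data = {
        "events": [
            {"event_type": "call_start",                # picked up by _extract_inputs
             "data": {"messages": [{"role": "user", "content": "Hi"}]}},
            {"event_type": "call_end",                  # picked up by _extract_outputs
             "data": {"response": "Hello!"}},
        ]
    }
    replay = Replay(session_data)
    assert replay._inputs == [[{"role": "user", "content": "Hi"}]]
    assert replay._expected_outputs == ["Hello!"]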
thoughtflow/llm.py ADDED
@@ -0,0 +1,250 @@
+ """
+ LLM class for ThoughtFlow.
+
+ A unified interface for calling various language model services.
+ """
+
+ from __future__ import annotations
+
+ import json
+ import urllib.error
+ import urllib.request
+
+
+ class LLM:
+     """
+     The LLM class is designed to interface with various language model services.
+
+     Attributes:
+         service (str): The name of the service provider (e.g., 'openai', 'groq', 'anthropic').
+         model (str): The specific model to be used within the service.
+         api_key (str): The API key for authenticating requests.
+         api_secret (str): The API secret for additional authentication.
+         last_params (dict): Stores the parameters used in the last API call.
+
+     Methods:
+         __init__(model_id, key, secret):
+             Initializes the LLM instance with a model ID, API key, and secret.
+
+         call(msg_list, params):
+             Calls the appropriate API based on the service with the given message list and parameters.
+
+         _normalize_messages(msg_list):
+             Normalizes plain strings and partial dicts into role/content message dicts.
+
+         _call_openai(msg_list, params):
+             Sends a request to the OpenAI API with the specified messages and parameters.
+
+         _call_groq(msg_list, params):
+             Sends a request to the Groq API with the specified messages and parameters.
+
+         _call_anthropic(msg_list, params):
+             Sends a request to the Anthropic API with the specified messages and parameters.
+
+         _call_gemini(msg_list, params):
+             Sends a request to the Google Gemini API with the specified messages and parameters.
+
+         _call_openrouter(msg_list, params):
+             Sends a request to the OpenRouter API with the specified messages and parameters.
+
+         _call_ollama(msg_list, params):
+             Sends a request to a local Ollama server with the specified messages and parameters.
+
+         _send_request(url, data, headers):
+             Helper function to send HTTP requests to the specified URL with data and headers.
+     """
+     def __init__(self, model_id='', key='API_KEY', secret='API_SECRET'):
+         # Parse model ID and initialize service and model name
+         if ':' not in model_id:
+             model_id = 'openai:gpt-4-turbo'
+
+         # Split only on the first colon so model names that themselves
+         # contain colons (e.g. 'ollama:llama3:8b') stay intact
+         self.service, self.model = model_id.split(':', 1)
+         self.api_key = key
+         self.api_secret = secret
+         self.last_params = {}
+
+     def __call__(self, msg_list, params=None):
+         # Defined as a real method (rather than assigning self.__call__ in
+         # __init__) so instances are directly callable: Python looks up
+         # special methods on the class, not the instance.
+         return self.call(msg_list, params)
+
+     def _normalize_messages(self, msg_list):
+         """
+         Accepts either:
+           - list[str] -> converts to [{'role':'user','content': str}, ...]
+           - list[dict] with 'role' and 'content' -> passes through unchanged
+           - list[dict] with only 'content' -> assumes role='user'
+         Returns: list[{'role': str, 'content': str or list[...]}]
+         """
+         norm = []
+         for m in msg_list:
+             if isinstance(m, dict):
+                 role = m.get("role", "user")
+                 content = m.get("content", "")
+                 norm.append({"role": role, "content": content})
+             else:
+                 # treat as plain user text
+                 norm.append({"role": "user", "content": str(m)})
+         return norm
+
+     def call(self, msg_list, params=None):
+         # Default to None rather than a mutable {} default, which would be
+         # shared across every call to this method
+         params = dict(params) if params else {}
+         self.last_params = dict(params)
+         # General function to call the appropriate API with msg_list and optional parameters
+         if self.service == 'openai':
+             return self._call_openai(msg_list, params)
+         elif self.service == 'groq':
+             return self._call_groq(msg_list, params)
+         elif self.service == 'anthropic':
+             return self._call_anthropic(msg_list, params)
+         elif self.service == 'ollama':
+             return self._call_ollama(msg_list, params)
+         elif self.service == 'gemini':
+             return self._call_gemini(msg_list, params)
+         elif self.service == 'openrouter':
+             return self._call_openrouter(msg_list, params)
+         else:
+             raise ValueError("Unsupported service '{}'.".format(self.service))
+
+     def _call_openai(self, msg_list, params):
+         url = "https://api.openai.com/v1/chat/completions"
+         data = json.dumps({
+             "model": self.model,
+             "messages": self._normalize_messages(msg_list),
+             **params
+         }).encode("utf-8")
+         headers = {
+             "Authorization": "Bearer " + self.api_key,
+             "Content-Type": "application/json",
+         }
+         res = self._send_request(url, data, headers)
+         choices = [a["message"]["content"] for a in res.get("choices", [])]
+         return choices
+
+     def _call_groq(self, msg_list, params):
+         url = "https://api.groq.com/openai/v1/chat/completions"
+         data = json.dumps({
+             "model": self.model,
+             "messages": self._normalize_messages(msg_list),
+             **params
+         }).encode("utf-8")
+         headers = {
+             "Authorization": "Bearer " + self.api_key,
+             "Content-Type": "application/json",
+             "User-Agent": "Groq/Python 0.9.0",
+         }
+         res = self._send_request(url, data, headers)
+         choices = [a["message"]["content"] for a in res.get("choices", [])]
+         return choices
+
+     def _call_anthropic(self, msg_list, params):
+         url = "https://api.anthropic.com/v1/messages"
+         data = json.dumps({
+             "model": self.model,
+             "max_tokens": params.get("max_tokens", 1024),
+             "messages": self._normalize_messages(msg_list),
+             # Forward the remaining params, mirroring the other providers
+             **{k: v for k, v in params.items() if k != "max_tokens"},
+         }).encode("utf-8")
+         headers = {
+             "x-api-key": self.api_key,
+             "anthropic-version": "2023-06-01",
+             "Content-Type": "application/json",
+         }
+         res = self._send_request(url, data, headers)
+         # Anthropic returns {"content":[{"type":"text","text":"..."}], ...}
+         choices = [c.get("text", "") for c in res.get("content", [])]
+         return choices
+
+     def _call_gemini(self, msg_list, params):
+         """
+         Calls Google Gemini chat-capable models via the REST API.
+         Requires self.api_key to be set.
+         """
+         url = "https://generativelanguage.googleapis.com/v1beta/models/{}:generateContent?key={}".format(self.model, self.api_key)
+         # Gemini expects a list of "contents" alternating user/model turns.
+         # We collapse the messages into a sequence of dicts as required:
+         # [{"role": "user" | "model", "parts": [{"text": ...}]}]
+         gemini_msgs = []
+         for m in self._normalize_messages(msg_list):
+             # Google's role scheme: "user" or "model"
+             g_role = {"user": "user", "assistant": "model", "system": "user"}.get(m["role"], "user")
+             gemini_msgs.append({
+                 "role": g_role,
+                 "parts": [{"text": str(m["content"])}] if isinstance(m["content"], str) else m["content"]
+             })
+         payload = {
+             "contents": gemini_msgs,
+             **{k: v for k, v in params.items() if k != "model"}
+         }
+         data = json.dumps(payload).encode("utf-8")
+         headers = {
+             "Content-Type": "application/json",
+         }
+         res = self._send_request(url, data, headers)
+         # Gemini returns { "candidates": [ { "content": { "parts": [ { "text": ... } ] } } ] }
+         choices = []
+         for cand in res.get("candidates", []):
+             parts = cand.get("content", {}).get("parts", [])
+             text = "".join([p.get("text", "") for p in parts])
+             choices.append(text)
+         return choices
+
+     def _call_openrouter(self, msg_list, params):
+         """
+         Calls an LLM via the OpenRouter API. Requires self.api_key.
+         API docs: https://openrouter.ai/docs
+         Model list: https://openrouter.ai/docs#models
+         """
+         url = "https://openrouter.ai/api/v1/chat/completions"
+         data = json.dumps({
+             "model": self.model,
+             "messages": self._normalize_messages(msg_list),
+             **params
+         }).encode("utf-8")
+         headers = {
+             "Authorization": "Bearer " + self.api_key,
+             "Content-Type": "application/json",
+             "HTTP-Referer": params.get("referer", "https://your-app.com"),
+             "X-Title": params.get("title", "Thoughtflow"),
+         }
+         res = self._send_request(url, data, headers)
+         choices = [a["message"]["content"] for a in res.get("choices", [])]
+         return choices
+
+     def _call_ollama(self, msg_list, params):
+         """
+         Calls a local model served via Ollama (http://localhost:11434 by default).
+         Expects no authentication. Ollama's messages format is like OpenAI's.
+         """
+         base_url = params.get("ollama_url", "http://localhost:11434")
+         url = base_url.rstrip('/') + "/api/chat"
+         payload = {
+             "model": self.model,
+             "messages": self._normalize_messages(msg_list),
+             "stream": False,  # Disable streaming to get a single JSON response
+             **{k: v for k, v in params.items() if k not in ("ollama_url", "model")}
+         }
+         data = json.dumps(payload).encode("utf-8")
+         headers = {
+             "Content-Type": "application/json",
+         }
+         res = self._send_request(url, data, headers)
+         # Ollama returns {"message": {...}, ...} or {"choices": [{...}]}
+         # Prefer OpenAI-style extraction if available, else fall back
+         if "choices" in res:
+             choices = [a["message"]["content"] for a in res.get("choices", [])]
+         elif "message" in res:
+             # single result
+             msg = res["message"]
+             choices = [msg.get("content", "")]
+         elif "response" in res:
+             # streaming/fallback
+             choices = [res["response"]]
+         else:
+             choices = []
+         return choices
+
+     def _send_request(self, url, data, headers):
+         # Sends the actual HTTP request and handles the response
+         try:
+             req = urllib.request.Request(url, data=data, headers=headers)
+             with urllib.request.urlopen(req) as response:
+                 response_data = response.read().decode("utf-8")
+             # Attempt to parse JSON response; handle plain-text responses
+             try:
+                 return json.loads(response_data)  # Parse JSON response
+             except json.JSONDecodeError:
+                 # If response is not JSON, return it as-is in a structured format
+                 return {"error": "Non-JSON response", "response_data": response_data}
+
+         except urllib.error.HTTPError as e:
+             # Return the error details in case of an HTTP error
+             error_msg = e.read().decode("utf-8")
+             print("HTTP Error:", error_msg)  # Log HTTP error for debugging
+             # Error bodies are not guaranteed to be JSON, so guard the parse
+             try:
+                 return {"error": json.loads(error_msg) if error_msg else "Unknown HTTP error"}
+             except json.JSONDecodeError:
+                 return {"error": error_msg}
+         except Exception as e:
+             return {"error": str(e)}
thoughtflow/memory/__init__.py ADDED
@@ -0,0 +1,32 @@
+ """
+ Memory module for ThoughtFlow.
+
+ The MEMORY class is the event-sourced state container for managing events,
+ logs, messages, reflections, and variables in ThoughtFlow workflows.
+
+ Example:
+     >>> from thoughtflow.memory import MEMORY
+     >>>
+     >>> memory = MEMORY()
+     >>> memory.add_msg('user', 'Hello!', channel='webapp')
+     >>> memory.add_msg('assistant', 'Hi there!', channel='webapp')
+     >>> memory.set_var('session_id', 'abc123', desc='Current session')
+     >>>
+     >>> # Get messages
+     >>> memory.get_msgs(include=['user'])
+     >>>
+     >>> # Prepare context for LLM
+     >>> context = memory.prepare_context(format='openai')
+     >>>
+     >>> # Save/load state
+     >>> memory.save('memory.pkl')
+     >>> memory.to_json('memory.json')
+ """
+
+ from __future__ import annotations
+
+ from thoughtflow.memory.base import MEMORY
+
+ __all__ = [
+     "MEMORY",
+ ]
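
Editor's note: to tie the two new modules together, a hedged sketch, assuming `prepare_context(format='openai')` returns an OpenAI-style message list (its exact return shape is not shown in this diff) and using a hypothetical API key:

    from thoughtflow.memory import MEMORY
    from thoughtflow.llm import LLM

    memory = MEMORY()
    memory.add_msg('user', 'Hello!', channel='webapp')

    # Feed the accumulated context to an LLM and record the reply
    context = memory.prepare_context(format='openai')  # assumed message-list shape
    llm = LLM('openai:gpt-4-turbo', key='sk-...')      # hypothetical key
    reply = llm.call(context)[0]
    memory.add_msg('assistant', reply, channel='webapp')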