thoughtflow 0.0.2-py3-none-any.whl → 0.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
thoughtflow/llm.py ADDED
@@ -0,0 +1,250 @@
+ """
+ LLM class for ThoughtFlow.
+ 
+ A unified interface for calling various language model services.
+ """
+ 
+ from __future__ import annotations
+ 
+ import json
+ import urllib.request
+ import urllib.error
+ 
+ 
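+ # Note: HTTP is handled with the standard library (urllib) only, so the
+ # module adds no third-party client dependencies.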
+ class LLM:
+     """
+     A unified interface to hosted and local language model services.
+ 
+     Attributes:
+         service (str): The service provider (e.g., 'openai', 'groq', 'anthropic',
+             'gemini', 'openrouter', 'ollama').
+         model (str): The specific model to be used within the service.
+         api_key (str): The API key for authenticating requests.
+         api_secret (str): An API secret, stored for providers that require one
+             (unused by the built-in backends).
+         last_params (dict): Stores the parameters used in the last API call.
+ 
+     Methods:
+         __init__(model_id, key, secret):
+             Initializes the LLM instance from a 'service:model' ID, API key, and secret.
+ 
+         call(msg_list, params):
+             Dispatches to the appropriate API for the configured service.
+ 
+         _normalize_messages(msg_list):
+             Coerces plain strings and partial dicts into role/content messages.
+ 
+         _call_openai(msg_list, params), _call_groq(msg_list, params),
+         _call_anthropic(msg_list, params), _call_gemini(msg_list, params),
+         _call_openrouter(msg_list, params), _call_ollama(msg_list, params):
+             Send a request to the corresponding service API.
+ 
+         _send_request(url, data, headers):
+             Helper that sends an HTTP request and returns the parsed response.
+     """
+     def __init__(self, model_id='', key='API_KEY', secret='API_SECRET'):
+         # Parse a 'service:model' ID into service and model name
+         if ':' not in model_id:
+             model_id = 'openai:gpt-4-turbo'
+ 
+         service, _, model = model_id.partition(':')
+         self.service = service
+         # partition() keeps any further colons (e.g. 'ollama:llama3:8b') in the model name
+         self.model = model
+         self.api_key = key
+         self.api_secret = secret
+         self.last_params = {}
+ 
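+     # For illustration: LLM('groq:llama3-70b') yields service='groq' and
+     # model='llama3-70b'; an ID without a colon falls back to the
+     # 'openai:gpt-4-turbo' default above.
+ 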
+     def _normalize_messages(self, msg_list):
+         """
+         Accepts either:
+         - list[str] -> converts to [{'role': 'user', 'content': str}, ...]
+         - list[dict] with 'role' and 'content' -> passes through unchanged
+         - list[dict] with only 'content' -> assumes role='user'
+         Returns: list[{'role': str, 'content': str or list[...]}]
+         """
+         norm = []
+         for m in msg_list:
+             if isinstance(m, dict):
+                 role = m.get("role", "user")
+                 content = m.get("content", "")
+                 norm.append({"role": role, "content": content})
+             else:
+                 # Treat anything else as plain user text
+                 norm.append({"role": "user", "content": str(m)})
+         return norm
+ 
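+     # For example, _normalize_messages(['hi', {'content': 'there'}]) returns
+     # [{'role': 'user', 'content': 'hi'}, {'role': 'user', 'content': 'there'}].
+ 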
+     def call(self, msg_list, params=None):
+         # Dispatch to the appropriate API with msg_list and optional parameters
+         params = dict(params) if params else {}
+         self.last_params = dict(params)
+         if self.service == 'openai':
+             return self._call_openai(msg_list, params)
+         elif self.service == 'groq':
+             return self._call_groq(msg_list, params)
+         elif self.service == 'anthropic':
+             return self._call_anthropic(msg_list, params)
+         elif self.service == 'ollama':
+             return self._call_ollama(msg_list, params)
+         elif self.service == 'gemini':
+             return self._call_gemini(msg_list, params)
+         elif self.service == 'openrouter':
+             return self._call_openrouter(msg_list, params)
+         else:
+             raise ValueError("Unsupported service '{}'.".format(self.service))
+ 
+     # Make instances directly callable. A class-level alias is required here:
+     # Python resolves dunder methods on the type, so assigning self.__call__
+     # on the instance would have no effect.
+     __call__ = call
+ 
+     def _call_openai(self, msg_list, params):
+         url = "https://api.openai.com/v1/chat/completions"
+         data = json.dumps({
+             "model": self.model,
+             "messages": self._normalize_messages(msg_list),
+             **params
+         }).encode("utf-8")
+         headers = {
+             "Authorization": "Bearer " + self.api_key,
+             "Content-Type": "application/json",
+         }
+         res = self._send_request(url, data, headers)
+         choices = [a["message"]["content"] for a in res.get("choices", [])]
+         return choices
+ 
+     def _call_groq(self, msg_list, params):
+         url = "https://api.groq.com/openai/v1/chat/completions"
+         data = json.dumps({
+             "model": self.model,
+             "messages": self._normalize_messages(msg_list),
+             **params
+         }).encode("utf-8")
+         headers = {
+             "Authorization": "Bearer " + self.api_key,
+             "Content-Type": "application/json",
+             "User-Agent": "Groq/Python 0.9.0",
+         }
+         res = self._send_request(url, data, headers)
+         choices = [a["message"]["content"] for a in res.get("choices", [])]
+         return choices
+ 
+     def _call_anthropic(self, msg_list, params):
+         url = "https://api.anthropic.com/v1/messages"
+         data = json.dumps({
+             "model": self.model,
+             "max_tokens": params.get("max_tokens", 1024),  # required by the API
+             "messages": self._normalize_messages(msg_list),
+             **{k: v for k, v in params.items() if k != "max_tokens"}
+         }).encode("utf-8")
+         headers = {
+             "x-api-key": self.api_key,
+             "anthropic-version": "2023-06-01",
+             "Content-Type": "application/json",
+         }
+         res = self._send_request(url, data, headers)
+         # Anthropic returns {"content": [{"type": "text", "text": "..."}], ...}
+         choices = [c.get("text", "") for c in res.get("content", [])]
+         return choices
+ 
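+     # Note on _call_anthropic above: the Messages API expects system prompts
+     # in a top-level "system" field, not as a message role, so 'system'
+     # entries in msg_list will be rejected by the service as-is.
+ 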
+     def _call_gemini(self, msg_list, params):
+         """
+         Calls Google Gemini chat models via the Generative Language REST API.
+         Requires self.api_key to be set.
+         """
+         url = "https://generativelanguage.googleapis.com/v1beta/models/{}:generateContent?key={}".format(self.model, self.api_key)
+         # Gemini expects [{"role": "user"|"model", "parts": [{"text": ...}]}],
+         # so collapse the normalized messages into that shape
+         gemini_msgs = []
+         for m in self._normalize_messages(msg_list):
+             # Google's role scheme is "user" or "model"; fold "system" into "user"
+             g_role = {"user": "user", "assistant": "model", "system": "user"}.get(m["role"], "user")
+             gemini_msgs.append({
+                 "role": g_role,
+                 "parts": [{"text": str(m["content"])}] if isinstance(m["content"], str) else m["content"]
+             })
+         payload = {
+             "contents": gemini_msgs,
+             **{k: v for k, v in params.items() if k != "model"}
+         }
+         data = json.dumps(payload).encode("utf-8")
+         headers = {
+             "Content-Type": "application/json",
+         }
+         res = self._send_request(url, data, headers)
+         # Gemini returns {"candidates": [{"content": {"parts": [{"text": ...}]}}]}
+         choices = []
+         for cand in res.get("candidates", []):
+             parts = cand.get("content", {}).get("parts", [])
+             text = "".join([p.get("text", "") for p in parts])
+             choices.append(text)
+         return choices
+ 
+     def _call_openrouter(self, msg_list, params):
+         """
+         Calls an LLM via the OpenRouter API. Requires self.api_key.
+         API docs: https://openrouter.ai/docs
+         Model list: https://openrouter.ai/docs#models
+         """
+         url = "https://openrouter.ai/api/v1/chat/completions"
+         data = json.dumps({
+             "model": self.model,
+             "messages": self._normalize_messages(msg_list),
+             # 'referer' and 'title' are header-only options, not body params
+             **{k: v for k, v in params.items() if k not in ("referer", "title")}
+         }).encode("utf-8")
+         headers = {
+             "Authorization": "Bearer " + self.api_key,
+             "Content-Type": "application/json",
+             "HTTP-Referer": params.get("referer", "https://your-app.com"),
+             "X-Title": params.get("title", "Thoughtflow"),
+         }
+         res = self._send_request(url, data, headers)
+         choices = [a["message"]["content"] for a in res.get("choices", [])]
+         return choices
+ 
+     def _call_ollama(self, msg_list, params):
+         """
+         Calls a local model served via Ollama (http://localhost:11434 by default).
+         Expects no authentication. Ollama's messages format mirrors OpenAI's.
+         """
+         base_url = params.get("ollama_url", "http://localhost:11434")
+         url = base_url.rstrip('/') + "/api/chat"
+         payload = {
+             "model": self.model,
+             "messages": self._normalize_messages(msg_list),
+             "stream": False,  # disable streaming to get a single JSON response
+             **{k: v for k, v in params.items() if k not in ("ollama_url", "model")}
+         }
+         data = json.dumps(payload).encode("utf-8")
+         headers = {
+             "Content-Type": "application/json",
+         }
+         res = self._send_request(url, data, headers)
+         # Ollama returns {"message": {...}, ...} or, behind some proxies,
+         # OpenAI-style {"choices": [...]}; prefer the latter, then fall back
+         if "choices" in res:
+             choices = [a["message"]["content"] for a in res.get("choices", [])]
+         elif "message" in res:
+             # Single result
+             msg = res["message"]
+             choices = [msg.get("content", "")]
+         elif "response" in res:
+             # /api/generate-style fallback
+             choices = [res["response"]]
+         else:
+             choices = []
+         return choices
+ 
+     def _send_request(self, url, data, headers):
+         # Sends the actual HTTP request and handles the response
+         try:
+             req = urllib.request.Request(url, data=data, headers=headers)
+             with urllib.request.urlopen(req) as response:
+                 response_data = response.read().decode("utf-8")
+             # Attempt to parse the JSON response; handle plain-text responses
+             try:
+                 return json.loads(response_data)
+             except json.JSONDecodeError:
+                 # If the response is not JSON, return it as-is in a structured format
+                 return {"error": "Non-JSON response", "response_data": response_data}
+ 
+         except urllib.error.HTTPError as e:
+             # Return the error details in case of an HTTP error
+             error_msg = e.read().decode("utf-8")
+             print("HTTP Error:", error_msg)  # Log HTTP error for debugging
+             try:
+                 return {"error": json.loads(error_msg) if error_msg else "Unknown HTTP error"}
+             except json.JSONDecodeError:
+                 # Error bodies are not guaranteed to be JSON
+                 return {"error": error_msg}
+         except Exception as e:
+             return {"error": str(e)}
thoughtflow/memory/__init__.py CHANGED
@@ -1,27 +1,32 @@
  """
- Memory hooks for ThoughtFlow.
+ Memory module for ThoughtFlow.
  
- Memory integration is handled as a service boundary, not a magical built-in.
- Memory is optional, pluggable, explicit at call-time, and recordable in traces.
+ The MEMORY class is the event-sourced state container for managing events,
+ logs, messages, reflections, and variables in ThoughtFlow workflows.
  
  Example:
-     >>> from thoughtflow.memory import MemoryHook
+     >>> from thoughtflow.memory import MEMORY
      >>>
-     >>> class VectorMemory(MemoryHook):
-     ...     def retrieve(self, query, k=5):
-     ...         # Retrieve relevant memories
-     ...         return memories
-     ...
-     ...     def store(self, content, metadata=None):
-     ...         # Store new memory
-     ...         pass
+     >>> memory = MEMORY()
+     >>> memory.add_msg('user', 'Hello!', channel='webapp')
+     >>> memory.add_msg('assistant', 'Hi there!', channel='webapp')
+     >>> memory.set_var('session_id', 'abc123', desc='Current session')
+     >>>
+     >>> # Get messages
+     >>> memory.get_msgs(include=['user'])
+     >>>
+     >>> # Prepare context for LLM
+     >>> context = memory.prepare_context(format='openai')
+     >>>
+     >>> # Save/load state
+     >>> memory.save('memory.pkl')
+     >>> memory.to_json('memory.json')
  """
  
  from __future__ import annotations
  
- from thoughtflow.memory.base import MemoryHook, MemoryEvent
+ from thoughtflow.memory.base import MEMORY
  
  __all__ = [
-     "MemoryHook",
-     "MemoryEvent",
+     "MEMORY",
  ]
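
Based on the docstring above, the new MEMORY container feeds the LLM class roughly as follows (a sketch inferred from the diff; it assumes prepare_context(format='openai') returns an OpenAI-style message list, which is not verified against thoughtflow.memory.base):

    from thoughtflow.llm import LLM
    from thoughtflow.memory import MEMORY

    memory = MEMORY()
    memory.add_msg('user', 'What changed in 0.0.3?', channel='cli')

    # Assumption: the prepared context is a list of {'role', 'content'} dicts
    context = memory.prepare_context(format='openai')
    reply = LLM('openai:gpt-4-turbo', key='YOUR_API_KEY').call(context)
    memory.add_msg('assistant', reply[0], channel='cli')
    memory.save('memory.pkl')  # persist state, as shown in the docstring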