thoughtflow 0.0.2-py3-none-any.whl → 0.0.3-py3-none-any.whl
This diff compares two package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
- thoughtflow/__init__.py +57 -14
- thoughtflow/_util.py +713 -69
- thoughtflow/action.py +357 -0
- thoughtflow/agent.py +49 -130
- thoughtflow/llm.py +250 -0
- thoughtflow/memory/__init__.py +20 -15
- thoughtflow/memory/base.py +1615 -99
- thoughtflow/thought.py +1102 -0
- thoughtflow/thoughtflow6.py +4180 -0
- {thoughtflow-0.0.2.dist-info → thoughtflow-0.0.3.dist-info}/METADATA +1 -1
- thoughtflow-0.0.3.dist-info/RECORD +25 -0
- thoughtflow/adapters/__init__.py +0 -43
- thoughtflow/adapters/anthropic.py +0 -119
- thoughtflow/adapters/base.py +0 -140
- thoughtflow/adapters/local.py +0 -133
- thoughtflow/adapters/openai.py +0 -118
- thoughtflow-0.0.2.dist-info/RECORD +0 -26
- {thoughtflow-0.0.2.dist-info → thoughtflow-0.0.3.dist-info}/WHEEL +0 -0
- {thoughtflow-0.0.2.dist-info → thoughtflow-0.0.3.dist-info}/licenses/LICENSE +0 -0
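The summary above can be reproduced locally by comparing the archive listings of the two wheels (a .whl is a zip file). A minimal sketch, assuming both wheels have been downloaded first, for example with pip download thoughtflow==0.0.2 --no-deps:

# Sketch: list entries added or removed between the two wheel archives.
import zipfile

def wheel_entries(path: str) -> set[str]:
    """Return the set of file paths stored inside a wheel (zip) archive."""
    with zipfile.ZipFile(path) as zf:
        return set(zf.namelist())

old = wheel_entries("thoughtflow-0.0.2-py3-none-any.whl")
new = wheel_entries("thoughtflow-0.0.3-py3-none-any.whl")

print("added:", sorted(new - old))    # e.g. thoughtflow/llm.py, thoughtflow/thought.py
print("removed:", sorted(old - new))  # e.g. the thoughtflow/adapters/ modules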
{thoughtflow-0.0.2.dist-info → thoughtflow-0.0.3.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: thoughtflow
-Version: 0.0.2
+Version: 0.0.3
 Summary: A minimal, explicit, Pythonic substrate for building reproducible, portable, testable LLM and agent systems.
 Project-URL: Homepage, https://github.com/jrolf/thoughtflow
 Project-URL: Documentation, https://thoughtflow.dev
thoughtflow-0.0.3.dist-info/RECORD
@@ -0,0 +1,25 @@
+thoughtflow/__init__.py,sha256=oeMPn51oJCvIB1m4hOj7yemq_2Ng9sTAlQ3zb0nNXnU,2541
+thoughtflow/_util.py,sha256=c0pMAdFXCGUFf9yhK3xwnqAD3SrlijD0eJVh0EekjMA,25260
+thoughtflow/action.py,sha256=ZCKC95luZwZPEUkHIf8V0ZYxRXZ_msFLtb85Zs_cETg,14246
+thoughtflow/agent.py,sha256=nSwUwUjjkcE2_dF81waSqRFmQ8xW3nQgwzX1VlEOJMo,1977
+thoughtflow/llm.py,sha256=_aZiTHzlavabeKNgdYd-uM45_R10JvvgEgSavNx_k0M,10528
+thoughtflow/message.py,sha256=5hR2Zs3tDRYLtYuuArqmJ4CgBDTUhjHj_RQhCN7tRrg,3800
+thoughtflow/py.typed,sha256=Y3QpSe9qtWcL1vKplcWdRY0cn5071TLJQWBO0QbxTm8,84
+thoughtflow/thought.py,sha256=i5Ca0IxaVGE688AhCpYFlk2vnJzotWZTX2IsW3cuLLU,43229
+thoughtflow/thoughtflow6.py,sha256=GMKlqxFyNvvMD7yIU7NEYAtuDZplapeFeK1pGf20u5o,162824
+thoughtflow/eval/__init__.py,sha256=WiXk5IarMdgQV-mCpWVq_ZwCq6iC6pAKwl9wbZsmmNA,866
+thoughtflow/eval/harness.py,sha256=DH8EGajfxIGsb9HLk7g-hnQM0t1C90VAk7bOPLK9OBQ,5505
+thoughtflow/eval/replay.py,sha256=-osU4BbjVdThLeM1bygCss7sqqHHvg9I43XEvVZkNd8,4011
+thoughtflow/memory/__init__.py,sha256=3DAxZ4NlvJbvyP-8p86WlwoU1BUF24V97a4Z0c0ZC0w,859
+thoughtflow/memory/base.py,sha256=TQn0JJd_t_k4FW73MWEbYNudwe0GEaaDqRyZIe9irvo,64870
+thoughtflow/tools/__init__.py,sha256=1bDivRtNS2rGDForiLMOGKjStBeZAWFH8wylAN3Ihjk,648
+thoughtflow/tools/base.py,sha256=VYRFDhzG912HFUR6W82kykm_FU-r4xg4aEzHBDN_b8M,4069
+thoughtflow/tools/registry.py,sha256=ERKStxvh_UkfLg7GebVFaifSuudFYAfYXbnxr7lC5o0,3335
+thoughtflow/trace/__init__.py,sha256=fQSpJJyYdtaI_L_QAD4NqvYA6yXS5xCJXxS5-rVUpoM,891
+thoughtflow/trace/events.py,sha256=jJtVzs2xSLb43CAW6_CqgBVGgM0gBJ1x-_xmQjwUGkw,4647
+thoughtflow/trace/schema.py,sha256=teyUJ3gV80ZgsU8oCYJmku_f-SDULKtvqqLjvp1eQ_E,3180
+thoughtflow/trace/session.py,sha256=ZYKMGe3t98A7LujG-nSuRQo83BvzqhZGKFXHZZU0amw,4225
+thoughtflow-0.0.3.dist-info/METADATA,sha256=4QewTCLBg1f8bGIsJiqUf6HPrZCMlk27t80eLiFGmkc,7548
+thoughtflow-0.0.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+thoughtflow-0.0.3.dist-info/licenses/LICENSE,sha256=Z__Z0xyty_n2lxI7UNvfqkgemXIP0_UliF5sZN8GsPw,1073
+thoughtflow-0.0.3.dist-info/RECORD,,
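Each RECORD row has the form path,sha256=<digest>,<size>: the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing '=' padding stripped, and the final field is the file size in bytes. A short sketch of how such a row is computed; the path below is illustrative:

# Sketch: compute one wheel RECORD entry (path,sha256=<urlsafe b64 digest>,<size>).
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    data = Path(path).read_bytes()
    digest = hashlib.sha256(data).digest()
    # RECORD uses urlsafe base64 with the trailing '=' padding removed.
    encoded = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f"{path},sha256={encoded},{len(data)}"

print(record_entry("thoughtflow/llm.py"))  # compare against the row above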
thoughtflow/adapters/__init__.py DELETED
@@ -1,43 +0,0 @@
-"""
-Provider adapters for ThoughtFlow.
-
-Adapters translate between ThoughtFlow's stable message schema and
-provider-specific APIs (OpenAI, Anthropic, local models, etc.).
-
-Example:
-    >>> from thoughtflow.adapters import OpenAIAdapter
-    >>> adapter = OpenAIAdapter(api_key="...")
-    >>> response = adapter.complete(messages, params)
-"""
-
-from __future__ import annotations
-
-from thoughtflow.adapters.base import Adapter, AdapterConfig
-
-# Lazy imports to avoid requiring all provider dependencies
-# Users only need to install the providers they use
-
-__all__ = [
-    "Adapter",
-    "AdapterConfig",
-    "OpenAIAdapter",
-    "AnthropicAdapter",
-    "LocalAdapter",
-]
-
-
-def __getattr__(name: str):
-    """Lazy load adapters to avoid import errors for missing dependencies."""
-    if name == "OpenAIAdapter":
-        from thoughtflow.adapters.openai import OpenAIAdapter
-
-        return OpenAIAdapter
-    elif name == "AnthropicAdapter":
-        from thoughtflow.adapters.anthropic import AnthropicAdapter
-
-        return AnthropicAdapter
-    elif name == "LocalAdapter":
-        from thoughtflow.adapters.local import LocalAdapter
-
-        return LocalAdapter
-    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
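The deleted __init__.py relied on module-level __getattr__ (PEP 562) so that importing thoughtflow.adapters stayed cheap and a provider SDK was only imported when its adapter class was first accessed. A standalone sketch of the same pattern, with a hypothetical optional dependency named heavy_backend:

# lazy_mod.py -- minimal sketch of the PEP 562 lazy-import pattern used above.
# heavy_backend is a hypothetical optional dependency, not a real package.

__all__ = ["HeavyClient"]

def __getattr__(name: str):
    # Runs only when an attribute is not found normally, i.e. on first access.
    if name == "HeavyClient":
        from heavy_backend import HeavyClient  # deferred, possibly missing, import
        return HeavyClient
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

With this shape, importing lazy_mod never fails; an ImportError for the missing dependency only surfaces when lazy_mod.HeavyClient is actually touched, which is how the removed adapters package deferred its provider requirements.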
thoughtflow/adapters/anthropic.py DELETED
@@ -1,119 +0,0 @@
-"""
-Anthropic adapter for ThoughtFlow.
-
-Provides integration with Anthropic's API (Claude models).
-
-Requires: pip install thoughtflow[anthropic]
-"""
-
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any
-
-from thoughtflow.adapters.base import Adapter, AdapterConfig, AdapterResponse
-
-if TYPE_CHECKING:
-    from thoughtflow.message import MessageList
-
-
-class AnthropicAdapter(Adapter):
-    """Adapter for Anthropic's API.
-
-    Supports Claude 3, Claude 2, and other Anthropic models.
-
-    Example:
-        >>> adapter = AnthropicAdapter(api_key="sk-ant-...")
-        >>> response = adapter.complete([
-        ...     {"role": "user", "content": "Hello!"}
-        ... ])
-        >>> print(response.content)
-
-    Attributes:
-        config: Adapter configuration.
-        client: Anthropic client instance (created lazily).
-    """
-
-    DEFAULT_MODEL = "claude-sonnet-4-20250514"
-
-    def __init__(
-        self,
-        api_key: str | None = None,
-        config: AdapterConfig | None = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize the Anthropic adapter.
-
-        Args:
-            api_key: Anthropic API key. Can also be set via ANTHROPIC_API_KEY env var.
-            config: Full adapter configuration.
-            **kwargs: Additional config options.
-        """
-        if config is None:
-            config = AdapterConfig(api_key=api_key, **kwargs)
-        super().__init__(config)
-        self._client = None
-
-    @property
-    def client(self) -> Any:
-        """Lazy-load the Anthropic client.
-
-        Returns:
-            Anthropic client instance.
-
-        Raises:
-            ImportError: If anthropic package is not installed.
-        """
-        if self._client is None:
-            try:
-                from anthropic import Anthropic
-            except ImportError as e:
-                raise ImportError(
-                    "Anthropic package not installed. "
-                    "Install with: pip install thoughtflow[anthropic]"
-                ) from e
-
-            self._client = Anthropic(
-                api_key=self.config.api_key,
-                base_url=self.config.base_url,
-                timeout=self.config.timeout,
-                max_retries=self.config.max_retries,
-            )
-        return self._client
-
-    def complete(
-        self,
-        messages: MessageList,
-        params: dict[str, Any] | None = None,
-    ) -> AdapterResponse:
-        """Generate a completion using Anthropic's API.
-
-        Args:
-            messages: List of message dicts.
-            params: Optional parameters (model, temperature, max_tokens, etc.)
-
-        Returns:
-            AdapterResponse with the generated content.
-
-        Raises:
-            NotImplementedError: This is a placeholder implementation.
-        """
-        # TODO: Implement actual Anthropic API call
-        # Note: Anthropic uses a different message format (system as separate param)
-        raise NotImplementedError(
-            "AnthropicAdapter.complete() is not yet implemented. "
-            "This is a placeholder for the ThoughtFlow alpha release."
-        )
-
-    def get_capabilities(self) -> dict[str, Any]:
-        """Get Anthropic adapter capabilities.
-
-        Returns:
-            Dict of supported features.
-        """
-        return {
-            "streaming": True,
-            "tool_calling": True,
-            "vision": True,
-            "json_mode": False,  # Anthropic doesn't have native JSON mode
-            "seed": False,
-        }
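The complete() above was still a placeholder when the module was removed; its TODO notes that Anthropic takes the system prompt as a separate parameter rather than as a chat message. Purely as an illustration of that point, a hedged sketch of one possible method body against the anthropic SDK's Messages API; the defaults (max_tokens, model handling) are assumptions, not ThoughtFlow's actual behavior:

# Hedged sketch only: one way AnthropicAdapter.complete() could have been filled in.
# Intended as a method body for the class above; defaults are illustrative.
def complete(self, messages, params=None):
    params = dict(params or {})
    # Anthropic expects the system prompt as a top-level argument, so split it out.
    system = "\n".join(m["content"] for m in messages if m["role"] == "system")
    chat = [m for m in messages if m["role"] != "system"]
    kwargs = {
        "model": params.get("model", self.DEFAULT_MODEL),
        "max_tokens": params.get("max_tokens", 1024),
        "messages": chat,
    }
    if system:
        kwargs["system"] = system
    resp = self.client.messages.create(**kwargs)
    return AdapterResponse(
        content="".join(block.text for block in resp.content if block.type == "text"),
        model=resp.model,
        usage={"prompt": resp.usage.input_tokens, "completion": resp.usage.output_tokens},
        finish_reason=resp.stop_reason,
        raw=resp,
    )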
thoughtflow/adapters/base.py DELETED
@@ -1,140 +0,0 @@
-"""
-Base adapter interface for ThoughtFlow.
-
-All provider adapters implement this interface, ensuring a stable
-contract across different LLM providers.
-"""
-
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from dataclasses import dataclass, field
-from typing import TYPE_CHECKING, Any
-
-if TYPE_CHECKING:
-    from thoughtflow.message import MessageList
-
-
-@dataclass
-class AdapterConfig:
-    """Configuration for an adapter.
-
-    Attributes:
-        api_key: API key for the provider (if required).
-        base_url: Optional custom base URL for the API.
-        timeout: Request timeout in seconds.
-        max_retries: Maximum number of retries for failed requests.
-        default_model: Default model to use if not specified in params.
-        extra: Additional provider-specific configuration.
-    """
-
-    api_key: str | None = None
-    base_url: str | None = None
-    timeout: float = 60.0
-    max_retries: int = 3
-    default_model: str | None = None
-    extra: dict[str, Any] = field(default_factory=dict)
-
-
-@dataclass
-class AdapterResponse:
-    """Response from an adapter completion call.
-
-    Attributes:
-        content: The generated text content.
-        model: The model that generated the response.
-        usage: Token usage information (prompt, completion, total).
-        finish_reason: Why the model stopped (stop, length, tool_calls, etc.).
-        raw: The raw response from the provider (for debugging).
-    """
-
-    content: str
-    model: str | None = None
-    usage: dict[str, int] | None = None
-    finish_reason: str | None = None
-    raw: Any = None
-
-
-class Adapter(ABC):
-    """Abstract base class for provider adapters.
-
-    Adapters are responsible for:
-    - Translating ThoughtFlow's message format to provider-specific format
-    - Making API calls to the provider
-    - Translating responses back to ThoughtFlow's format
-    - Handling provider-specific errors and retries
-
-    Subclasses must implement:
-    - `complete()`: Synchronous completion
-    - `complete_async()`: Asynchronous completion (optional)
-    - `get_capabilities()`: Report adapter capabilities
-    """
-
-    def __init__(self, config: AdapterConfig | None = None, **kwargs: Any) -> None:
-        """Initialize the adapter.
-
-        Args:
-            config: Adapter configuration object.
-            **kwargs: Shorthand for config fields (api_key, base_url, etc.)
-        """
-        if config is None:
-            config = AdapterConfig(**kwargs)
-        self.config = config
-
-    @abstractmethod
-    def complete(
-        self,
-        messages: MessageList,
-        params: dict[str, Any] | None = None,
-    ) -> AdapterResponse:
-        """Generate a completion for the given messages.
-
-        Args:
-            messages: List of message dicts.
-            params: Optional parameters (model, temperature, max_tokens, etc.)
-
-        Returns:
-            AdapterResponse with the generated content.
-
-        Raises:
-            NotImplementedError: Subclasses must implement this method.
-        """
-        raise NotImplementedError
-
-    async def complete_async(
-        self,
-        messages: MessageList,
-        params: dict[str, Any] | None = None,
-    ) -> AdapterResponse:
-        """Async version of complete().
-
-        Default implementation calls the sync version.
-        Override for true async support.
-
-        Args:
-            messages: List of message dicts.
-            params: Optional parameters.
-
-        Returns:
-            AdapterResponse with the generated content.
-        """
-        # Default: fall back to sync
-        return self.complete(messages, params)
-
-    def get_capabilities(self) -> dict[str, Any]:
-        """Get the capabilities of this adapter.
-
-        Returns:
-            Dict describing what this adapter supports:
-            - streaming: bool
-            - tool_calling: bool
-            - vision: bool
-            - json_mode: bool
-            - etc.
-        """
-        return {
-            "streaming": False,
-            "tool_calling": False,
-            "vision": False,
-            "json_mode": False,
-        }
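The base module above defined the whole provider contract: AdapterConfig, AdapterResponse, and the Adapter ABC with a single required method. A minimal concrete subclass shows how small a conforming implementation is; this EchoAdapter is a hypothetical test double, not part of ThoughtFlow:

# Hypothetical test double satisfying the (now removed) Adapter contract.
# Handy for unit tests that need an Adapter without any provider SDK or network.
class EchoAdapter(Adapter):
    """Returns the most recent user message instead of calling an API."""

    def complete(self, messages, params=None):
        last_user = next(
            (m["content"] for m in reversed(messages) if m["role"] == "user"), ""
        )
        return AdapterResponse(
            content=last_user,
            model="echo",
            usage={"prompt": 0, "completion": 0, "total": 0},
            finish_reason="stop",
        )

# Usage: EchoAdapter().complete([{"role": "user", "content": "hi"}]).content == "hi"
# get_capabilities() is inherited, so the all-False defaults apply unchanged.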
thoughtflow/adapters/local.py DELETED
@@ -1,133 +0,0 @@
-"""
-Local model adapter for ThoughtFlow.
-
-Provides integration with locally-running models via Ollama, LM Studio,
-or other local inference servers.
-
-Requires: pip install thoughtflow[local]
-"""
-
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any
-
-from thoughtflow.adapters.base import Adapter, AdapterConfig, AdapterResponse
-
-if TYPE_CHECKING:
-    from thoughtflow.message import MessageList
-
-
-class LocalAdapter(Adapter):
-    """Adapter for locally-running models.
-
-    Supports Ollama, LM Studio, and other OpenAI-compatible local servers.
-
-    Example:
-        >>> # Using Ollama
-        >>> adapter = LocalAdapter(base_url="http://localhost:11434/v1")
-        >>> response = adapter.complete([
-        ...     {"role": "user", "content": "Hello!"}
-        ... ], params={"model": "llama3"})
-
-        >>> # Using LM Studio
-        >>> adapter = LocalAdapter(base_url="http://localhost:1234/v1")
-
-    Attributes:
-        config: Adapter configuration.
-        client: HTTP client for making requests.
-    """
-
-    DEFAULT_BASE_URL = "http://localhost:11434/v1"
-    DEFAULT_MODEL = "llama3"
-
-    def __init__(
-        self,
-        base_url: str | None = None,
-        config: AdapterConfig | None = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize the local adapter.
-
-        Args:
-            base_url: URL of the local inference server.
-            config: Full adapter configuration.
-            **kwargs: Additional config options.
-        """
-        if config is None:
-            config = AdapterConfig(
-                base_url=base_url or self.DEFAULT_BASE_URL,
-                **kwargs,
-            )
-        super().__init__(config)
-        self._client = None
-
-    @property
-    def client(self) -> Any:
-        """Lazy-load the HTTP client.
-
-        Returns:
-            Ollama client or httpx client instance.
-
-        Raises:
-            ImportError: If required packages are not installed.
-        """
-        if self._client is None:
-            # Try Ollama first, fall back to generic OpenAI-compatible client
-            try:
-                from ollama import Client
-
-                self._client = Client(host=self.config.base_url)
-            except ImportError:
-                # Fall back to using OpenAI client with custom base_url
-                try:
-                    from openai import OpenAI
-
-                    self._client = OpenAI(
-                        base_url=self.config.base_url,
-                        api_key="not-needed",  # Local servers often don't need keys
-                    )
-                except ImportError as e:
-                    raise ImportError(
-                        "No local model client available. "
-                        "Install with: pip install thoughtflow[local] or thoughtflow[openai]"
-                    ) from e
-        return self._client
-
-    def complete(
-        self,
-        messages: MessageList,
-        params: dict[str, Any] | None = None,
-    ) -> AdapterResponse:
-        """Generate a completion using a local model.
-
-        Args:
-            messages: List of message dicts.
-            params: Optional parameters (model, temperature, etc.)
-
-        Returns:
-            AdapterResponse with the generated content.
-
-        Raises:
-            NotImplementedError: This is a placeholder implementation.
-        """
-        # TODO: Implement actual local model call
-        raise NotImplementedError(
-            "LocalAdapter.complete() is not yet implemented. "
-            "This is a placeholder for the ThoughtFlow alpha release."
-        )
-
-    def get_capabilities(self) -> dict[str, Any]:
-        """Get local adapter capabilities.
-
-        Note: Capabilities depend on the specific model being used.
-
-        Returns:
-            Dict of supported features.
-        """
-        return {
-            "streaming": True,
-            "tool_calling": False,  # Depends on model
-            "vision": False,  # Depends on model
-            "json_mode": False,
-            "seed": True,
-        }
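LocalAdapter's fallback path simply pointed the standard OpenAI client at an OpenAI-compatible local endpoint. A hedged sketch of that path exercised directly, using the module's own defaults (Ollama at localhost:11434/v1, model llama3); it assumes such a server is actually running:

# Hedged sketch: the OpenAI-compatible fallback LocalAdapter relied on, used directly.
# Assumes a local server (e.g. Ollama) is serving its OpenAI-compatible API.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:11434/v1",  # Ollama's OpenAI-compatible endpoint
    api_key="not-needed",                  # local servers typically ignore the key
)
resp = client.chat.completions.create(
    model="llama3",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(resp.choices[0].message.content)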
thoughtflow/adapters/openai.py DELETED
@@ -1,118 +0,0 @@
-"""
-OpenAI adapter for ThoughtFlow.
-
-Provides integration with OpenAI's API (GPT-4, GPT-3.5, etc.)
-
-Requires: pip install thoughtflow[openai]
-"""
-
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any
-
-from thoughtflow.adapters.base import Adapter, AdapterConfig, AdapterResponse
-
-if TYPE_CHECKING:
-    from thoughtflow.message import MessageList
-
-
-class OpenAIAdapter(Adapter):
-    """Adapter for OpenAI's API.
-
-    Supports GPT-4, GPT-3.5-turbo, and other OpenAI models.
-
-    Example:
-        >>> adapter = OpenAIAdapter(api_key="sk-...")
-        >>> response = adapter.complete([
-        ...     {"role": "user", "content": "Hello!"}
-        ... ])
-        >>> print(response.content)
-
-    Attributes:
-        config: Adapter configuration.
-        client: OpenAI client instance (created lazily).
-    """
-
-    DEFAULT_MODEL = "gpt-4o"
-
-    def __init__(
-        self,
-        api_key: str | None = None,
-        config: AdapterConfig | None = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize the OpenAI adapter.
-
-        Args:
-            api_key: OpenAI API key. Can also be set via OPENAI_API_KEY env var.
-            config: Full adapter configuration.
-            **kwargs: Additional config options.
-        """
-        if config is None:
-            config = AdapterConfig(api_key=api_key, **kwargs)
-        super().__init__(config)
-        self._client = None
-
-    @property
-    def client(self) -> Any:
-        """Lazy-load the OpenAI client.
-
-        Returns:
-            OpenAI client instance.
-
-        Raises:
-            ImportError: If openai package is not installed.
-        """
-        if self._client is None:
-            try:
-                from openai import OpenAI
-            except ImportError as e:
-                raise ImportError(
-                    "OpenAI package not installed. "
-                    "Install with: pip install thoughtflow[openai]"
-                ) from e
-
-            self._client = OpenAI(
-                api_key=self.config.api_key,
-                base_url=self.config.base_url,
-                timeout=self.config.timeout,
-                max_retries=self.config.max_retries,
-            )
-        return self._client
-
-    def complete(
-        self,
-        messages: MessageList,
-        params: dict[str, Any] | None = None,
-    ) -> AdapterResponse:
-        """Generate a completion using OpenAI's API.
-
-        Args:
-            messages: List of message dicts.
-            params: Optional parameters (model, temperature, max_tokens, etc.)
-
-        Returns:
-            AdapterResponse with the generated content.
-
-        Raises:
-            NotImplementedError: This is a placeholder implementation.
-        """
-        # TODO: Implement actual OpenAI API call
-        raise NotImplementedError(
-            "OpenAIAdapter.complete() is not yet implemented. "
-            "This is a placeholder for the ThoughtFlow alpha release."
-        )
-
-    def get_capabilities(self) -> dict[str, Any]:
-        """Get OpenAI adapter capabilities.
-
-        Returns:
-            Dict of supported features.
-        """
-        return {
-            "streaming": True,
-            "tool_calling": True,
-            "vision": True,
-            "json_mode": True,
-            "seed": True,
-        }
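All four deleted adapters advertised their features through get_capabilities() rather than failing on unsupported options, so callers were evidently expected to branch on that dict. A small hypothetical helper showing that pattern; the helper name and the parameter keys it sets are assumptions layered on top of the interface shown above:

# Hypothetical helper: derive request params from an adapter's capability dict.
def build_params(adapter, want_json: bool = False, seed: int | None = None) -> dict:
    caps = adapter.get_capabilities()
    params: dict = {}
    if want_json and caps.get("json_mode"):
        params["response_format"] = {"type": "json_object"}  # OpenAI-style knob
    if seed is not None and caps.get("seed"):
        params["seed"] = seed
    return params

# build_params(OpenAIAdapter(), want_json=True, seed=7)
#   -> {"response_format": {"type": "json_object"}, "seed": 7}
# build_params(AnthropicAdapter(), want_json=True)
#   -> {}  (json_mode is reported as False for Anthropic above)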
thoughtflow-0.0.2.dist-info/RECORD DELETED
@@ -1,26 +0,0 @@
-thoughtflow/__init__.py,sha256=Jc_g5TDgnMFq3vNI5IBO8Xaja-f-KtjmW9NatIkUtYI,1375
-thoughtflow/_util.py,sha256=64QWe9TV3reyhmnlZDO_c3AtntHY6AFXe4MyqpJ20hs,2804
-thoughtflow/agent.py,sha256=yRL3OA1anQ6r_TUj8ncy0ZT5GF0G0rYaZxdV_YdpMig,4290
-thoughtflow/message.py,sha256=5hR2Zs3tDRYLtYuuArqmJ4CgBDTUhjHj_RQhCN7tRrg,3800
-thoughtflow/py.typed,sha256=Y3QpSe9qtWcL1vKplcWdRY0cn5071TLJQWBO0QbxTm8,84
-thoughtflow/adapters/__init__.py,sha256=OmGEgr08B7kwsSydINBQzsW5S_YDrzhLII05F1aqdak,1235
-thoughtflow/adapters/anthropic.py,sha256=IS-viYzbgde0em5cPiKkn8yQ6oHvkvBKL5_4JIBywG8,3500
-thoughtflow/adapters/base.py,sha256=vZE3M84AiPWSaGJIEKjkfB-FEgEswtBahNuHC_yBuhs,4102
-thoughtflow/adapters/local.py,sha256=XsCRtZ2HhLjXIOj-Du_RwLLgEOwGDVjvnQpze97wM1w,4037
-thoughtflow/adapters/openai.py,sha256=RcE5Le__j8P_UQf3dF8WMTW1Ie7zTNYgBUOWK7r1Tcc,3287
-thoughtflow/eval/__init__.py,sha256=WiXk5IarMdgQV-mCpWVq_ZwCq6iC6pAKwl9wbZsmmNA,866
-thoughtflow/eval/harness.py,sha256=DH8EGajfxIGsb9HLk7g-hnQM0t1C90VAk7bOPLK9OBQ,5505
-thoughtflow/eval/replay.py,sha256=-osU4BbjVdThLeM1bygCss7sqqHHvg9I43XEvVZkNd8,4011
-thoughtflow/memory/__init__.py,sha256=jwakjf3Rfi4Rk9HfadNHy6WGJ3VNG9GI3gSZO5Ebe5I,691
-thoughtflow/memory/base.py,sha256=WjnrJYT_b72BX3M6DontoVNhNW-SGVDbR2bAtyxy1-Q,4224
-thoughtflow/tools/__init__.py,sha256=1bDivRtNS2rGDForiLMOGKjStBeZAWFH8wylAN3Ihjk,648
-thoughtflow/tools/base.py,sha256=VYRFDhzG912HFUR6W82kykm_FU-r4xg4aEzHBDN_b8M,4069
-thoughtflow/tools/registry.py,sha256=ERKStxvh_UkfLg7GebVFaifSuudFYAfYXbnxr7lC5o0,3335
-thoughtflow/trace/__init__.py,sha256=fQSpJJyYdtaI_L_QAD4NqvYA6yXS5xCJXxS5-rVUpoM,891
-thoughtflow/trace/events.py,sha256=jJtVzs2xSLb43CAW6_CqgBVGgM0gBJ1x-_xmQjwUGkw,4647
-thoughtflow/trace/schema.py,sha256=teyUJ3gV80ZgsU8oCYJmku_f-SDULKtvqqLjvp1eQ_E,3180
-thoughtflow/trace/session.py,sha256=ZYKMGe3t98A7LujG-nSuRQo83BvzqhZGKFXHZZU0amw,4225
-thoughtflow-0.0.2.dist-info/METADATA,sha256=50ieenI8zDoK1IuHmNU1Tpa4ginxmUUuY7DQ5hw_Hck,7548
-thoughtflow-0.0.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-thoughtflow-0.0.2.dist-info/licenses/LICENSE,sha256=Z__Z0xyty_n2lxI7UNvfqkgemXIP0_UliF5sZN8GsPw,1073
-thoughtflow-0.0.2.dist-info/RECORD,,
{thoughtflow-0.0.2.dist-info → thoughtflow-0.0.3.dist-info}/WHEEL
File without changes

{thoughtflow-0.0.2.dist-info → thoughtflow-0.0.3.dist-info}/licenses/LICENSE
File without changes