casual-llm 0.4.2.tar.gz → 0.5.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. casual_llm-0.5.0/PKG-INFO +192 -0
  2. casual_llm-0.5.0/README.md +160 -0
  3. {casual_llm-0.4.2 → casual_llm-0.5.0}/pyproject.toml +1 -1
  4. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/__init__.py +42 -16
  5. casual_llm-0.5.0/src/casual_llm/config.py +95 -0
  6. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/message_converters/anthropic.py +6 -7
  7. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/message_converters/ollama.py +6 -6
  8. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/message_converters/openai.py +4 -4
  9. casual_llm-0.4.2/src/casual_llm/providers/base.py → casual_llm-0.5.0/src/casual_llm/model.py +85 -33
  10. casual_llm-0.5.0/src/casual_llm/providers/__init__.py +140 -0
  11. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/providers/anthropic.py +57 -85
  12. casual_llm-0.5.0/src/casual_llm/providers/base.py +109 -0
  13. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/providers/ollama.py +57 -84
  14. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/providers/openai.py +58 -86
  15. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/tool_converters/anthropic.py +1 -1
  16. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/tool_converters/ollama.py +1 -1
  17. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/tool_converters/openai.py +1 -1
  18. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/tools.py +8 -2
  19. casual_llm-0.5.0/src/casual_llm.egg-info/PKG-INFO +192 -0
  20. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm.egg-info/SOURCES.txt +1 -0
  21. {casual_llm-0.4.2 → casual_llm-0.5.0}/tests/test_anthropic_provider.py +126 -118
  22. {casual_llm-0.4.2 → casual_llm-0.5.0}/tests/test_providers.py +253 -315
  23. {casual_llm-0.4.2 → casual_llm-0.5.0}/tests/test_tools.py +64 -0
  24. {casual_llm-0.4.2 → casual_llm-0.5.0}/tests/test_vision_ollama.py +33 -31
  25. {casual_llm-0.4.2 → casual_llm-0.5.0}/tests/test_vision_openai.py +30 -28
  26. casual_llm-0.4.2/PKG-INFO +0 -525
  27. casual_llm-0.4.2/README.md +0 -493
  28. casual_llm-0.4.2/src/casual_llm/config.py +0 -64
  29. casual_llm-0.4.2/src/casual_llm/providers/__init__.py +0 -107
  30. casual_llm-0.4.2/src/casual_llm.egg-info/PKG-INFO +0 -525
  31. {casual_llm-0.4.2 → casual_llm-0.5.0}/LICENSE +0 -0
  32. {casual_llm-0.4.2 → casual_llm-0.5.0}/setup.cfg +0 -0
  33. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/message_converters/__init__.py +0 -0
  34. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/messages.py +0 -0
  35. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/py.typed +0 -0
  36. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/tool_converters/__init__.py +0 -0
  37. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/usage.py +0 -0
  38. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/utils/__init__.py +0 -0
  39. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm/utils/image.py +0 -0
  40. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm.egg-info/dependency_links.txt +0 -0
  41. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm.egg-info/requires.txt +0 -0
  42. {casual_llm-0.4.2 → casual_llm-0.5.0}/src/casual_llm.egg-info/top_level.txt +0 -0
  43. {casual_llm-0.4.2 → casual_llm-0.5.0}/tests/test_backward_compatibility.py +0 -0
  44. {casual_llm-0.4.2 → casual_llm-0.5.0}/tests/test_image_utils.py +0 -0
  45. {casual_llm-0.4.2 → casual_llm-0.5.0}/tests/test_messages.py +0 -0
  46. {casual_llm-0.4.2 → casual_llm-0.5.0}/tests/test_vision_integration.py +0 -0
@@ -0,0 +1,192 @@
+ Metadata-Version: 2.4
+ Name: casual-llm
+ Version: 0.5.0
+ Summary: Lightweight LLM provider abstraction with standardized message models
+ Author-email: Alex Stansfield <alex@casualgenius.com>
+ License: MIT
+ Project-URL: Homepage, https://github.com/casualgenius/casual-llm
+ Project-URL: Documentation, https://github.com/casualgenius/casual-llm#readme
+ Project-URL: Repository, https://github.com/casualgenius/casual-llm
+ Project-URL: Issues, https://github.com/casualgenius/casual-llm/issues
+ Keywords: llm,openai,ollama,ai,chatbot,casual
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: ollama>=0.6.1
+ Requires-Dist: httpx[http2]>=0.28.1
+ Provides-Extra: openai
+ Requires-Dist: openai>=1.0.0; extra == "openai"
+ Provides-Extra: anthropic
+ Requires-Dist: anthropic>=0.20.0; extra == "anthropic"
+ Dynamic: license-file
+
+ # casual-llm
+
+ ![PyPI](https://img.shields.io/pypi/v/casual-llm)
+ ![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)
+ ![Python](https://img.shields.io/badge/python-3.10+-blue.svg)
+
+ **Lightweight LLM provider abstraction with standardized message models.**
+
+ Part of the "casual" ecosystem of lightweight AI tools.
+
+ > **Upgrading from v0.4.x?** See the [Migration Guide](MIGRATION-0.5.0.md) for breaking changes.
+
+ ## Features
+
+ - **Client/Model Separation** - Configure API connections once, create multiple models
+ - **Protocol-based** - Uses `typing.Protocol`, no inheritance required
+ - **Multi-provider** - Works with OpenAI, Anthropic (Claude), Ollama, or your custom provider
+ - **Lightweight** - Minimal dependencies (pydantic, ollama, httpx)
+ - **Async-first** - Built for modern async Python
+ - **Type-safe** - Full type hints with py.typed marker
+ - **OpenAI-compatible** - Standard message format used across the industry
+ - **Tool calling** - First-class support for function/tool calling
+ - **Per-model usage tracking** - Track token usage per model for cost monitoring
+ - **Vision support** - Send images to vision-capable models
+ - **Streaming** - Stream responses in real-time with `AsyncIterator`
+
+ ## Installation
+
+ ```bash
+ # Basic installation (includes Ollama support)
+ uv add casual-llm
+
+ # With OpenAI support
+ uv add casual-llm[openai]
+
+ # With Anthropic (Claude) support
+ uv add casual-llm[anthropic]
+
+ # With all providers
+ uv add casual-llm[openai,anthropic]
+
+ # Or using pip
+ pip install casual-llm[openai,anthropic]
+ ```
+
+ ## Quick Start
+
+ ```python
+ from casual_llm import OpenAIClient, Model, UserMessage
+
+ # Create client (works with OpenAI, OpenRouter, LM Studio, etc.)
+ client = OpenAIClient(
+     api_key="sk-...",  # or set OPENAI_API_KEY env var
+     base_url="https://openrouter.ai/api/v1",  # optional, omit for OpenAI
+ )
+
+ # Create model
+ model = Model(client, "gpt-4o-mini")
+
+ # Generate response
+ response = await model.chat([UserMessage(content="Hello!")])
+ print(response.content)
+ ```
+
+ **More examples:**
+ - [Quick Start Guide](docs/quick-start.md) - Ollama, Anthropic, and more
+ - [Vision Guide](docs/vision.md) - Send images to models
+ - [Streaming Guide](docs/streaming.md) - Real-time responses
+ - [Advanced Usage](docs/advanced.md) - Custom clients, configuration classes
+ - [API Reference](docs/api-reference.md) - Full API documentation
+ - [Examples Directory](examples/) - Complete working examples
+
+ ## Message Models
+
+ casual-llm provides OpenAI-compatible message models that work with any provider:
+
+ ```python
+ from casual_llm import (
+     UserMessage,
+     AssistantMessage,
+     SystemMessage,
+     ToolResultMessage,
+     TextContent,
+     ImageContent,
+ )
+
+ # System message (sets behavior)
+ system_msg = SystemMessage(content="You are a helpful assistant.")
+
+ # User message (simple text)
+ user_msg = UserMessage(content="Hello!")
+
+ # User message (multimodal - text + image)
+ vision_msg = UserMessage(
+     content=[
+         TextContent(text="What's in this image?"),
+         ImageContent(source="https://example.com/image.jpg"),
+     ]
+ )
+
+ # Assistant message (response from LLM)
+ assistant_msg = AssistantMessage(content="I'll help you with that.")
+
+ # Tool result message (after executing a tool)
+ tool_msg = ToolResultMessage(
+     name="get_weather",
+     tool_call_id="call_123",
+     content='{"temp": 20, "condition": "sunny"}'
+ )
+ ```
+
+ ## Why casual-llm?
+
+ | Feature | casual-llm | LangChain | litellm |
+ |---------|-----------|-----------|---------|
+ | **Dependencies** | 3 (pydantic, ollama, httpx) | 100+ | 50+ |
+ | **Protocol-based** | Yes | No | No |
+ | **Type-safe** | Full typing | Partial | Partial |
+ | **Message models** | Included | Separate | None |
+ | **Multi-model sharing** | Yes | No | Yes |
+ | **Vision support** | All providers | Yes | Yes |
+ | **Streaming** | All providers | Yes | Yes |
+ | **Providers** | OpenAI, Anthropic, Ollama | Many | Many |
+ | **Learning curve** | Minutes | Hours | Medium |
+
+ **Use casual-llm when you want:**
+ - Lightweight, focused library (not a framework)
+ - Protocol-based design (no inheritance)
+ - Standard message models shared across your codebase
+ - Efficient multi-model usage with shared connections
+ - Simple, predictable API
+
+ **Use LangChain when you need:**
+ - Full-featured framework with chains, agents, RAG
+ - Massive ecosystem of integrations
+ - Higher-level abstractions
+
+ ## Part of the casual-* Ecosystem
+
+ - **[casual-mcp](https://github.com/casualgenius/casual-mcp)** - MCP server orchestration and tool calling
+ - **casual-llm** (this library) - LLM provider abstraction
+ - **[casual-memory](https://github.com/casualgenius/casual-memory)** - Memory intelligence with conflict detection
+
+ All casual-* libraries share the same philosophy: lightweight, protocol-based, easy to use.
+
+ ## Contributing
+
+ Contributions welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
+
+ ## License
+
+ MIT License - see [LICENSE](LICENSE) for details.
+
+ ## Links
+
+ - **GitHub**: https://github.com/casualgenius/casual-llm
+ - **PyPI**: https://pypi.org/project/casual-llm/
+ - **Issues**: https://github.com/casualgenius/casual-llm/issues
+ - **Migration Guide**: [MIGRATION-0.5.0.md](MIGRATION-0.5.0.md)
+ - **casual-mcp**: https://github.com/casualgenius/casual-mcp
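
The feature list above promises real-time streaming via `AsyncIterator`, but neither README shows the call itself. A minimal sketch of what that could look like, assuming `Model` exposes a `stream()` method that yields text chunks; the method name and chunk type are assumptions, not confirmed by this diff.

```python
import asyncio

from casual_llm import Model, OpenAIClient, UserMessage


async def main() -> None:
    client = OpenAIClient(api_key="sk-...")
    model = Model(client, "gpt-4o-mini")

    # Hypothetical API: the README only says responses stream via
    # AsyncIterator, so stream() and str chunks are assumptions.
    async for chunk in model.stream([UserMessage(content="Tell me a story")]):
        print(chunk, end="", flush=True)


asyncio.run(main())
```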
@@ -0,0 +1,160 @@
+ # casual-llm
+
+ ![PyPI](https://img.shields.io/pypi/v/casual-llm)
+ ![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)
+ ![Python](https://img.shields.io/badge/python-3.10+-blue.svg)
+
+ **Lightweight LLM provider abstraction with standardized message models.**
+
+ Part of the "casual" ecosystem of lightweight AI tools.
+
+ > **Upgrading from v0.4.x?** See the [Migration Guide](MIGRATION-0.5.0.md) for breaking changes.
+
+ ## Features
+
+ - **Client/Model Separation** - Configure API connections once, create multiple models
+ - **Protocol-based** - Uses `typing.Protocol`, no inheritance required
+ - **Multi-provider** - Works with OpenAI, Anthropic (Claude), Ollama, or your custom provider
+ - **Lightweight** - Minimal dependencies (pydantic, ollama, httpx)
+ - **Async-first** - Built for modern async Python
+ - **Type-safe** - Full type hints with py.typed marker
+ - **OpenAI-compatible** - Standard message format used across the industry
+ - **Tool calling** - First-class support for function/tool calling
+ - **Per-model usage tracking** - Track token usage per model for cost monitoring
+ - **Vision support** - Send images to vision-capable models
+ - **Streaming** - Stream responses in real-time with `AsyncIterator`
+
+ ## Installation
+
+ ```bash
+ # Basic installation (includes Ollama support)
+ uv add casual-llm
+
+ # With OpenAI support
+ uv add casual-llm[openai]
+
+ # With Anthropic (Claude) support
+ uv add casual-llm[anthropic]
+
+ # With all providers
+ uv add casual-llm[openai,anthropic]
+
+ # Or using pip
+ pip install casual-llm[openai,anthropic]
+ ```
+
+ ## Quick Start
+
+ ```python
+ from casual_llm import OpenAIClient, Model, UserMessage
+
+ # Create client (works with OpenAI, OpenRouter, LM Studio, etc.)
+ client = OpenAIClient(
+     api_key="sk-...",  # or set OPENAI_API_KEY env var
+     base_url="https://openrouter.ai/api/v1",  # optional, omit for OpenAI
+ )
+
+ # Create model
+ model = Model(client, "gpt-4o-mini")
+
+ # Generate response
+ response = await model.chat([UserMessage(content="Hello!")])
+ print(response.content)
+ ```
+
+ **More examples:**
+ - [Quick Start Guide](docs/quick-start.md) - Ollama, Anthropic, and more
+ - [Vision Guide](docs/vision.md) - Send images to models
+ - [Streaming Guide](docs/streaming.md) - Real-time responses
+ - [Advanced Usage](docs/advanced.md) - Custom clients, configuration classes
+ - [API Reference](docs/api-reference.md) - Full API documentation
+ - [Examples Directory](examples/) - Complete working examples
+
+ ## Message Models
+
+ casual-llm provides OpenAI-compatible message models that work with any provider:
+
+ ```python
+ from casual_llm import (
+     UserMessage,
+     AssistantMessage,
+     SystemMessage,
+     ToolResultMessage,
+     TextContent,
+     ImageContent,
+ )
+
+ # System message (sets behavior)
+ system_msg = SystemMessage(content="You are a helpful assistant.")
+
+ # User message (simple text)
+ user_msg = UserMessage(content="Hello!")
+
+ # User message (multimodal - text + image)
+ vision_msg = UserMessage(
+     content=[
+         TextContent(text="What's in this image?"),
+         ImageContent(source="https://example.com/image.jpg"),
+     ]
+ )
+
+ # Assistant message (response from LLM)
+ assistant_msg = AssistantMessage(content="I'll help you with that.")
+
+ # Tool result message (after executing a tool)
+ tool_msg = ToolResultMessage(
+     name="get_weather",
+     tool_call_id="call_123",
+     content='{"temp": 20, "condition": "sunny"}'
+ )
+ ```
+
+ ## Why casual-llm?
+
+ | Feature | casual-llm | LangChain | litellm |
+ |---------|-----------|-----------|---------|
+ | **Dependencies** | 3 (pydantic, ollama, httpx) | 100+ | 50+ |
+ | **Protocol-based** | Yes | No | No |
+ | **Type-safe** | Full typing | Partial | Partial |
+ | **Message models** | Included | Separate | None |
+ | **Multi-model sharing** | Yes | No | Yes |
+ | **Vision support** | All providers | Yes | Yes |
+ | **Streaming** | All providers | Yes | Yes |
+ | **Providers** | OpenAI, Anthropic, Ollama | Many | Many |
+ | **Learning curve** | Minutes | Hours | Medium |
+
+ **Use casual-llm when you want:**
+ - Lightweight, focused library (not a framework)
+ - Protocol-based design (no inheritance)
+ - Standard message models shared across your codebase
+ - Efficient multi-model usage with shared connections
+ - Simple, predictable API
+
+ **Use LangChain when you need:**
+ - Full-featured framework with chains, agents, RAG
+ - Massive ecosystem of integrations
+ - Higher-level abstractions
+
+ ## Part of the casual-* Ecosystem
+
+ - **[casual-mcp](https://github.com/casualgenius/casual-mcp)** - MCP server orchestration and tool calling
+ - **casual-llm** (this library) - LLM provider abstraction
+ - **[casual-memory](https://github.com/casualgenius/casual-memory)** - Memory intelligence with conflict detection
+
+ All casual-* libraries share the same philosophy: lightweight, protocol-based, easy to use.
+
+ ## Contributing
+
+ Contributions welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
+
+ ## License
+
+ MIT License - see [LICENSE](LICENSE) for details.
+
+ ## Links
+
+ - **GitHub**: https://github.com/casualgenius/casual-llm
+ - **PyPI**: https://pypi.org/project/casual-llm/
+ - **Issues**: https://github.com/casualgenius/casual-llm/issues
+ - **Migration Guide**: [MIGRATION-0.5.0.md](MIGRATION-0.5.0.md)
+ - **casual-mcp**: https://github.com/casualgenius/casual-mcp
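
The `ToolResultMessage` example above shows only the reply half of a tool-calling turn. A sketch of the full round trip, assuming `model.chat()` accepts a `tools` argument and that the returned message carries tool calls with `id`, `function.name`, and a JSON-string `function.arguments` (shapes consistent with the converter hunks later in this diff); the `tools` parameter and the `weather_tool`/`get_weather` helpers are hypothetical.

```python
import json

from casual_llm import Model, ToolResultMessage, UserMessage


async def weather_turn(model: Model, weather_tool, get_weather) -> str:
    messages = [UserMessage(content="What's the weather in Paris?")]

    # Assumed: chat() takes tool schemas; this diff only shows that
    # per-provider tool converters exist.
    response = await model.chat(messages, tools=[weather_tool])
    messages.append(response)

    for call in response.tool_calls or []:
        args = json.loads(call.function.arguments)  # arguments are a JSON string
        result = get_weather(**args)  # your own tool implementation
        messages.append(
            ToolResultMessage(
                name=call.function.name,
                tool_call_id=call.id,
                content=json.dumps(result),
            )
        )

    final = await model.chat(messages)
    return final.content
```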
@@ -1,6 +1,6 @@
  [project]
  name = "casual-llm"
- version = "0.4.2"
+ version = "0.5.0"
  description = "Lightweight LLM provider abstraction with standardized message models"
  readme = "README.md"
  requires-python = ">=3.10"
@@ -2,25 +2,46 @@
  casual-llm - Lightweight LLM provider abstraction with standard message models.
 
  A simple, protocol-based library for working with different LLM providers
- (OpenAI, Ollama, etc.) using a unified interface and OpenAI-compatible message format.
+ (OpenAI, Ollama, Anthropic) using a unified interface and OpenAI-compatible message format.
 
  Part of the casual-* ecosystem of lightweight AI tools.
+
+ Example usage:
+     >>> from casual_llm import OpenAIClient, Model, UserMessage
+     >>>
+     >>> # Create client (configured once)
+     >>> client = OpenAIClient(api_key="...")
+     >>>
+     >>> # Create multiple models using the same client
+     >>> gpt4 = Model(client, name="gpt-4", temperature=0.7)
+     >>> gpt4o = Model(client, name="gpt-4o")
+     >>>
+     >>> # Use models
+     >>> response = await gpt4.chat([UserMessage(content="Hello")])
+     >>> print(response.content)
+     >>>
+     >>> # Each model tracks its own usage
+     >>> print(f"Used {gpt4.get_usage().total_tokens} tokens")
  """
 
- __version__ = "0.4.2"
+ __version__ = "0.5.0"
 
- # Model configuration
- from casual_llm.config import ModelConfig, Provider
+ # Configuration
+ from casual_llm.config import ClientConfig, ModelConfig, Provider
 
- # Provider protocol and implementations
+ # Client protocol and implementations
  from casual_llm.providers import (
-     LLMProvider,
-     OllamaProvider,
-     OpenAIProvider,
-     AnthropicProvider,
-     create_provider,
+     LLMClient,
+     OllamaClient,
+     OpenAIClient,
+     AnthropicClient,
+     create_client,
+     create_model,
  )
 
+ # Model class
+ from casual_llm.model import Model
+
  # OpenAI-compatible message models
  from casual_llm.messages import (
      ChatMessage,
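
Taken together, the renames above sketch the 0.4.x → 0.5.0 migration: one `*Provider` object per model becomes one shared client plus lightweight `Model` instances. A rough before/after, hedged because only the export names, not the constructor signatures, appear in this hunk.

```python
# Before (0.4.x): one provider object per model
from casual_llm import OpenAIProvider  # removed in 0.5.0

provider = OpenAIProvider(...)  # 0.4.x constructor arguments not shown in this diff

# After (0.5.0): configure the connection once, create many models
from casual_llm import Model, OpenAIClient

client = OpenAIClient(api_key="sk-...")
gpt4 = Model(client, name="gpt-4", temperature=0.7)
gpt4o = Model(client, name="gpt-4o")  # shares the same connection
```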
@@ -66,14 +87,19 @@ from casual_llm.message_converters import (
  __all__ = [
      # Version
      "__version__",
-     # Providers
-     "LLMProvider",
+     # Configuration
+     "ClientConfig",
      "ModelConfig",
      "Provider",
-     "OllamaProvider",
-     "OpenAIProvider",
-     "AnthropicProvider",
-     "create_provider",
+     # Clients
+     "LLMClient",
+     "OllamaClient",
+     "OpenAIClient",
+     "AnthropicClient",
+     "create_client",
+     "create_model",
+     # Model
+     "Model",
      # Messages
      "ChatMessage",
      "UserMessage",
@@ -0,0 +1,95 @@
+ """
+ Configuration for LLM clients and models.
+
+ This module defines configuration structures for LLM clients (API connections)
+ and models, allowing unified configuration across different provider backends.
+ """
+
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from typing import Any
+
+
+ class Provider(Enum):
+     """Supported LLM providers"""
+
+     OPENAI = "openai"
+     OLLAMA = "ollama"
+     ANTHROPIC = "anthropic"
+
+
+ @dataclass
+ class ClientConfig:
+     """
+     Configuration for an LLM client (API connection).
+
+     Provides a unified way to configure client connections across different providers.
+
+     Attributes:
+         provider: Provider type (OPENAI, OLLAMA, or ANTHROPIC)
+         base_url: Optional custom API endpoint
+         api_key: Optional API key (for OpenAI/Anthropic providers)
+         timeout: HTTP request timeout in seconds (default: 60.0)
+         extra_kwargs: Additional kwargs passed to the client
+
+     Examples:
+         >>> from casual_llm import ClientConfig, Provider
+         >>>
+         >>> # OpenAI configuration
+         >>> config = ClientConfig(
+         ...     provider=Provider.OPENAI,
+         ...     api_key="sk-..."
+         ... )
+         >>>
+         >>> # Ollama configuration
+         >>> config = ClientConfig(
+         ...     provider=Provider.OLLAMA,
+         ...     base_url="http://localhost:11434"
+         ... )
+         >>>
+         >>> # OpenRouter configuration (OpenAI-compatible)
+         >>> config = ClientConfig(
+         ...     provider=Provider.OPENAI,
+         ...     api_key="sk-or-...",
+         ...     base_url="https://openrouter.ai/api/v1"
+         ... )
+     """
+
+     provider: Provider
+     base_url: str | None = None
+     api_key: str | None = None
+     timeout: float = 60.0
+     extra_kwargs: dict[str, Any] = field(default_factory=dict)
+
+
+ @dataclass
+ class ModelConfig:
+     """
+     Configuration for a specific LLM model.
+
+     Used with a client to create Model instances.
+
+     Attributes:
+         name: Model name (e.g., "gpt-4o-mini", "qwen2.5:7b-instruct", "claude-3-5-sonnet-latest")
+         temperature: Sampling temperature (0.0-1.0, optional - uses provider default if not set)
+         extra_kwargs: Additional kwargs passed to chat/stream methods
+
+     Examples:
+         >>> from casual_llm import ModelConfig
+         >>>
+         >>> # GPT-4 configuration
+         >>> config = ModelConfig(
+         ...     name="gpt-4",
+         ...     temperature=0.7
+         ... )
+         >>>
+         >>> # Claude configuration
+         >>> config = ModelConfig(
+         ...     name="claude-3-5-sonnet-latest",
+         ...     temperature=0.5
+         ... )
+     """
+
+     name: str
+     temperature: float | None = None
+     extra_kwargs: dict[str, Any] = field(default_factory=dict)
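
The new `ClientConfig`/`ModelConfig` pair is evidently meant to feed the `create_client` and `create_model` factories exported from `casual_llm.providers`. A sketch under stated assumptions; the factory signatures do not appear in this diff, so `create_client(config)` is a guess, and the direct `Model(...)` construction simply mirrors the package docstring example.

```python
from casual_llm import ClientConfig, Model, ModelConfig, Provider, create_client

client_config = ClientConfig(
    provider=Provider.OLLAMA,
    base_url="http://localhost:11434",
    timeout=120.0,
)
model_config = ModelConfig(name="qwen2.5:7b-instruct", temperature=0.2)

# Assumed signature: create_client(config); unconfirmed by this diff.
client = create_client(client_config)

# Model construction matches the package docstring example; whether
# create_model wraps this exact call is an assumption.
model = Model(client, name=model_config.name, temperature=model_config.temperature)
```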
@@ -162,7 +162,7 @@ def convert_messages_to_anthropic(messages: list[ChatMessage]) -> list[dict[str,
  if not messages:
      return []
 
- logger.debug(f"Converting {len(messages)} messages to Anthropic format")
+ logger.debug("Converting %d messages to Anthropic format", len(messages))
 
  anthropic_messages: list[dict[str, Any]] = []
 
@@ -185,7 +185,8 @@ def convert_messages_to_anthropic(messages: list[ChatMessage]) -> list[dict[str,
  except json.JSONDecodeError:
      input_data = {}
      logger.warning(
-         f"Failed to parse tool call arguments: {tool_call.function.arguments}"
+         "Failed to parse tool call arguments: %s",
+         tool_call.function.arguments,
      )
 
  content_blocks.append(
@@ -236,7 +237,7 @@ def convert_messages_to_anthropic(messages: list[ChatMessage]) -> list[dict[str,
  )
 
  case _:
-     logger.warning(f"Unknown message role: {msg.role}")
+     logger.warning("Unknown message role: %s", msg.role)
 
  return anthropic_messages
 
@@ -265,7 +266,7 @@ def convert_tool_calls_from_anthropic(
  tool_calls = []
 
  for tool in response_tool_calls:
-     logger.debug(f"Converting tool call: {tool.name}")
+     logger.debug("Converting tool call: %s", tool.name)
 
      # Serialize input dict to JSON string for casual-llm format
      arguments = json.dumps(tool.input) if tool.input else "{}"
@@ -277,7 +278,7 @@ def convert_tool_calls_from_anthropic(
  )
  tool_calls.append(tool_call)
 
- logger.debug(f"Converted {len(tool_calls)} tool calls")
+ logger.debug("Converted %d tool calls", len(tool_calls))
  return tool_calls
 
 
@@ -285,6 +286,4 @@ __all__ = [
  "convert_messages_to_anthropic",
  "extract_system_message",
  "convert_tool_calls_from_anthropic",
- "_convert_image_to_anthropic",
- "_convert_user_content_to_anthropic",
  ]
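
Nearly every hunk in these converter modules makes the same change: f-string log messages become %-style arguments. The point is lazy formatting: with a deferred format string, the formatting work (and any expensive repr of large payloads) is skipped entirely when the level is disabled. A self-contained illustration:

```python
import logging

logging.basicConfig(level=logging.INFO)  # DEBUG is disabled here
logger = logging.getLogger("casual_llm")

messages = list(range(100_000))

# f-string: the message is formatted eagerly, even though the DEBUG
# record is then discarded by the INFO threshold.
logger.debug(f"Converting {len(messages)} messages to Anthropic format")

# %-style: logging defers formatting until a handler actually emits
# the record, so this line does no string work at INFO level.
logger.debug("Converting %d messages to Anthropic format", len(messages))
```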
@@ -45,7 +45,7 @@ async def _convert_image_to_ollama(image: ImageContent) -> str:
          return strip_base64_prefix(image.source)
      else:
          # Regular URL - fetch and convert to base64
-         logger.debug(f"Fetching image from URL for Ollama: {image.source}")
+         logger.debug("Fetching image from URL for Ollama: %s", image.source)
          base64_data, _ = await fetch_image_as_base64(image.source)
          return base64_data
  else:
@@ -128,7 +128,7 @@ async def convert_messages_to_ollama(messages: list[ChatMessage]) -> list[dict[s
  if not messages:
      return []
 
- logger.debug(f"Converting {len(messages)} messages to Ollama format")
+ logger.debug("Converting %d messages to Ollama format", len(messages))
 
  ollama_messages: list[dict[str, Any]] = []
 
@@ -188,7 +188,7 @@ async def convert_messages_to_ollama(messages: list[ChatMessage]) -> list[dict[s
      ollama_messages.append(user_message)
 
  case _:
-     logger.warning(f"Unknown message role: {msg.role}")
+     logger.warning("Unknown message role: %s", msg.role)
 
  return ollama_messages
 
@@ -221,9 +221,9 @@ def convert_tool_calls_from_ollama(
  tool_call_id = getattr(tool, "id", None)
  if not tool_call_id:
      tool_call_id = f"call_{uuid.uuid4().hex[:8]}"
-     logger.debug(f"Generated tool call ID: {tool_call_id}")
+     logger.debug("Generated tool call ID: %s", tool_call_id)
 
- logger.debug(f"Converting tool call: {tool.function.name}")
+ logger.debug("Converting tool call: %s", tool.function.name)
 
  # Convert arguments from Mapping[str, Any] to JSON string
  # Ollama returns arguments as a dict, but we need a JSON string
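
The hunk above also documents a quirk worth noting: Ollama tool calls may arrive without an `id`, so the converter mints a short one. Condensed from the diff into a standalone helper:

```python
import uuid


def ensure_tool_call_id(tool: object) -> str:
    """Return the provider-supplied tool call id, or mint a short one."""
    # Mirrors the hunk above: Ollama may omit "id" on tool calls.
    tool_call_id = getattr(tool, "id", None)
    if not tool_call_id:
        tool_call_id = f"call_{uuid.uuid4().hex[:8]}"
    return tool_call_id
```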
@@ -237,7 +237,7 @@ def convert_tool_calls_from_ollama(
  )
  tool_calls.append(tool_call)
 
- logger.debug(f"Converted {len(tool_calls)} tool calls")
+ logger.debug("Converted %d tool calls", len(tool_calls))
  return tool_calls
 
 
@@ -92,7 +92,7 @@ def convert_messages_to_openai(messages: list[ChatMessage]) -> list[dict[str, An
  if not messages:
      return []
 
- logger.debug(f"Converting {len(messages)} messages to OpenAI format")
+ logger.debug("Converting %d messages to OpenAI format", len(messages))
 
  openai_messages: list[dict[str, Any]] = []
 
@@ -145,7 +145,7 @@ def convert_messages_to_openai(messages: list[ChatMessage]) -> list[dict[str, An
  )
 
  case _:
-     logger.warning(f"Unknown message role: {msg.role}")
+     logger.warning("Unknown message role: %s", msg.role)
 
  return openai_messages
 
@@ -171,7 +171,7 @@ def convert_tool_calls_from_openai(
  tool_calls = []
 
  for tool in response_tool_calls:
-     logger.debug(f"Converting tool call: {tool.function.name}")
+     logger.debug("Converting tool call: %s", tool.function.name)
 
      tool_call = AssistantToolCall(
          id=tool.id,
@@ -182,7 +182,7 @@ def convert_tool_calls_from_openai(
  )
  tool_calls.append(tool_call)
 
- logger.debug(f"Converted {len(tool_calls)} tool calls")
+ logger.debug("Converted %d tool calls", len(tool_calls))
  return tool_calls
 
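
Across all three providers the converters normalize tool arguments to a JSON string: the Anthropic and Ollama dicts are serialized with `json.dumps`, and OpenAI's already arrive as a string. Downstream code therefore gets one parsing path. A sketch of that consumer side; the `function.arguments` attribute appears in the Anthropic hunk, and treating an empty string as `{}` is an added safeguard:

```python
import json
from typing import Any


def parse_tool_arguments(tool_call: Any) -> dict[str, Any]:
    # After conversion, arguments are a JSON string for every provider.
    return json.loads(tool_call.function.arguments or "{}")
```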