casual_llm-0.1.0-py3-none-any.whl

casual_llm/__init__.py ADDED
@@ -0,0 +1,88 @@
+ """
+ casual-llm - Lightweight LLM provider abstraction with standard message models.
+
+ A simple, protocol-based library for working with different LLM providers
+ (OpenAI, Ollama, etc.) using a unified interface and OpenAI-compatible message format.
+
+ Part of the casual-* ecosystem of lightweight AI tools.
+ """
+
+ __version__ = "0.1.0"
+
+ # Model configuration
+ from casual_llm.config import ModelConfig, Provider
+
+ # Provider protocol and implementations
+ from casual_llm.providers import (
+     LLMProvider,
+     OllamaProvider,
+     OpenAIProvider,
+     create_provider,
+ )
+
+ # OpenAI-compatible message models
+ from casual_llm.messages import (
+     ChatMessage,
+     UserMessage,
+     AssistantMessage,
+     SystemMessage,
+     ToolResultMessage,
+     AssistantToolCall,
+     AssistantToolCallFunction,
+ )
+
+ # Tool models
+ from casual_llm.tools import Tool, ToolParameter
+
+ # Usage tracking
+ from casual_llm.usage import Usage
+
+ # Tool converters
+ from casual_llm.tool_converters import (
+     tool_to_ollama,
+     tools_to_ollama,
+     tool_to_openai,
+     tools_to_openai,
+ )
+
+ # Message converters
+ from casual_llm.message_converters import (
+     convert_messages_to_openai,
+     convert_messages_to_ollama,
+     convert_tool_calls_from_openai,
+     convert_tool_calls_from_ollama,
+ )
+
+ __all__ = [
+     # Version
+     "__version__",
+     # Providers
+     "LLMProvider",
+     "ModelConfig",
+     "Provider",
+     "OllamaProvider",
+     "OpenAIProvider",
+     "create_provider",
+     # Messages
+     "ChatMessage",
+     "UserMessage",
+     "AssistantMessage",
+     "SystemMessage",
+     "ToolResultMessage",
+     "AssistantToolCall",
+     "AssistantToolCallFunction",
+     # Tools
+     "Tool",
+     "ToolParameter",
+     # Usage
+     "Usage",
+     "tool_to_ollama",
+     "tools_to_ollama",
+     "tool_to_openai",
+     "tools_to_openai",
+     # Message converters
+     "convert_messages_to_openai",
+     "convert_messages_to_ollama",
+     "convert_tool_calls_from_openai",
+     "convert_tool_calls_from_ollama",
+ ]
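
Everything re-exported here is enough for an end-to-end call. A minimal usage sketch, assuming a local Ollama server on the default port (the model name is illustrative):

    import asyncio

    from casual_llm import ModelConfig, Provider, UserMessage, create_provider


    async def main() -> None:
        # Build a provider from the re-exported factory and config types
        config = ModelConfig(name="qwen2.5:7b-instruct", provider=Provider.OLLAMA)
        provider = create_provider(config)

        # chat() returns an AssistantMessage with content and optional tool_calls
        reply = await provider.chat([UserMessage(content="Say hello in one word.")])
        print(reply.content)


    asyncio.run(main())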
casual_llm/config.py ADDED
@@ -0,0 +1,63 @@
+ """
+ Model configuration and provider enums.
+
+ This module defines configuration structures for LLM models,
+ allowing unified configuration across different provider backends.
+ """
+
+ from dataclasses import dataclass
+ from enum import Enum
+
+
+ class Provider(Enum):
+     """Supported LLM providers"""
+
+     OPENAI = "openai"
+     OLLAMA = "ollama"
+
+
+ @dataclass
+ class ModelConfig:
+     """
+     Configuration for a specific LLM model.
+
+     Provides a unified way to configure models across different providers.
+
+     Attributes:
+         name: Model name (e.g., "gpt-4o-mini", "qwen2.5:7b-instruct")
+         provider: Provider type (OPENAI or OLLAMA)
+         base_url: Optional custom API endpoint
+         api_key: Optional API key (for OpenAI/compatible providers)
+         temperature: Sampling temperature (0.0-1.0; optional, uses the provider default if not set)
+
+     Examples:
+         >>> from casual_llm import ModelConfig, Provider
+         >>>
+         >>> # OpenAI configuration
+         >>> config = ModelConfig(
+         ...     name="gpt-4o-mini",
+         ...     provider=Provider.OPENAI,
+         ...     api_key="sk-..."
+         ... )
+         >>>
+         >>> # Ollama configuration
+         >>> config = ModelConfig(
+         ...     name="qwen2.5:7b-instruct",
+         ...     provider=Provider.OLLAMA,
+         ...     base_url="http://localhost:11434"
+         ... )
+         >>>
+         >>> # OpenRouter configuration (OpenAI-compatible)
+         >>> config = ModelConfig(
+         ...     name="anthropic/claude-3.5-sonnet",
+         ...     provider=Provider.OPENAI,
+         ...     api_key="sk-or-...",
+         ...     base_url="https://openrouter.ai/api/v1"
+         ... )
+     """
+
+     name: str
+     provider: Provider
+     base_url: str | None = None
+     api_key: str | None = None
+     temperature: float | None = None
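
ModelConfig is a plain dataclass, so it composes with ordinary configuration code. A small sketch of loading it from the environment (the variable names and the config_from_env helper are hypothetical, not part of the package):

    import os

    from casual_llm import ModelConfig, Provider


    def config_from_env() -> ModelConfig:
        # Hypothetical helper: Provider(...) looks the enum up by its string value
        return ModelConfig(
            name=os.environ.get("LLM_MODEL", "qwen2.5:7b-instruct"),
            provider=Provider(os.environ.get("LLM_PROVIDER", "ollama")),
            base_url=os.environ.get("LLM_BASE_URL"),
            api_key=os.environ.get("LLM_API_KEY"),
        )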
casual_llm/message_converters/__init__.py ADDED
@@ -0,0 +1,22 @@
+ """
+ Message converters for different LLM provider formats.
+
+ This package provides converters to translate between casual-llm's unified
+ ChatMessage format and provider-specific formats (OpenAI, Ollama).
+ """
+
+ from casual_llm.message_converters.openai import (
+     convert_messages_to_openai,
+     convert_tool_calls_from_openai,
+ )
+ from casual_llm.message_converters.ollama import (
+     convert_messages_to_ollama,
+     convert_tool_calls_from_ollama,
+ )
+
+ __all__ = [
+     "convert_messages_to_openai",
+     "convert_messages_to_ollama",
+     "convert_tool_calls_from_openai",
+     "convert_tool_calls_from_ollama",
+ ]
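
The converters are plain functions over the shared message models, so they can also be used standalone, without a provider. A short sketch:

    from casual_llm import (
        AssistantMessage,
        SystemMessage,
        UserMessage,
        convert_messages_to_ollama,
        convert_messages_to_openai,
    )

    history = [
        SystemMessage(content="You are terse."),
        UserMessage(content="Hi"),
        AssistantMessage(content="Hello."),
    ]

    # The same history rendered for each backend
    openai_payload = convert_messages_to_openai(history)
    ollama_payload = convert_messages_to_ollama(history)
    assert openai_payload[0]["role"] == ollama_payload[0]["role"] == "system"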
casual_llm/message_converters/ollama.py ADDED
@@ -0,0 +1,158 @@
+ """
+ Ollama message converters.
+
+ Converts casual-llm ChatMessage format to Ollama API format and vice versa.
+ """
+
+ import json
+ import logging
+ import uuid
+ from typing import TYPE_CHECKING, Any
+
+ from casual_llm.messages import (
+     ChatMessage,
+     AssistantToolCall,
+     AssistantToolCallFunction,
+ )
+
+ if TYPE_CHECKING:
+     from ollama._types import Message
+
+ logger = logging.getLogger(__name__)
+
+
+ def convert_messages_to_ollama(messages: list[ChatMessage]) -> list[dict[str, Any]]:
+     """
+     Convert casual-llm ChatMessage list to Ollama format.
+
+     Unlike OpenAI, which expects tool call arguments as JSON strings,
+     Ollama expects them as dictionaries. This function handles that conversion.
+
+     Args:
+         messages: List of ChatMessage objects
+
+     Returns:
+         List of dictionaries in Ollama message format
+
+     Examples:
+         >>> from casual_llm import UserMessage
+         >>> messages = [UserMessage(content="Hello")]
+         >>> ollama_msgs = convert_messages_to_ollama(messages)
+         >>> ollama_msgs[0]["role"]
+         'user'
+     """
+     if not messages:
+         return []
+
+     logger.debug(f"Converting {len(messages)} messages to Ollama format")
+
+     ollama_messages: list[dict[str, Any]] = []
+
+     for msg in messages:
+         match msg.role:
+             case "assistant":
+                 # Handle assistant messages with optional tool calls
+                 message: dict[str, Any] = {
+                     "role": "assistant",
+                     "content": msg.content,
+                 }
+
+                 # Add tool calls if present
+                 # Ollama expects arguments as dict, not JSON string
+                 if msg.tool_calls:
+                     tool_calls = []
+                     for tool_call in msg.tool_calls:
+                         # Parse arguments from JSON string to dict for Ollama
+                         arguments_dict = (
+                             json.loads(tool_call.function.arguments)
+                             if tool_call.function.arguments
+                             else {}
+                         )
+
+                         tool_calls.append(
+                             {
+                                 "id": tool_call.id,
+                                 "type": tool_call.type,
+                                 "function": {
+                                     "name": tool_call.function.name,
+                                     "arguments": arguments_dict,  # dict for Ollama
+                                 },
+                             }
+                         )
+                     message["tool_calls"] = tool_calls
+
+                 ollama_messages.append(message)
+
+             case "system":
+                 ollama_messages.append({"role": "system", "content": msg.content})
+
+             case "tool":
+                 ollama_messages.append(
+                     {
+                         "role": "tool",
+                         "content": msg.content,
+                         "tool_call_id": msg.tool_call_id,
+                         "name": msg.name,
+                     }
+                 )
+
+             case "user":
+                 ollama_messages.append({"role": "user", "content": msg.content})
+
+             case _:
+                 logger.warning(f"Unknown message role: {msg.role}")
+
+     return ollama_messages
+
+
+ def convert_tool_calls_from_ollama(
+     response_tool_calls: list["Message.ToolCall"],
+ ) -> list[AssistantToolCall]:
+     """
+     Convert Ollama tool calls to casual-llm format.
+
+     Handles Ollama's ToolCall objects, which carry function arguments as a Mapping
+     instead of a JSON string. Also generates unique IDs if not provided.
+
+     Args:
+         response_tool_calls: List of ollama._types.Message.ToolCall objects
+
+     Returns:
+         List of AssistantToolCall objects
+
+     Examples:
+         >>> # From an Ollama response:
+         >>> # tool_calls = convert_tool_calls_from_ollama(response.message.tool_calls)
+         >>> # assert len(tool_calls) > 0
+     """
+     tool_calls = []
+
+     for tool in response_tool_calls:
+         # Get tool call ID, generate one if missing
+         tool_call_id = getattr(tool, "id", None)
+         if not tool_call_id:
+             tool_call_id = f"call_{uuid.uuid4().hex[:8]}"
+             logger.debug(f"Generated tool call ID: {tool_call_id}")
+
+         logger.debug(f"Converting tool call: {tool.function.name}")
+
+         # Convert arguments from Mapping[str, Any] to JSON string
+         # Ollama returns arguments as a dict, but we need a JSON string
+         arguments_dict = tool.function.arguments
+         arguments_json = json.dumps(arguments_dict) if arguments_dict else "{}"
+
+         tool_call = AssistantToolCall(
+             id=tool_call_id,
+             type=getattr(tool, "type", "function"),
+             function=AssistantToolCallFunction(name=tool.function.name, arguments=arguments_json),
+         )
+         tool_calls.append(tool_call)
+
+     logger.debug(f"Converted {len(tool_calls)} tool calls")
+     return tool_calls
+
+
+ __all__ = [
+     "convert_messages_to_ollama",
+     "convert_tool_calls_from_ollama",
+ ]
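
The JSON-string-to-dict handling is easiest to see with a tool-calling message. A sketch (the tool name and arguments are illustrative):

    from casual_llm import (
        AssistantMessage,
        AssistantToolCall,
        AssistantToolCallFunction,
        convert_messages_to_ollama,
    )

    msg = AssistantMessage(
        content=None,
        tool_calls=[
            AssistantToolCall(
                id="call_123",
                function=AssistantToolCallFunction(
                    name="get_weather",
                    arguments='{"city": "Berlin"}',  # JSON string in casual-llm
                ),
            )
        ],
    )

    # Ollama receives the arguments as a parsed dict, not a string
    converted = convert_messages_to_ollama([msg])
    assert converted[0]["tool_calls"][0]["function"]["arguments"] == {"city": "Berlin"}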
casual_llm/message_converters/openai.py ADDED
@@ -0,0 +1,135 @@
+ """
+ OpenAI message converters.
+
+ Converts casual-llm ChatMessage format to OpenAI API format and vice versa.
+ """
+
+ import logging
+ from typing import TYPE_CHECKING, Any
+
+ from casual_llm.messages import (
+     ChatMessage,
+     AssistantToolCall,
+     AssistantToolCallFunction,
+ )
+
+ if TYPE_CHECKING:
+     from openai.types.chat import ChatCompletionMessageToolCall
+
+ logger = logging.getLogger(__name__)
+
+
+ def convert_messages_to_openai(messages: list[ChatMessage]) -> list[dict[str, Any]]:
+     """
+     Convert casual-llm ChatMessage list to OpenAI format.
+
+     Handles all message types, including tool calls and tool results.
+
+     Args:
+         messages: List of ChatMessage objects
+
+     Returns:
+         List of dictionaries in OpenAI ChatCompletionMessageParam format
+
+     Examples:
+         >>> from casual_llm import UserMessage, AssistantMessage
+         >>> messages = [UserMessage(content="Hello")]
+         >>> openai_msgs = convert_messages_to_openai(messages)
+         >>> openai_msgs[0]["role"]
+         'user'
+     """
+     if not messages:
+         return []
+
+     logger.debug(f"Converting {len(messages)} messages to OpenAI format")
+
+     openai_messages: list[dict[str, Any]] = []
+
+     for msg in messages:
+         match msg.role:
+             case "assistant":
+                 # Handle assistant messages with optional tool calls
+                 message: dict[str, Any] = {
+                     "role": "assistant",
+                     "content": msg.content,
+                 }
+
+                 # Add tool calls if present
+                 if msg.tool_calls:
+                     tool_calls = []
+                     for tool_call in msg.tool_calls:
+                         tool_calls.append(
+                             {
+                                 "id": tool_call.id,
+                                 "type": tool_call.type,
+                                 "function": {
+                                     "name": tool_call.function.name,
+                                     "arguments": tool_call.function.arguments,
+                                 },
+                             }
+                         )
+                     message["tool_calls"] = tool_calls
+
+                 openai_messages.append(message)
+
+             case "system":
+                 openai_messages.append({"role": "system", "content": msg.content})
+
+             case "tool":
+                 openai_messages.append(
+                     {
+                         "role": "tool",
+                         "content": msg.content,
+                         "tool_call_id": msg.tool_call_id,
+                         "name": msg.name,
+                     }
+                 )
+
+             case "user":
+                 openai_messages.append({"role": "user", "content": msg.content})
+
+             case _:
+                 logger.warning(f"Unknown message role: {msg.role}")
+
+     return openai_messages
+
+
+ def convert_tool_calls_from_openai(
+     response_tool_calls: list["ChatCompletionMessageToolCall"],
+ ) -> list[AssistantToolCall]:
+     """
+     Convert OpenAI ChatCompletionMessageToolCall objects to casual-llm format.
+
+     Args:
+         response_tool_calls: List of ChatCompletionMessageToolCall from an OpenAI response
+
+     Returns:
+         List of AssistantToolCall objects
+
+     Examples:
+         >>> # Assuming the response has tool_calls:
+         >>> # tool_calls = convert_tool_calls_from_openai(response.choices[0].message.tool_calls)
+         >>> # assert len(tool_calls) > 0
+     """
+     tool_calls = []
+
+     for tool in response_tool_calls:
+         logger.debug(f"Converting tool call: {tool.function.name}")
+
+         tool_call = AssistantToolCall(
+             id=tool.id,
+             type="function",
+             function=AssistantToolCallFunction(
+                 name=tool.function.name, arguments=tool.function.arguments
+             ),
+         )
+         tool_calls.append(tool_call)
+
+     logger.debug(f"Converted {len(tool_calls)} tool calls")
+     return tool_calls
+
+
+ __all__ = [
+     "convert_messages_to_openai",
+     "convert_tool_calls_from_openai",
+ ]
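
For contrast with the Ollama converter, the OpenAI path leaves tool call arguments as the JSON string the models already store. A sketch with the same illustrative tool call:

    from casual_llm import (
        AssistantMessage,
        AssistantToolCall,
        AssistantToolCallFunction,
        convert_messages_to_openai,
    )

    msg = AssistantMessage(
        tool_calls=[
            AssistantToolCall(
                id="call_123",
                function=AssistantToolCallFunction(
                    name="get_weather", arguments='{"city": "Berlin"}'
                ),
            )
        ]
    )

    # The arguments pass through unchanged as a JSON string
    converted = convert_messages_to_openai([msg])
    assert converted[0]["tool_calls"][0]["function"]["arguments"] == '{"city": "Berlin"}'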
casual_llm/messages.py ADDED
@@ -0,0 +1,60 @@
+ """
+ OpenAI-compatible message models for LLM conversations.
+
+ These models follow the OpenAI chat completion API format and can be used
+ with any provider that implements the LLMProvider protocol.
+ """
+
+ from typing import Literal, TypeAlias
+
+ from pydantic import BaseModel
+
+
+ class AssistantToolCallFunction(BaseModel):
+     """Function call within an assistant tool call."""
+
+     name: str
+     arguments: str
+
+
+ class AssistantToolCall(BaseModel):
+     """Tool call made by the assistant."""
+
+     id: str
+     type: Literal["function"] = "function"
+     function: AssistantToolCallFunction
+
+
+ class AssistantMessage(BaseModel):
+     """Message from the AI assistant."""
+
+     role: Literal["assistant"] = "assistant"
+     content: str | None = None
+     tool_calls: list[AssistantToolCall] | None = None
+
+
+ class SystemMessage(BaseModel):
+     """System prompt message that sets the assistant's behavior."""
+
+     role: Literal["system"] = "system"
+     content: str
+
+
+ class ToolResultMessage(BaseModel):
+     """Result from a tool/function call execution."""
+
+     role: Literal["tool"] = "tool"
+     name: str
+     tool_call_id: str
+     content: str
+
+
+ class UserMessage(BaseModel):
+     """Message from the user."""
+
+     role: Literal["user"] = "user"
+     content: str | None
+
+
+ ChatMessage: TypeAlias = AssistantMessage | SystemMessage | ToolResultMessage | UserMessage
+ """Type alias for any chat message type (user, assistant, system, or tool result)."""
casual_llm/providers/__init__.py ADDED
@@ -0,0 +1,86 @@
+ """
+ LLM provider implementations.
+
+ This module contains provider-specific implementations of the LLMProvider protocol.
+ """
+
+ from casual_llm.config import ModelConfig, Provider
+ from casual_llm.providers.base import LLMProvider
+ from casual_llm.providers.ollama import OllamaProvider
+
+ try:
+     from casual_llm.providers.openai import OpenAIProvider
+ except ImportError:
+     OpenAIProvider = None  # type: ignore
+
+
+ def create_provider(
+     model_config: ModelConfig,
+     timeout: float = 60.0,
+ ) -> LLMProvider:
+     """
+     Factory function to create an LLM provider from a ModelConfig.
+
+     Args:
+         model_config: Model configuration (name, provider, base_url, api_key, temperature)
+         timeout: HTTP timeout in seconds (default: 60.0)
+
+     Returns:
+         Configured LLM provider (OllamaProvider or OpenAIProvider)
+
+     Raises:
+         ValueError: If provider type is not supported
+         ImportError: If openai package is not installed for OpenAI provider
+
+     Examples:
+         >>> from casual_llm import ModelConfig, Provider, create_provider
+         >>> config = ModelConfig(
+         ...     name="gpt-4o-mini",
+         ...     provider=Provider.OPENAI,
+         ...     api_key="sk-..."
+         ... )
+         >>> provider = create_provider(config)

+         >>> config = ModelConfig(
+         ...     name="qwen2.5:7b-instruct",
+         ...     provider=Provider.OLLAMA,
+         ...     base_url="http://localhost:11434"
+         ... )
+         >>> provider = create_provider(config)
+     """
+     if model_config.provider == Provider.OLLAMA:
+         host = model_config.base_url or "http://localhost:11434"
+         return OllamaProvider(
+             model=model_config.name,
+             host=host,
+             temperature=model_config.temperature,
+             timeout=timeout,
+         )
+
+     elif model_config.provider == Provider.OPENAI:
+         if OpenAIProvider is None:
+             raise ImportError(
+                 "OpenAI provider requires the 'openai' package. "
+                 "Install it with: pip install casual-llm[openai]"
+             )
+
+         return OpenAIProvider(
+             model=model_config.name,
+             api_key=model_config.api_key,
+             base_url=model_config.base_url,
+             temperature=model_config.temperature,
+             timeout=timeout,
+         )
+
+     else:
+         raise ValueError(f"Unsupported provider: {model_config.provider}")
+
+
+ __all__ = [
+     "LLMProvider",
+     "ModelConfig",
+     "Provider",
+     "OllamaProvider",
+     "OpenAIProvider",
+     "create_provider",
+ ]
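
Because OpenAIProvider is imported lazily, a missing openai extra surfaces as an ImportError at factory time rather than at package import. One way a caller might handle that (a sketch; the fallback model is illustrative):

    from casual_llm import ModelConfig, Provider, create_provider

    config = ModelConfig(name="gpt-4o-mini", provider=Provider.OPENAI, api_key="sk-...")

    try:
        provider = create_provider(config)
    except ImportError:
        # openai extra not installed; fall back to a local Ollama model
        provider = create_provider(
            ModelConfig(name="qwen2.5:7b-instruct", provider=Provider.OLLAMA)
        )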
casual_llm/providers/base.py ADDED
@@ -0,0 +1,78 @@
+ """
+ Base protocol for LLM providers.
+
+ Provides a unified interface for different LLM backends (OpenAI, Ollama, etc.)
+ using standard OpenAI-compatible message formats.
+ """
+
+ from __future__ import annotations
+
+ from typing import Protocol, Literal
+
+ from casual_llm.messages import ChatMessage, AssistantMessage
+ from casual_llm.tools import Tool
+ from casual_llm.usage import Usage
+
+
+ class LLMProvider(Protocol):
+     """
+     Protocol for LLM providers.
+
+     Uses OpenAI-compatible ChatMessage format for all interactions.
+     Supports both structured (JSON) and unstructured (text) responses.
+
+     This is a Protocol (PEP 544), meaning any class that implements
+     the chat() method with this signature is compatible; no
+     inheritance required.
+
+     Examples:
+         >>> from casual_llm import LLMProvider, AssistantMessage, UserMessage
+         >>>
+         >>> # Any provider implementing this protocol works
+         >>> async def get_response(provider: LLMProvider, prompt: str) -> AssistantMessage:
+         ...     messages = [UserMessage(content=prompt)]
+         ...     return await provider.chat(messages)
+     """
+
+     async def chat(
+         self,
+         messages: list[ChatMessage],
+         response_format: Literal["json", "text"] = "text",
+         max_tokens: int | None = None,
+         tools: list[Tool] | None = None,
+         temperature: float | None = None,
+     ) -> AssistantMessage:
+         """
+         Generate a chat response from the LLM.
+
+         Args:
+             messages: List of ChatMessage (UserMessage, AssistantMessage, SystemMessage, etc.)
+             response_format: Expected response format ("json" or "text")
+             max_tokens: Maximum tokens to generate (optional)
+             tools: List of tools available for the LLM to call (optional)
+             temperature: Temperature for this request (optional, overrides instance temperature)
+
+         Returns:
+             AssistantMessage with content and optional tool_calls
+
+         Raises:
+             Provider-specific exceptions (httpx.HTTPError, openai.OpenAIError, etc.)
+         """
+         ...
+
+     def get_usage(self) -> Usage | None:
+         """
+         Get token usage statistics from the last chat() call.
+
+         Returns:
+             Usage object with prompt_tokens, completion_tokens, and total_tokens,
+             or None if no calls have been made yet.
+
+         Examples:
+             >>> provider = OllamaProvider(model="llama3.1")
+             >>> await provider.chat([UserMessage(content="Hello")])
+             >>> usage = provider.get_usage()
+             >>> if usage:
+             ...     print(f"Used {usage.total_tokens} tokens")
+         """
+         ...
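
Because LLMProvider is a structural Protocol, a test double needs no inheritance; any class with matching methods conforms. A minimal sketch (EchoProvider is hypothetical, not part of the package):

    from typing import Literal

    from casual_llm import AssistantMessage, ChatMessage, Tool, Usage


    class EchoProvider:
        """Test double that satisfies LLMProvider structurally, with no base class."""

        def __init__(self) -> None:
            self._usage: Usage | None = None

        async def chat(
            self,
            messages: list[ChatMessage],
            response_format: Literal["json", "text"] = "text",
            max_tokens: int | None = None,
            tools: list[Tool] | None = None,
            temperature: float | None = None,
        ) -> AssistantMessage:
            # Echo the last message's content back as the "assistant" reply
            last = messages[-1].content or ""
            return AssistantMessage(content=last)

        def get_usage(self) -> Usage | None:
            # No real tokens are consumed, so there is nothing to report
            return self._usage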