composo 0.0.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- composo-0.0.11/MANIFEST.in +22 -0
- composo-0.0.11/PKG-INFO +162 -0
- composo-0.0.11/README.md +127 -0
- composo-0.0.11/composo/__init__.py +69 -0
- composo-0.0.11/composo/adapters/__init__.py +15 -0
- composo-0.0.11/composo/adapters/anthropic_adapter.py +31 -0
- composo-0.0.11/composo/adapters/base.py +32 -0
- composo-0.0.11/composo/adapters/factory.py +35 -0
- composo-0.0.11/composo/adapters/openai_adapter.py +40 -0
- composo-0.0.11/composo/chat_types.py +46 -0
- composo-0.0.11/composo/client/__init__.py +17 -0
- composo-0.0.11/composo/client/async_client.py +178 -0
- composo-0.0.11/composo/client/base.py +172 -0
- composo-0.0.11/composo/client/sync.py +168 -0
- composo-0.0.11/composo/client/types.py +41 -0
- composo-0.0.11/composo/exceptions/__init__.py +23 -0
- composo-0.0.11/composo/exceptions/api_exceptions.py +59 -0
- composo-0.0.11/composo/models/__init__.py +28 -0
- composo-0.0.11/composo/models/client_models.py +34 -0
- composo-0.0.11/composo/models/criteria.py +36 -0
- composo-0.0.11/composo/models/evaluation.py +14 -0
- composo-0.0.11/composo/models/message_data.py +18 -0
- composo-0.0.11/composo/models/messages.py +17 -0
- composo-0.0.11/composo/models/requests.py +41 -0
- composo-0.0.11/composo/models/responses.py +41 -0
- composo-0.0.11/composo/py.typed +0 -0
- composo-0.0.11/composo/utils/__init__.py +19 -0
- composo-0.0.11/composo/utils/validation.py +34 -0
- composo-0.0.11/composo/validation.py +68 -0
- composo-0.0.11/composo.egg-info/PKG-INFO +162 -0
- composo-0.0.11/composo.egg-info/SOURCES.txt +34 -0
- composo-0.0.11/composo.egg-info/dependency_links.txt +1 -0
- composo-0.0.11/composo.egg-info/requires.txt +21 -0
- composo-0.0.11/composo.egg-info/top_level.txt +1 -0
- composo-0.0.11/pyproject.toml +137 -0
- composo-0.0.11/setup.cfg +4 -0
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
prune tests
|
|
2
|
+
global-exclude test_*
|
|
3
|
+
global-exclude *test.py
|
|
4
|
+
global-exclude *test_*.py
|
|
5
|
+
global-exclude .pytest_cache/
|
|
6
|
+
global-exclude .coverage
|
|
7
|
+
global-exclude htmlcov/
|
|
8
|
+
global-exclude .tox/
|
|
9
|
+
global-exclude .mypy_cache/
|
|
10
|
+
global-exclude .eggs/
|
|
11
|
+
global-exclude *.egg-info/
|
|
12
|
+
global-exclude build/
|
|
13
|
+
global-exclude dist/
|
|
14
|
+
global-exclude .git/
|
|
15
|
+
global-exclude .gitignore
|
|
16
|
+
global-exclude .editorconfig
|
|
17
|
+
global-exclude .flake8
|
|
18
|
+
global-exclude .isort.cfg
|
|
19
|
+
global-exclude .mypy.ini
|
|
20
|
+
global-exclude pyproject.toml.bak
|
|
21
|
+
global-exclude build_and_publish.sh
|
|
22
|
+
global-exclude requirements.txt
|
composo-0.0.11/PKG-INFO
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: composo
|
|
3
|
+
Version: 0.0.11
|
|
4
|
+
Summary: Composo Python SDK
|
|
5
|
+
Author-email: Composo Team <support@composo.ai>
|
|
6
|
+
Classifier: Development Status :: 3 - Alpha
|
|
7
|
+
Classifier: Intended Audience :: Developers
|
|
8
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
14
|
+
Requires-Python: >=3.8
|
|
15
|
+
Description-Content-Type: text/markdown
|
|
16
|
+
Requires-Dist: httpx>=0.21.0
|
|
17
|
+
Requires-Dist: pydantic<3.0.0,>=1.9.2
|
|
18
|
+
Requires-Dist: typing-extensions>=4.0.0
|
|
19
|
+
Requires-Dist: deepdiff>=6.0.0
|
|
20
|
+
Requires-Dist: openai>=1.0.0
|
|
21
|
+
Requires-Dist: anthropic>=0.25.0
|
|
22
|
+
Requires-Dist: tenacity>=8.0.0
|
|
23
|
+
Provides-Extra: test
|
|
24
|
+
Requires-Dist: pytest>=7.0.0; extra == "test"
|
|
25
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "test"
|
|
26
|
+
Requires-Dist: pytest-cov>=4.0.0; extra == "test"
|
|
27
|
+
Provides-Extra: dev
|
|
28
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
29
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
30
|
+
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
|
|
31
|
+
Requires-Dist: black>=23.0.0; extra == "dev"
|
|
32
|
+
Requires-Dist: isort>=5.12.0; extra == "dev"
|
|
33
|
+
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
|
34
|
+
Requires-Dist: flake8>=6.0.0; extra == "dev"
|
|
35
|
+
|
|
36
|
+
# Composo Python SDK
|
|
37
|
+
|
|
38
|
+
A Python SDK for Composo evaluation services, providing both synchronous and asynchronous clients for evaluating LLM conversations with support for OpenAI and Anthropic formats.
|
|
39
|
+
|
|
40
|
+
## Features
|
|
41
|
+
|
|
42
|
+
- **Dual Client Support**: Both synchronous and asynchronous clients
|
|
43
|
+
- **Multiple LLM Provider Support**: Native support for OpenAI and Anthropic formats
|
|
44
|
+
- **Connection Pooling**: Optimized HTTP client with connection reuse
|
|
45
|
+
- **Retry Logic**: Exponential backoff with jitter for robust API calls
|
|
46
|
+
- **Type Safety**: Full type hints and Pydantic models
|
|
47
|
+
- **Context Managers**: Proper resource management with context managers
|
|
48
|
+
|
|
49
|
+
## Installation
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
pip install composo
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
## Quick Start
|
|
56
|
+
|
|
57
|
+
### Basic Usage
|
|
58
|
+
|
|
59
|
+
```python
|
|
60
|
+
from composo import Composo, AsyncComposo
|
|
61
|
+
|
|
62
|
+
# Initialize client
|
|
63
|
+
client = Composo(api_key="your-api-key")
|
|
64
|
+
|
|
65
|
+
# Evaluate messages
|
|
66
|
+
messages = [
|
|
67
|
+
{"role": "user", "content": "What is machine learning?"},
|
|
68
|
+
{"role": "assistant", "content": "Machine learning is..."}
|
|
69
|
+
]
|
|
70
|
+
|
|
71
|
+
criteria = ["Reward responses that provide accurate technical explanations"]
|
|
72
|
+
|
|
73
|
+
result = client.evaluate(messages=messages, criteria=criteria)
|
|
74
|
+
print(f"Score: {result.score}")
|
|
75
|
+
print(f"Explanation: {result.explanation}")
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
### Async Usage
|
|
79
|
+
|
|
80
|
+
```python
|
|
81
|
+
import asyncio
|
|
82
|
+
from composo import AsyncComposo
|
|
83
|
+
|
|
84
|
+
async def main():
|
|
85
|
+
async with AsyncComposo(api_key="your-api-key") as client:
|
|
86
|
+
result = await client.evaluate(
|
|
87
|
+
messages=messages,
|
|
88
|
+
criteria=criteria
|
|
89
|
+
)
|
|
90
|
+
print(f"Score: {result.score}")
|
|
91
|
+
|
|
92
|
+
asyncio.run(main())
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
### With LLM Results
|
|
96
|
+
|
|
97
|
+
```python
|
|
98
|
+
import openai
|
|
99
|
+
from composo import Composo
|
|
100
|
+
|
|
101
|
+
# Get response from OpenAI
|
|
102
|
+
openai_client = openai.OpenAI(api_key="your-openai-key")
|
|
103
|
+
openai_result = openai_client.chat.completions.create(
|
|
104
|
+
model="gpt-4",
|
|
105
|
+
messages=[{"role": "user", "content": "What is machine learning?"}]
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
# Evaluate the response
|
|
109
|
+
composo_client = Composo(api_key="your-composo-key")
|
|
110
|
+
eval_result = composo_client.evaluate(
|
|
111
|
+
messages=[{"role": "user", "content": "What is machine learning?"}],
|
|
112
|
+
result=openai_result,
|
|
113
|
+
criteria=["Reward accurate technical explanations"]
|
|
114
|
+
)
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
## Configuration
|
|
118
|
+
|
|
119
|
+
### Client Options
|
|
120
|
+
|
|
121
|
+
- `api_key` (required): Your Composo API key
|
|
122
|
+
- `base_url` (optional): Custom API endpoint (default: https://platform.composo.ai)
|
|
123
|
+
- `num_retries` (optional): Number of retry attempts (default: 1)
|
|
124
|
+
- `model_core` (optional): Specific model core for evaluation
|
|
125
|
+
|
|
126
|
+
### Logging
|
|
127
|
+
|
|
128
|
+
The SDK uses Python's standard logging module. Configure logging level:
|
|
129
|
+
|
|
130
|
+
```python
|
|
131
|
+
import logging
|
|
132
|
+
logging.getLogger("composo").setLevel(logging.INFO)
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
## Error Handling
|
|
136
|
+
|
|
137
|
+
The SDK provides specific exception types:
|
|
138
|
+
|
|
139
|
+
```python
|
|
140
|
+
from composo import (
|
|
141
|
+
ComposoError,
|
|
142
|
+
RateLimitError,
|
|
143
|
+
MalformedError,
|
|
144
|
+
APIError,
|
|
145
|
+
AuthenticationError
|
|
146
|
+
)
|
|
147
|
+
|
|
148
|
+
try:
|
|
149
|
+
result = client.evaluate(messages=messages, criteria=criteria)
|
|
150
|
+
except RateLimitError:
|
|
151
|
+
print("Rate limit exceeded")
|
|
152
|
+
except AuthenticationError:
|
|
153
|
+
print("Invalid API key")
|
|
154
|
+
except ComposoError as e:
|
|
155
|
+
print(f"Composo error: {e}")
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
## Performance Optimization
|
|
159
|
+
|
|
160
|
+
- **Connection Pooling**: HTTP clients reuse connections for better performance
|
|
161
|
+
- **Context Managers**: Use context managers to properly close connections
|
|
162
|
+
- **Async Support**: Use async client for high-throughput scenarios
|
composo-0.0.11/README.md
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
# Composo Python SDK
|
|
2
|
+
|
|
3
|
+
A Python SDK for Composo evaluation services, providing both synchronous and asynchronous clients for evaluating LLM conversations with support for OpenAI and Anthropic formats.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Dual Client Support**: Both synchronous and asynchronous clients
|
|
8
|
+
- **Multiple LLM Provider Support**: Native support for OpenAI and Anthropic formats
|
|
9
|
+
- **Connection Pooling**: Optimized HTTP client with connection reuse
|
|
10
|
+
- **Retry Logic**: Exponential backoff with jitter for robust API calls
|
|
11
|
+
- **Type Safety**: Full type hints and Pydantic models
|
|
12
|
+
- **Context Managers**: Proper resource management with context managers
|
|
13
|
+
|
|
14
|
+
## Installation
|
|
15
|
+
|
|
16
|
+
```bash
|
|
17
|
+
pip install composo
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
## Quick Start
|
|
21
|
+
|
|
22
|
+
### Basic Usage
|
|
23
|
+
|
|
24
|
+
```python
|
|
25
|
+
from composo import Composo, AsyncComposo
|
|
26
|
+
|
|
27
|
+
# Initialize client
|
|
28
|
+
client = Composo(api_key="your-api-key")
|
|
29
|
+
|
|
30
|
+
# Evaluate messages
|
|
31
|
+
messages = [
|
|
32
|
+
{"role": "user", "content": "What is machine learning?"},
|
|
33
|
+
{"role": "assistant", "content": "Machine learning is..."}
|
|
34
|
+
]
|
|
35
|
+
|
|
36
|
+
criteria = ["Reward responses that provide accurate technical explanations"]
|
|
37
|
+
|
|
38
|
+
result = client.evaluate(messages=messages, criteria=criteria)
|
|
39
|
+
print(f"Score: {result.score}")
|
|
40
|
+
print(f"Explanation: {result.explanation}")
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
### Async Usage
|
|
44
|
+
|
|
45
|
+
```python
|
|
46
|
+
import asyncio
|
|
47
|
+
from composo import AsyncComposo
|
|
48
|
+
|
|
49
|
+
async def main():
|
|
50
|
+
async with AsyncComposo(api_key="your-api-key") as client:
|
|
51
|
+
result = await client.evaluate(
|
|
52
|
+
messages=messages,
|
|
53
|
+
criteria=criteria
|
|
54
|
+
)
|
|
55
|
+
print(f"Score: {result.score}")
|
|
56
|
+
|
|
57
|
+
asyncio.run(main())
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
### With LLM Results
|
|
61
|
+
|
|
62
|
+
```python
|
|
63
|
+
import openai
|
|
64
|
+
from composo import Composo
|
|
65
|
+
|
|
66
|
+
# Get response from OpenAI
|
|
67
|
+
openai_client = openai.OpenAI(api_key="your-openai-key")
|
|
68
|
+
openai_result = openai_client.chat.completions.create(
|
|
69
|
+
model="gpt-4",
|
|
70
|
+
messages=[{"role": "user", "content": "What is machine learning?"}]
|
|
71
|
+
)
|
|
72
|
+
|
|
73
|
+
# Evaluate the response
|
|
74
|
+
composo_client = Composo(api_key="your-composo-key")
|
|
75
|
+
eval_result = composo_client.evaluate(
|
|
76
|
+
messages=[{"role": "user", "content": "What is machine learning?"}],
|
|
77
|
+
result=openai_result,
|
|
78
|
+
criteria=["Reward accurate technical explanations"]
|
|
79
|
+
)
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
## Configuration
|
|
83
|
+
|
|
84
|
+
### Client Options
|
|
85
|
+
|
|
86
|
+
- `api_key` (required): Your Composo API key
|
|
87
|
+
- `base_url` (optional): Custom API endpoint (default: https://platform.composo.ai)
|
|
88
|
+
- `num_retries` (optional): Number of retry attempts (default: 1)
|
|
89
|
+
- `model_core` (optional): Specific model core for evaluation
|
|
90
|
+
|
|
91
|
+
### Logging
|
|
92
|
+
|
|
93
|
+
The SDK uses Python's standard logging module. Configure logging level:
|
|
94
|
+
|
|
95
|
+
```python
|
|
96
|
+
import logging
|
|
97
|
+
logging.getLogger("composo").setLevel(logging.INFO)
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
## Error Handling
|
|
101
|
+
|
|
102
|
+
The SDK provides specific exception types:
|
|
103
|
+
|
|
104
|
+
```python
|
|
105
|
+
from composo import (
|
|
106
|
+
ComposoError,
|
|
107
|
+
RateLimitError,
|
|
108
|
+
MalformedError,
|
|
109
|
+
APIError,
|
|
110
|
+
AuthenticationError
|
|
111
|
+
)
|
|
112
|
+
|
|
113
|
+
try:
|
|
114
|
+
result = client.evaluate(messages=messages, criteria=criteria)
|
|
115
|
+
except RateLimitError:
|
|
116
|
+
print("Rate limit exceeded")
|
|
117
|
+
except AuthenticationError:
|
|
118
|
+
print("Invalid API key")
|
|
119
|
+
except ComposoError as e:
|
|
120
|
+
print(f"Composo error: {e}")
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
## Performance Optimization
|
|
124
|
+
|
|
125
|
+
- **Connection Pooling**: HTTP clients reuse connections for better performance
|
|
126
|
+
- **Context Managers**: Use context managers to properly close connections
|
|
127
|
+
- **Async Support**: Use async client for high-throughput scenarios
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Composo SDK - A Python SDK for Composo evaluation services
|
|
3
|
+
|
|
4
|
+
This package provides both synchronous and asynchronous clients for evaluating
|
|
5
|
+
LLM conversations with support for OpenAI and Anthropic formats.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
# Package metadata.
# NOTE(review): the sdist metadata (PKG-INFO) declares Version: 0.0.11, so the
# previous value "0.1.0" was out of sync with the published release. Keep this
# in lockstep with the version in pyproject.toml — TODO: consider deriving it
# via importlib.metadata.version("composo") to remove the duplication.
__version__ = "0.0.11"
__author__ = "Composo Team"
__email__ = "support@composo.ai"
__description__ = "A Python SDK for Composo evaluation services"
|
|
12
|
+
|
|
13
|
+
from .client import Composo, AsyncComposo
|
|
14
|
+
from .exceptions import (
|
|
15
|
+
ComposoError,
|
|
16
|
+
RateLimitError,
|
|
17
|
+
MalformedError,
|
|
18
|
+
APIError,
|
|
19
|
+
AuthenticationError,
|
|
20
|
+
)
|
|
21
|
+
from .validation import validate_raw_chat_conforms_to_type
|
|
22
|
+
from .chat_types import OpenAIChatSessionType, AnthropicChatSessionType, ChatSessionType
|
|
23
|
+
from .models import CriteriaSet
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
# Create a criteria module-like object for backward compatibility
|
|
27
|
+
class CriteriaModule:
    """Read-only accessor for the predefined criteria sets.

    Exposed as a class with properties (rather than plain module attributes)
    so that ``composo.criteria.basic`` / ``composo.criteria.rag`` keep working
    for callers written against earlier releases.
    """

    @property
    def basic(self):
        """Predefined "basic" criteria set (delegates to ``CriteriaSet``)."""
        criteria_set = CriteriaSet.basic
        return criteria_set

    @property
    def rag(self):
        """Predefined RAG criteria set (delegates to ``CriteriaSet``)."""
        criteria_set = CriteriaSet.rag
        return criteria_set
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
# Create a singleton instance
|
|
40
|
+
criteria = CriteriaModule()
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
# Package exports
|
|
44
|
+
__all__ = [
|
|
45
|
+
# Main clients
|
|
46
|
+
"Composo",
|
|
47
|
+
"AsyncComposo",
|
|
48
|
+
# Exceptions
|
|
49
|
+
"ComposoError",
|
|
50
|
+
"RateLimitError",
|
|
51
|
+
"MalformedError",
|
|
52
|
+
"APIError",
|
|
53
|
+
"AuthenticationError",
|
|
54
|
+
# Validation
|
|
55
|
+
"validate_raw_chat_conforms_to_type",
|
|
56
|
+
# Types
|
|
57
|
+
"OpenAIChatSessionType",
|
|
58
|
+
"AnthropicChatSessionType",
|
|
59
|
+
"ChatSessionType",
|
|
60
|
+
# Criteria libraries
|
|
61
|
+
"CriteriaSet",
|
|
62
|
+
"criteria",
|
|
63
|
+
# Metadata
|
|
64
|
+
"__version__",
|
|
65
|
+
]
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
# Welcome message - removed for performance
|
|
69
|
+
# print(f"🚀 Composo SDK v{__version__} loaded successfully!")
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Format adapters for different LLM providers
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from .base import FormatAdapter
|
|
6
|
+
from .openai_adapter import OpenAIAdapter
|
|
7
|
+
from .anthropic_adapter import AnthropicAdapter
|
|
8
|
+
from .factory import AdapterFactory
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"FormatAdapter",
|
|
12
|
+
"OpenAIAdapter",
|
|
13
|
+
"AnthropicAdapter",
|
|
14
|
+
"AdapterFactory",
|
|
15
|
+
]
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Anthropic format adapter
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import List, Dict, Any, Optional
|
|
6
|
+
from .base import FormatAdapter
|
|
7
|
+
from ..client.types import MessagesType, ToolsType, ResultType
|
|
8
|
+
|
|
9
|
+
# Import Anthropic types for type checking
|
|
10
|
+
from anthropic.types.message import Message
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class AnthropicAdapter(FormatAdapter):
    """Adapter that folds an Anthropic ``Message`` into a conversation."""

    def can_handle(self, result: ResultType) -> bool:
        """Return True when *result* is an Anthropic ``Message`` instance."""
        return isinstance(result, Message)

    def process_result(
        self,
        messages: MessagesType,
        result: ResultType,
        system: Optional[str] = None,
        tools: ToolsType = None,
    ) -> tuple[List[Dict[str, Any]], Optional[str], Optional[List[Dict[str, Any]]]]:
        """Append *result* to *messages* (mutated in place) and return the triple.

        Unknown result types pass through untouched; ``system`` and ``tools``
        are returned as given.
        """
        if self.can_handle(result):
            # Match notebook behavior: dump the whole Message without field
            # filtering, dropping None-valued fields.
            dumped = result.model_dump(exclude_none=True)
            messages.append(dumped)
        return messages, system, tools
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Base adapter interface
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from typing import List, Dict, Any, Optional
|
|
7
|
+
from ..client.types import MessagesType, ToolsType, ResultType
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class FormatAdapter(ABC):
    """Abstract base class for format adapters.

    Concrete adapters translate a provider-specific LLM result object into a
    plain message dict appended to the running conversation (see the OpenAI
    and Anthropic adapters in this package).
    """

    @abstractmethod
    def process_result(
        self,
        messages: MessagesType,
        result: ResultType,
        system: Optional[str] = None,
        tools: ToolsType = None,
    ) -> tuple[List[Dict[str, Any]], Optional[str], Optional[List[Dict[str, Any]]]]:
        """
        Process LLM result and append to messages

        Returns:
            tuple: (updated_messages, system_message, tools)
        """
        pass

    @abstractmethod
    def can_handle(self, result: ResultType) -> bool:
        """Check if this adapter can handle the given result type"""
        pass
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Adapter factory for creating appropriate format adapters
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Any, Optional
|
|
6
|
+
from .base import FormatAdapter
|
|
7
|
+
from .openai_adapter import OpenAIAdapter
|
|
8
|
+
from .anthropic_adapter import AnthropicAdapter
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class AdapterFactory:
    """Factory for creating format adapters based on result type."""

    # Class-level registry shared by all callers; earlier entries win.
    _adapters = [
        OpenAIAdapter(),
        AnthropicAdapter(),
    ]

    @classmethod
    def get_adapter(cls, result: Any) -> Optional[FormatAdapter]:
        """Return the first registered adapter that can handle *result*, else None."""
        return next(
            (candidate for candidate in cls._adapters if candidate.can_handle(result)),
            None,
        )

    @classmethod
    def register_adapter(cls, adapter: FormatAdapter) -> None:
        """Register *adapter* with highest priority (checked before built-ins)."""
        cls._adapters.insert(0, adapter)

    @classmethod
    def get_all_adapters(cls) -> list[FormatAdapter]:
        """Return a shallow copy of the adapter registry."""
        return list(cls._adapters)
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"""
|
|
2
|
+
OpenAI format adapter
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import List, Dict, Any, Optional
|
|
6
|
+
from .base import FormatAdapter
|
|
7
|
+
from ..client.types import MessagesType, ToolsType, ResultType
|
|
8
|
+
|
|
9
|
+
# Import OpenAI types for type checking
|
|
10
|
+
from openai.types.chat.chat_completion import ChatCompletion
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class OpenAIAdapter(FormatAdapter):
    """Adapter that folds an OpenAI ``ChatCompletion`` into a conversation."""

    def can_handle(self, result: ResultType) -> bool:
        """Return True when *result* is an OpenAI ``ChatCompletion`` instance."""
        return isinstance(result, ChatCompletion)

    def process_result(
        self,
        messages: MessagesType,
        result: ResultType,
        system: Optional[str] = None,
        tools: ToolsType = None,
    ) -> tuple[List[Dict[str, Any]], Optional[str], Optional[List[Dict[str, Any]]]]:
        """Append the first choice's message to *messages* (mutated in place).

        Unknown result types pass through untouched; ``system`` and ``tools``
        are returned as given.
        """
        if not self.can_handle(result):
            return messages, system, tools

        choices = getattr(result, "choices", None)
        if choices:
            first_choice = choices[0]
            if hasattr(first_choice, "message"):
                # model_dump() keeps the message's original structure while
                # dropping None-valued fields.
                messages.append(first_choice.message.model_dump(exclude_none=True))

        return messages, system, tools
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Type definitions for chat session types
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import List, Union
|
|
6
|
+
from typing_extensions import TypedDict, Required
|
|
7
|
+
|
|
8
|
+
# Import the actual types if available, otherwise use generic types
|
|
9
|
+
try:
|
|
10
|
+
from openai.types.chat.chat_completion_message_param import (
|
|
11
|
+
ChatCompletionMessageParam,
|
|
12
|
+
)
|
|
13
|
+
from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
|
|
14
|
+
except ImportError:
|
|
15
|
+
# Fallback to generic types if OpenAI types are not available
|
|
16
|
+
ChatCompletionMessageParam = dict
|
|
17
|
+
ChatCompletionToolParam = dict
|
|
18
|
+
|
|
19
|
+
try:
|
|
20
|
+
from anthropic.types.message_param import MessageParam
|
|
21
|
+
from anthropic.types.text_block_param import TextBlockParam
|
|
22
|
+
from anthropic.types.tool_union_param import ToolUnionParam
|
|
23
|
+
except ImportError:
|
|
24
|
+
# Fallback to generic types if Anthropic types are not available
|
|
25
|
+
MessageParam = dict
|
|
26
|
+
TextBlockParam = dict
|
|
27
|
+
ToolUnionParam = dict
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class OpenAIChatSessionType(TypedDict, total=False):
    """Container for OpenAI messages with 'messages' key.

    ``total=False`` makes ``tools`` optional, mirroring
    ``AnthropicChatSessionType``; the explicit ``Required`` marker keeps
    ``messages`` mandatory. Previously the class defaulted to ``total=True``,
    which (inconsistently with the Anthropic container) forced every session
    dict to carry a ``tools`` key to type-check.
    """

    # The conversation itself — always required.
    messages: Required[List[ChatCompletionMessageParam]]
    # Optional tool definitions supplied alongside the conversation.
    tools: List[ChatCompletionToolParam]
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class AnthropicChatSessionType(TypedDict, total=False):
    """Container for Anthropic messages with 'messages' key."""

    # Only ``messages`` is mandatory (``total=False`` makes the rest optional,
    # ``Required`` opts this key back in).
    messages: Required[List[MessageParam]]
    # Optional system prompt: a plain string or a list of text blocks.
    system: Union[str, List[TextBlockParam]]
    # Optional tool definitions passed alongside the conversation.
    tools: List[ToolUnionParam]
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# Union type for when you don't know the type of the chat session
|
|
46
|
+
ChatSessionType = Union[OpenAIChatSessionType, AnthropicChatSessionType]
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Client implementations for Composo SDK
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from .sync import Composo
|
|
6
|
+
from .async_client import AsyncComposo
|
|
7
|
+
from .types import MessagesType, ToolsType, ResultType, MessageType, ToolType
|
|
8
|
+
|
|
9
|
+
__all__ = [
|
|
10
|
+
"Composo",
|
|
11
|
+
"AsyncComposo",
|
|
12
|
+
"MessagesType",
|
|
13
|
+
"ToolsType",
|
|
14
|
+
"ResultType",
|
|
15
|
+
"MessageType",
|
|
16
|
+
"ToolType",
|
|
17
|
+
]
|