polos_sdk-0.1.0-py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- polos/__init__.py +105 -0
- polos/agents/__init__.py +7 -0
- polos/agents/agent.py +746 -0
- polos/agents/conversation_history.py +121 -0
- polos/agents/stop_conditions.py +280 -0
- polos/agents/stream.py +635 -0
- polos/core/__init__.py +0 -0
- polos/core/context.py +143 -0
- polos/core/state.py +26 -0
- polos/core/step.py +1380 -0
- polos/core/workflow.py +1192 -0
- polos/features/__init__.py +0 -0
- polos/features/events.py +456 -0
- polos/features/schedules.py +110 -0
- polos/features/tracing.py +605 -0
- polos/features/wait.py +82 -0
- polos/llm/__init__.py +9 -0
- polos/llm/generate.py +152 -0
- polos/llm/providers/__init__.py +5 -0
- polos/llm/providers/anthropic.py +615 -0
- polos/llm/providers/azure.py +42 -0
- polos/llm/providers/base.py +196 -0
- polos/llm/providers/fireworks.py +41 -0
- polos/llm/providers/gemini.py +40 -0
- polos/llm/providers/groq.py +40 -0
- polos/llm/providers/openai.py +1021 -0
- polos/llm/providers/together.py +40 -0
- polos/llm/stream.py +183 -0
- polos/middleware/__init__.py +0 -0
- polos/middleware/guardrail.py +148 -0
- polos/middleware/guardrail_executor.py +253 -0
- polos/middleware/hook.py +164 -0
- polos/middleware/hook_executor.py +104 -0
- polos/runtime/__init__.py +0 -0
- polos/runtime/batch.py +87 -0
- polos/runtime/client.py +841 -0
- polos/runtime/queue.py +42 -0
- polos/runtime/worker.py +1365 -0
- polos/runtime/worker_server.py +249 -0
- polos/tools/__init__.py +0 -0
- polos/tools/tool.py +587 -0
- polos/types/__init__.py +23 -0
- polos/types/types.py +116 -0
- polos/utils/__init__.py +27 -0
- polos/utils/agent.py +27 -0
- polos/utils/client_context.py +41 -0
- polos/utils/config.py +12 -0
- polos/utils/output_schema.py +311 -0
- polos/utils/retry.py +47 -0
- polos/utils/serializer.py +167 -0
- polos/utils/tracing.py +27 -0
- polos/utils/worker_singleton.py +40 -0
- polos_sdk-0.1.0.dist-info/METADATA +650 -0
- polos_sdk-0.1.0.dist-info/RECORD +55 -0
- polos_sdk-0.1.0.dist-info/WHEEL +4 -0
polos/llm/providers/base.py (new file)
@@ -0,0 +1,196 @@
"""Base class for LLM providers."""

from abc import ABC, abstractmethod
from typing import Any

from pydantic import BaseModel, Field

# Provider registry - providers register themselves here
_PROVIDER_REGISTRY: dict[str, type["LLMProvider"]] = {}


def register_provider(name: str):
    """
    Decorator to register an LLM provider class.

    Usage:
        @register_provider("openai")
        class OpenAIProvider(LLMProvider):
            ...

    Args:
        name: Provider name (e.g., "openai", "anthropic")

    Returns:
        Decorator function
    """

    def decorator(cls: type["LLMProvider"]) -> type["LLMProvider"]:
        _PROVIDER_REGISTRY[name.lower()] = cls
        return cls

    return decorator


class LLMResponse(BaseModel):
    """Response from an LLM call."""

    content: str | None = None
    usage: dict[str, Any] | None = Field(default_factory=dict)
    tool_calls: list[dict[str, Any]] | None = Field(default_factory=list)
    raw_output: list[dict[str, Any]] | None = Field(default_factory=list)
    model: str | None = None
    stop_reason: str | None = None


class LLMProvider(ABC):
    """Base class for LLM providers."""

    @abstractmethod
    async def generate(
        self,
        messages: list[dict[str, Any]],
        model: str,
        tools: list[dict[str, Any]] | None = None,
        temperature: float | None = None,
        max_tokens: int | None = None,
        top_p: float | None = None,
        agent_config: dict[str, Any] | None = None,
        tool_results: list[dict[str, Any]] | None = None,
        **kwargs,
    ) -> LLMResponse:
        """
        Make a chat completion request to the LLM.

        Args:
            messages: List of message dicts with 'role' and 'content' keys
            model: Model identifier (e.g., "gpt-4", "claude-3-opus")
            tools: Optional list of tool schemas for function calling
            temperature: Optional temperature parameter
            max_tokens: Optional max tokens parameter
            top_p: Optional top_p parameter for nucleus sampling
            agent_config: Optional AgentConfig dict containing system_prompt and other config
            tool_results: Optional list of tool results in OpenAI format to add to messages
            **kwargs: Provider-specific additional parameters

        Returns:
            LLMResponse with content, usage, tool_calls, raw_output, model, and stop_reason
        """
        pass

    async def stream(
        self,
        messages: list[dict[str, Any]],
        model: str,
        tools: list[dict[str, Any]] | None = None,
        temperature: float | None = None,
        max_tokens: int | None = None,
        top_p: float | None = None,
        agent_config: dict[str, Any] | None = None,
        tool_results: list[dict[str, Any]] | None = None,
        **kwargs,
    ):
        """
        Stream responses from the LLM.

        This is an optional method. If not implemented, the system will fall back to generate().

        Args:
            messages: List of message dicts with 'role' and 'content' keys
            model: Model identifier
            tools: Optional list of tool schemas
            temperature: Optional temperature parameter
            max_tokens: Optional max tokens parameter
            top_p: Optional top_p parameter for nucleus sampling
            agent_config: Optional AgentConfig dict containing system_prompt and other config
            tool_results: Optional list of tool results in OpenAI format to add to messages
            **kwargs: Provider-specific additional parameters

        Yields:
            Dict with event information:
            - type: "text_delta", "text_complete", "tool_call", "done", "error"
            - data: Event-specific data
        """
        # Default implementation: not supported
        raise NotImplementedError(f"Streaming not implemented for {self.__class__.__name__}")


def get_provider(provider_name: str, **kwargs) -> LLMProvider:
    """
    Get an LLM provider instance by name from the registry.

    Providers are dynamically imported when requested. If a provider's SDK is not installed,
    a helpful error message will be raised.

    Args:
        provider_name: Name of the provider ("openai", "anthropic", etc.)
        **kwargs: Provider-specific initialization parameters

    Returns:
        LLMProvider instance

    Raises:
        ValueError: If the provider is not found or not supported
        ImportError: If the provider's SDK is not installed
    """
    provider_name_lower = provider_name.lower()

    # Check if already registered
    provider_class = _PROVIDER_REGISTRY.get(provider_name_lower)
    if provider_class:
        return provider_class(**kwargs)

    # Try to dynamically import the provider module
    provider_modules = {
        "openai": ".openai",
        "anthropic": ".anthropic",
        "gemini": ".gemini",
        "groq": ".groq",
        "fireworks": ".fireworks",
        "together": ".together",
        "azure": ".azure",
    }

    module_path = provider_modules.get(provider_name_lower)
    if not module_path:
        available = ", ".join(sorted(provider_modules.keys()))
        raise ValueError(
            f"Unknown LLM provider: {provider_name}. "
            f"Supported providers: {available}. "
            f"To use a provider, install it with: pip install polos[{provider_name_lower}]"
        )

    # Dynamically import the provider module
    # This will trigger the @register_provider decorator to register the class
    try:
        if provider_name_lower == "openai":
            from . import openai  # noqa: F401
        elif provider_name_lower == "anthropic":
            from . import anthropic  # noqa: F401
        elif provider_name_lower == "gemini":
            from . import gemini  # noqa: F401
        elif provider_name_lower == "groq":
            from . import groq  # noqa: F401
        elif provider_name_lower == "fireworks":
            from . import fireworks  # noqa: F401
        elif provider_name_lower == "together":
            from . import together  # noqa: F401
        elif provider_name_lower == "azure":
            from . import azure  # noqa: F401
    except ImportError as e:
        # The import failed - likely the SDK is not installed
        # The provider module itself will raise a more helpful error
        raise ImportError(
            f"Failed to import {provider_name} provider. "
            f"Install the required SDK with: pip install polos[{provider_name_lower}]"
        ) from e

    # After import, the provider should be registered
    provider_class = _PROVIDER_REGISTRY.get(provider_name_lower)
    if not provider_class:
        raise ValueError(
            f"Provider {provider_name} was imported but not registered. "
            f"This is likely a bug in the provider implementation."
        )

    return provider_class(**kwargs)
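
Taken together, register_provider and get_provider form a lazy plugin registry: built-in providers are imported on first request, while any class already imported is served straight from _PROVIDER_REGISTRY. A minimal sketch of that round trip, assuming the module path polos.llm.providers.base from the manifest above; the EchoProvider class and its canned reply are hypothetical, not part of the SDK:

import asyncio

from polos.llm.providers.base import (
    LLMProvider,
    LLMResponse,
    get_provider,
    register_provider,
)


@register_provider("echo")
class EchoProvider(LLMProvider):
    """Hypothetical provider that echoes the last user message back."""

    async def generate(self, messages, model, **kwargs) -> LLMResponse:
        last = messages[-1]["content"] if messages else ""
        return LLMResponse(content=last, model=model, stop_reason="stop")


async def main():
    # Defining the class above already ran the decorator, so get_provider
    # resolves "echo" from _PROVIDER_REGISTRY without a dynamic import.
    provider = get_provider("echo")
    response = await provider.generate(
        messages=[{"role": "user", "content": "hello"}], model="echo-1"
    )
    print(response.content)  # -> hello


asyncio.run(main())
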
polos/llm/providers/fireworks.py (new file)
@@ -0,0 +1,41 @@
"""Fireworks provider - routes to OpenAI provider with chat_completions API."""

from .base import register_provider
from .openai import OpenAIProvider


@register_provider("fireworks")
class FireworksProvider(OpenAIProvider):
    """Fireworks provider using OpenAI provider with Chat Completions API."""

    def __init__(self, api_key=None):
        """
        Initialize Fireworks provider.

        Args:
            api_key: Fireworks API key. If not provided, uses FIREWORKS_API_KEY env var.
        """
        import os

        fireworks_api_key = api_key or os.getenv("FIREWORKS_API_KEY")
        if not fireworks_api_key:
            raise ValueError(
                "Fireworks API key not provided. Set FIREWORKS_API_KEY "
                "environment variable or pass api_key parameter."
            )

        try:
            from openai import AsyncOpenAI  # noqa: F401
        except ImportError:
            raise ImportError(
                "OpenAI SDK not installed. Install it with: pip install 'polos[fireworks]'"
            ) from None

        # Initialize with Fireworks' base URL and chat_completions API version
        # Fireworks supports structured output
        super().__init__(
            api_key=fireworks_api_key,
            base_url="https://api.fireworks.ai/inference/v1",
            llm_api="chat_completions",
        )
        self.supports_structured_output = True
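
A usage sketch for the wrapper above, assuming FIREWORKS_API_KEY is set and the OpenAI SDK is installed; the model id is an example Fireworks identifier, not an SDK default:

import asyncio

from polos.llm.providers.base import get_provider


async def main():
    # The first call triggers get_provider's dynamic import of .fireworks,
    # which registers FireworksProvider and then instantiates it.
    provider = get_provider("fireworks")
    response = await provider.generate(
        messages=[{"role": "user", "content": "Say hi"}],
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        max_tokens=64,
    )
    print(response.content)


asyncio.run(main())
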
polos/llm/providers/gemini.py (new file)
@@ -0,0 +1,40 @@
"""Gemini provider - routes to OpenAI provider with chat_completions API."""

from .base import register_provider
from .openai import OpenAIProvider


@register_provider("gemini")
class GeminiProvider(OpenAIProvider):
    """Gemini provider using OpenAI provider with Chat Completions API."""

    def __init__(self, api_key=None):
        """
        Initialize Gemini provider.

        Args:
            api_key: Gemini API key. If not provided, uses GEMINI_API_KEY env var.
        """
        import os

        gemini_api_key = api_key or os.getenv("GEMINI_API_KEY")
        if not gemini_api_key:
            raise ValueError(
                "Gemini API key not provided. Set GEMINI_API_KEY environment "
                "variable or pass api_key parameter."
            )

        try:
            from openai import AsyncOpenAI  # noqa: F401
        except ImportError:
            raise ImportError(
                "OpenAI SDK not installed. Install it with: pip install 'polos[gemini]'"
            ) from None

        # Initialize with Gemini's base URL and chat_completions API version
        super().__init__(
            api_key=gemini_api_key,
            base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
            llm_api="chat_completions",
        )
        self.supports_structured_output = True
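
Gemini follows the identical pattern: only the env var, base URL, and capability flag differ from Fireworks. The same recipe extends to any other OpenAI-compatible endpoint. A hypothetical sketch, not shipped in this package (the "deepseek" name, env var, and URL are purely illustrative):

import os

from polos.llm.providers.base import register_provider
from polos.llm.providers.openai import OpenAIProvider


@register_provider("deepseek")
class DeepSeekProvider(OpenAIProvider):
    """Illustrative wrapper: new name, env var, and base URL; same OpenAI machinery."""

    def __init__(self, api_key=None):
        key = api_key or os.getenv("DEEPSEEK_API_KEY")
        if not key:
            raise ValueError("DeepSeek API key not provided.")
        super().__init__(
            api_key=key,
            base_url="https://api.deepseek.com/v1",
            llm_api="chat_completions",
        )
        self.supports_structured_output = False

Note that get_provider's provider_modules map only knows the built-in names, so a third-party wrapper like this must be imported by the application first; the decorator performs the registration at import time, after which get_provider("deepseek") resolves from the registry.
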
polos/llm/providers/groq.py (new file)
@@ -0,0 +1,40 @@
"""Groq provider - routes to OpenAI provider with chat_completions API."""

from .base import register_provider
from .openai import OpenAIProvider


@register_provider("groq")
class GroqProvider(OpenAIProvider):
    """Groq provider using OpenAI provider with Chat Completions API."""

    def __init__(self, api_key=None):
        """
        Initialize Groq provider.

        Args:
            api_key: Groq API key. If not provided, uses GROQ_API_KEY env var.
        """
        import os

        groq_api_key = api_key or os.getenv("GROQ_API_KEY")
        if not groq_api_key:
            raise ValueError(
                "Groq API key not provided. Set GROQ_API_KEY environment variable "
                "or pass api_key parameter."
            )

        try:
            from openai import AsyncOpenAI  # noqa: F401
        except ImportError:
            raise ImportError(
                "OpenAI SDK not installed. Install it with: pip install 'polos[groq]'"
            ) from None

        # Initialize with Groq's base URL and chat_completions API version
        super().__init__(
            api_key=groq_api_key,
            base_url="https://api.groq.com/openai/v1",
            llm_api="chat_completions",
        )
        self.supports_structured_output = False
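
The one behavioral difference across these three wrappers is the capability flag: Fireworks and Gemini set supports_structured_output = True while Groq sets it to False. This diff does not show how polos consumes the flag; a hedged sketch of how calling code might branch on it:

from polos.llm.providers.base import get_provider

provider = get_provider("groq")
if getattr(provider, "supports_structured_output", False):
    # Safe to pass a JSON schema / response_format through to the API.
    ...
else:
    # Fall back to prompting for JSON and validating client-side.
    ...
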