spaik-sdk 0.6.4__py3-none-any.whl → 0.6.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spaik_sdk/agent/base_agent.py CHANGED
@@ -17,7 +17,6 @@ from spaik_sdk.llm.cost.cost_provider import CostProvider
  from spaik_sdk.llm.langchain_service import LangChainService
  from spaik_sdk.models.llm_config import LLMConfig
  from spaik_sdk.models.llm_model import LLMModel
- from spaik_sdk.models.providers.provider_type import ProviderType
  from spaik_sdk.prompt.get_prompt_loader import get_prompt_loader
  from spaik_sdk.prompt.prompt_loader import PromptLoader
  from spaik_sdk.prompt.prompt_loader_mode import PromptLoaderMode
@@ -147,11 +146,9 @@ class BaseAgent(ABC):
          if llm_model is None:
              llm_model = self.get_llm_model()

-         provider_type = ProviderType.from_family(llm_model.family)
-
          return LLMConfig(
              model=llm_model,
-             provider_type=provider_type,
+             provider_type=env_config.get_provider_type(),
              reasoning=reasoning if reasoning is not None else llm_model.reasoning,
              tool_usage=len(self.tools) > 0,
          )
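The provider is no longer inferred from the model's family; it now comes from the environment configuration, so the same model can be routed through different backends (for example, an OpenAI-family model served through Azure). A minimal sketch of the new flow, assuming `env_config.get_provider_type()` reads a provider name from the environment (the variable name is not visible in this diff):

```python
from spaik_sdk.config.env import env_config
from spaik_sdk.models.llm_config import LLMConfig
from spaik_sdk.models.model_registry import ModelRegistry

llm_model = ModelRegistry.from_name("gpt 4.1")  # alias defined in model_registry.py
config = LLMConfig(
    model=llm_model,
    # 0.6.4 derived this from llm_model.family; 0.6.6 asks the env config,
    # decoupling "which model" from "which backend serves it".
    provider_type=env_config.get_provider_type(),
    reasoning=llm_model.reasoning,
    tool_usage=False,
)
```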
spaik_sdk/config/env.py CHANGED
@@ -1,5 +1,4 @@
  import os
- from typing import Dict
  from typing import Optional as OptionalType

  from spaik_sdk.models.llm_model import LLMModel
@@ -16,16 +15,6 @@ class EnvConfig:
              raise ValueError(f"Environment variable {key} is required but not set")
          return value

-     def get_azure_keys(self) -> Dict[str, str]:
-         return {
-             "api_key": self.get_key("AZURE_API_KEY"),
-             "api_version": self.get_key("AZURE_API_VERSION"),
-             "endpoint": self.get_key("AZURE_ENDPOINT"),
-             "o3-mini_deployment": self.get_key("AZURE_O3_MINI_DEPLOYMENT", required=False),
-             "gpt-4_1_deployment": self.get_key("AZURE_GPT_4_1_DEPLOYMENT", required=False),
-             "gpt-4o_deployment": self.get_key("AZURE_GPT_4O_DEPLOYMENT", required=False),
-         }
-
      def get_default_model(self) -> LLMModel:
          return ModelRegistry.from_name(self.get_key("DEFAULT_MODEL"))
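The hard-coded `get_azure_keys` helper, which only knew three deployments, is gone; Azure deployment resolution moves into `azure_provider.py` below. What this hunk still shows is the `get_key` contract: a required key raises when unset, an optional one does not. A standalone re-illustration of that contract (the real method lives on `EnvConfig`; the fall-back-to-`None` behavior for optional keys is an assumption):

```python
import os
from typing import Optional


def get_key(key: str, required: bool = True) -> Optional[str]:
    # Mirrors EnvConfig.get_key as seen in this hunk: unset + required -> error.
    value = os.environ.get(key)
    if value is None and required:
        raise ValueError(f"Environment variable {key} is required but not set")
    return value


os.environ["DEFAULT_MODEL"] = "gpt 4.1"
assert get_key("DEFAULT_MODEL") == "gpt 4.1"
# Optional lookup returns None instead of raising (assuming the var is unset).
assert get_key("AZURE_GPT_4O_DEPLOYMENT", required=False) is None
```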
spaik_sdk/models/factories/anthropic_factory.py CHANGED
@@ -21,7 +21,7 @@ class AnthropicModelFactory(BaseModelFactory):
          model_config: Dict[str, Any] = {
              "model_name": config.model.name,
              "streaming": config.streaming,
-             "max_tokens": config.max_output_tokens,
+             "max_tokens": config.max_output_tokens if config.max_output_tokens is not None else 8192,
          }

          # Handle thinking mode via model_kwargs for LangChain compatibility
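`LLMConfig.max_output_tokens` now defaults to `None` (see llm_config.py below). Anthropic's API requires an explicit `max_tokens`, so the factory substitutes 8192 when the caller did not set one. The fallback in isolation:

```python
from typing import Optional


def anthropic_max_tokens(max_output_tokens: Optional[int]) -> int:
    # Anthropic requires max_tokens, so "not set" becomes the SDK default of 8192.
    return max_output_tokens if max_output_tokens is not None else 8192


assert anthropic_max_tokens(None) == 8192   # unset -> SDK default
assert anthropic_max_tokens(2048) == 2048   # explicit caller value wins
```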
spaik_sdk/models/factories/base_model_factory.py CHANGED
@@ -54,15 +54,27 @@ class BaseModelFactory(ABC):
          """Factory method to create appropriate factory instance."""

          from spaik_sdk.models.factories.anthropic_factory import AnthropicModelFactory
+         from spaik_sdk.models.factories.cohere_factory import CohereModelFactory
+         from spaik_sdk.models.factories.deepseek_factory import DeepSeekModelFactory
          from spaik_sdk.models.factories.google_factory import GoogleModelFactory
+         from spaik_sdk.models.factories.meta_factory import MetaModelFactory
+         from spaik_sdk.models.factories.mistral_factory import MistralModelFactory
+         from spaik_sdk.models.factories.moonshot_factory import MoonshotModelFactory
          from spaik_sdk.models.factories.ollama_factory import OllamaModelFactory
          from spaik_sdk.models.factories.openai_factory import OpenAIModelFactory
+         from spaik_sdk.models.factories.xai_factory import XAIModelFactory

          factories = [
              AnthropicModelFactory(),
              OpenAIModelFactory(),
              GoogleModelFactory(),
              OllamaModelFactory(),
+             DeepSeekModelFactory(),
+             XAIModelFactory(),
+             CohereModelFactory(),
+             MistralModelFactory(),
+             MetaModelFactory(),
+             MoonshotModelFactory(),
          ]
          for factory in factories:
              if factory.supports_model_config(config):
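Dispatch is first-match-wins over an ordered list, so adding a family is a two-line change: an import plus a list entry. A self-contained sketch of the same pattern using hypothetical stand-in classes:

```python
from dataclasses import dataclass
from typing import List


@dataclass
class FakeConfig:  # hypothetical stand-in for LLMConfig
    family: str


class FakeFactory:  # hypothetical stand-in for a model factory
    def __init__(self, family: str) -> None:
        self.family = family

    def supports_model_config(self, config: FakeConfig) -> bool:
        return config.family == self.family


def pick_factory(config: FakeConfig, factories: List[FakeFactory]) -> FakeFactory:
    # First match wins, mirroring the loop in BaseModelFactory.
    for factory in factories:
        if factory.supports_model_config(config):
            return factory
    raise ValueError(f"No factory supports family {config.family!r}")


factories = [FakeFactory("anthropic"), FakeFactory("deepseek"), FakeFactory("xai")]
assert pick_factory(FakeConfig("xai"), factories).family == "xai"
```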
spaik_sdk/models/factories/cohere_factory.py ADDED
@@ -0,0 +1,24 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class CohereModelFactory(BaseModelFactory):
+     MODELS = ModelRegistry.get_by_family(LLMFamilies.COHERE)
+
+     def supports_model(self, model: LLMModel) -> bool:
+         return model in CohereModelFactory.MODELS
+
+     def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+         return None
+
+     def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+         model_config: Dict[str, Any] = {
+             "model": config.model.name,
+             "temperature": config.temperature,
+         }
+         return model_config
spaik_sdk/models/factories/deepseek_factory.py ADDED
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class DeepSeekModelFactory(BaseModelFactory):
+     MODELS = ModelRegistry.get_by_family(LLMFamilies.DEEPSEEK)
+
+     def supports_model(self, model: LLMModel) -> bool:
+         return model in DeepSeekModelFactory.MODELS
+
+     def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+         return None
+
+     def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+         model_config: Dict[str, Any] = {
+             "model": config.model.name,
+             "temperature": config.temperature,
+         }
+         if config.max_output_tokens is not None:
+             model_config["max_tokens"] = config.max_output_tokens
+         return model_config
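Unlike the Anthropic factory above, these new factories omit `max_tokens` entirely when `max_output_tokens` is `None`, leaving the limit to the provider's server-side default. The conditional in isolation:

```python
from typing import Any, Dict, Optional


def build_config(model: str, temperature: float, max_output_tokens: Optional[int]) -> Dict[str, Any]:
    # Same shape as the new factories: "max_tokens" is only present when set.
    model_config: Dict[str, Any] = {"model": model, "temperature": temperature}
    if max_output_tokens is not None:
        model_config["max_tokens"] = max_output_tokens
    return model_config


assert "max_tokens" not in build_config("DeepSeek-V3.2", 0.1, None)
assert build_config("DeepSeek-V3.2", 0.1, 4096)["max_tokens"] == 4096
```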
spaik_sdk/models/factories/meta_factory.py ADDED
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class MetaModelFactory(BaseModelFactory):
+     MODELS = ModelRegistry.get_by_family(LLMFamilies.META)
+
+     def supports_model(self, model: LLMModel) -> bool:
+         return model in MetaModelFactory.MODELS
+
+     def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+         return None
+
+     def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+         model_config: Dict[str, Any] = {
+             "model": config.model.name,
+             "temperature": config.temperature,
+         }
+         if config.max_output_tokens is not None:
+             model_config["max_tokens"] = config.max_output_tokens
+         return model_config
spaik_sdk/models/factories/mistral_factory.py ADDED
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class MistralModelFactory(BaseModelFactory):
+     MODELS = ModelRegistry.get_by_family(LLMFamilies.MISTRAL)
+
+     def supports_model(self, model: LLMModel) -> bool:
+         return model in MistralModelFactory.MODELS
+
+     def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+         return None
+
+     def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+         model_config: Dict[str, Any] = {
+             "model": config.model.name,
+             "temperature": config.temperature,
+         }
+         if config.max_output_tokens is not None:
+             model_config["max_tokens"] = config.max_output_tokens
+         return model_config
spaik_sdk/models/factories/moonshot_factory.py ADDED
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class MoonshotModelFactory(BaseModelFactory):
+     MODELS = ModelRegistry.get_by_family(LLMFamilies.MOONSHOT)
+
+     def supports_model(self, model: LLMModel) -> bool:
+         return model in MoonshotModelFactory.MODELS
+
+     def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+         return None
+
+     def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+         model_config: Dict[str, Any] = {
+             "model": config.model.name,
+             "temperature": config.temperature,
+         }
+         if config.max_output_tokens is not None:
+             model_config["max_tokens"] = config.max_output_tokens
+         return model_config
spaik_sdk/models/factories/xai_factory.py ADDED
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, Optional
+
+ from spaik_sdk.models.factories.base_model_factory import BaseModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_families import LLMFamilies
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
+
+
+ class XAIModelFactory(BaseModelFactory):
+     MODELS = ModelRegistry.get_by_family(LLMFamilies.XAI)
+
+     def supports_model(self, model: LLMModel) -> bool:
+         return model in XAIModelFactory.MODELS
+
+     def get_cache_control(self, config: LLMConfig) -> Optional[Dict[str, Any]]:
+         return None
+
+     def get_model_specific_config(self, config: LLMConfig) -> Dict[str, Any]:
+         model_config: Dict[str, Any] = {
+             "model": config.model.name,
+             "temperature": config.temperature,
+         }
+         if config.max_output_tokens is not None:
+             model_config["max_tokens"] = config.max_output_tokens
+         return model_config
spaik_sdk/models/llm_config.py CHANGED
@@ -16,7 +16,7 @@ class LLMConfig:
      streaming: bool = True
      reasoning_summary: str = "detailed"  # Options: "auto", "concise", "detailed", None
      reasoning_effort: str = "medium"  # Options: "low", "medium", "high"
-     max_output_tokens: int = 8192
+     max_output_tokens: Optional[int] = None
      reasoning_budget_tokens: int = 4096
      temperature: float = 0.1
      structured_response: bool = False
spaik_sdk/models/llm_families.py CHANGED
@@ -5,3 +5,9 @@ class LLMFamilies:
      OPENAI = "openai"
      GOOGLE = "google"
      OLLAMA = "ollama"
+     DEEPSEEK = "deepseek"
+     MISTRAL = "mistral"
+     META = "meta"
+     COHERE = "cohere"
+     XAI = "xai"
+     MOONSHOT = "moonshot"
spaik_sdk/models/model_registry.py CHANGED
@@ -22,17 +22,32 @@ class ModelRegistry:

      # OpenAI models
      GPT_4_1 = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4.1", reasoning=False, prompt_caching=True)
+     GPT_4_1_MINI = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4.1-mini", reasoning=False, prompt_caching=True)
+     GPT_4_1_NANO = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4.1-nano", reasoning=False, prompt_caching=True)
      GPT_4O = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4o", reasoning=False, prompt_caching=True)
+     GPT_4O_MINI = LLMModel(family=LLMFamilies.OPENAI, name="gpt-4o-mini", reasoning=False, prompt_caching=True)
+     O1 = LLMModel(family=LLMFamilies.OPENAI, name="o1")
+     O1_MINI = LLMModel(family=LLMFamilies.OPENAI, name="o1-mini")
+     O3 = LLMModel(family=LLMFamilies.OPENAI, name="o3")
+     O3_MINI = LLMModel(family=LLMFamilies.OPENAI, name="o3-mini")
+     O3_PRO = LLMModel(family=LLMFamilies.OPENAI, name="o3-pro")
      O4_MINI = LLMModel(family=LLMFamilies.OPENAI, name="o4-mini")
      O4_MINI_APRIL_2025 = LLMModel(family=LLMFamilies.OPENAI, name="o4-mini-2025-04-16")
+     CODEX_MINI = LLMModel(family=LLMFamilies.OPENAI, name="codex-mini")
      GPT_5 = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5", reasoning=True, reasoning_min_effort="minimal", prompt_caching=True)
      GPT_5_MINI = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-mini", reasoning=True, reasoning_min_effort="minimal", prompt_caching=True)
      GPT_5_NANO = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-nano", reasoning=True, reasoning_min_effort="minimal", prompt_caching=True)
+     GPT_5_CHAT = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-chat", reasoning=False, prompt_caching=True)
+     GPT_5_CODEX = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-codex", reasoning=True, prompt_caching=True)
+     GPT_5_PRO = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5-pro", reasoning=True, prompt_caching=True)
      GPT_5_1 = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1", reasoning=True, prompt_caching=True)
+     GPT_5_1_CHAT = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1-chat", reasoning=True, prompt_caching=True)
      GPT_5_1_CODEX = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1-codex", reasoning=True, prompt_caching=True)
      GPT_5_1_CODEX_MINI = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1-codex-mini", reasoning=True, prompt_caching=True)
      GPT_5_1_CODEX_MAX = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.1-codex-max", reasoning=True, prompt_caching=True)
      GPT_5_2 = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.2", reasoning=True, prompt_caching=True)
+     GPT_5_2_CHAT = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.2-chat", reasoning=False, prompt_caching=True)
+     GPT_5_2_CODEX = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.2-codex", reasoning=True, prompt_caching=True)
      GPT_5_2_PRO = LLMModel(family=LLMFamilies.OPENAI, name="gpt-5.2-pro", reasoning=True, prompt_caching=True)

      # Google models
@@ -43,6 +58,35 @@
      GEMINI_3_FLASH = LLMModel(family=LLMFamilies.GOOGLE, name="gemini-3-flash-preview", prompt_caching=True)
      GEMINI_3_PRO = LLMModel(family=LLMFamilies.GOOGLE, name="gemini-3-pro-preview", prompt_caching=True)

+     # DeepSeek models
+     DEEPSEEK_V3 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-V3-0324")
+     DEEPSEEK_V3_1 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-V3.1")
+     DEEPSEEK_V3_2 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-V3.2")
+     DEEPSEEK_V3_2_SPECIALE = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-V3.2-Speciale")
+     DEEPSEEK_R1 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-R1")
+     DEEPSEEK_R1_0528 = LLMModel(family=LLMFamilies.DEEPSEEK, name="DeepSeek-R1-0528")
+
+     # Mistral models
+     MISTRAL_LARGE_3 = LLMModel(family=LLMFamilies.MISTRAL, name="Mistral-Large-3", reasoning=False)
+
+     # Meta Llama models
+     LLAMA_4_MAVERICK = LLMModel(family=LLMFamilies.META, name="Llama-4-Maverick-17B-128E-Instruct-FP8", reasoning=False)
+     LLAMA_3_3_70B = LLMModel(family=LLMFamilies.META, name="Llama-3.3-70B-Instruct", reasoning=False)
+
+     # Cohere models
+     COHERE_COMMAND_A = LLMModel(family=LLMFamilies.COHERE, name="Cohere-command-a", reasoning=False)
+
+     # xAI Grok models
+     GROK_3 = LLMModel(family=LLMFamilies.XAI, name="grok-3")
+     GROK_3_MINI = LLMModel(family=LLMFamilies.XAI, name="grok-3-mini")
+     GROK_4 = LLMModel(family=LLMFamilies.XAI, name="grok-4")
+     GROK_4_FAST_REASONING = LLMModel(family=LLMFamilies.XAI, name="grok-4-fast-reasoning")
+     GROK_4_FAST_NON_REASONING = LLMModel(family=LLMFamilies.XAI, name="grok-4-fast-non-reasoning", reasoning=False)
+     GROK_CODE_FAST_1 = LLMModel(family=LLMFamilies.XAI, name="grok-code-fast-1")
+
+     # Moonshot AI models
+     KIMI_K2_THINKING = LLMModel(family=LLMFamilies.MOONSHOT, name="Kimi-K2-Thinking")
+
      # Registry for custom models
      _custom_models: Set[LLMModel] = set()

@@ -82,6 +126,7 @@
      def _get_aliases(cls) -> Dict[str, LLMModel]:
          """Get aliases mapping."""
          return {
+             # Claude aliases
              "sonnet": cls.CLAUDE_4_SONNET,
              "sonnet 3.7": cls.CLAUDE_3_7_SONNET,
              "sonnet 4.5": cls.CLAUDE_4_5_SONNET,
@@ -100,25 +145,73 @@
              "claude 4.5 sonnet": cls.CLAUDE_4_5_SONNET,
              "claude 4.5 haiku": cls.CLAUDE_4_5_HAIKU,
              "claude 4 opus": cls.CLAUDE_4_OPUS,
+             # OpenAI aliases
+             "o1": cls.O1,
+             "o1 mini": cls.O1_MINI,
+             "o3": cls.O3,
+             "o3 mini": cls.O3_MINI,
+             "o3 pro": cls.O3_PRO,
              "o4 mini": cls.O4_MINI,
              "o4 mini 2025-04-16": cls.O4_MINI_APRIL_2025,
+             "codex mini": cls.CODEX_MINI,
              "gpt 4.1": cls.GPT_4_1,
+             "gpt 4.1 mini": cls.GPT_4_1_MINI,
+             "gpt 4.1 nano": cls.GPT_4_1_NANO,
              "gpt 4o": cls.GPT_4O,
+             "gpt 4o mini": cls.GPT_4O_MINI,
              "gpt 5": cls.GPT_5,
              "gpt 5 mini": cls.GPT_5_MINI,
              "gpt 5 nano": cls.GPT_5_NANO,
+             "gpt 5 chat": cls.GPT_5_CHAT,
+             "gpt 5 codex": cls.GPT_5_CODEX,
+             "gpt 5 pro": cls.GPT_5_PRO,
              "gpt 5.1": cls.GPT_5_1,
+             "gpt 5.1 chat": cls.GPT_5_1_CHAT,
              "gpt 5.1 codex": cls.GPT_5_1_CODEX,
              "gpt 5.1 codex mini": cls.GPT_5_1_CODEX_MINI,
              "gpt 5.1 codex max": cls.GPT_5_1_CODEX_MAX,
              "gpt 5.2": cls.GPT_5_2,
+             "gpt 5.2 chat": cls.GPT_5_2_CHAT,
+             "gpt 5.2 codex": cls.GPT_5_2_CODEX,
              "gpt 5.2 pro": cls.GPT_5_2_PRO,
+             # Gemini aliases
              "gemini 2.5 flash": cls.GEMINI_2_5_FLASH,
              "gemini 2.5 pro": cls.GEMINI_2_5_PRO,
              "gemini 3 flash": cls.GEMINI_3_FLASH,
              "gemini 3.0 flash": cls.GEMINI_3_FLASH,
              "gemini 3 pro": cls.GEMINI_3_PRO,
              "gemini 3.0 pro": cls.GEMINI_3_PRO,
+             # DeepSeek aliases
+             "deepseek": cls.DEEPSEEK_V3_2,
+             "deepseek v3": cls.DEEPSEEK_V3,
+             "deepseek v3.1": cls.DEEPSEEK_V3_1,
+             "deepseek v3.2": cls.DEEPSEEK_V3_2,
+             "deepseek v3.2 speciale": cls.DEEPSEEK_V3_2_SPECIALE,
+             "deepseek r1": cls.DEEPSEEK_R1,
+             # Mistral aliases
+             "mistral": cls.MISTRAL_LARGE_3,
+             "mistral large": cls.MISTRAL_LARGE_3,
+             "mistral large 3": cls.MISTRAL_LARGE_3,
+             # Meta Llama aliases
+             "llama": cls.LLAMA_3_3_70B,
+             "llama 3.3": cls.LLAMA_3_3_70B,
+             "llama 3.3 70b": cls.LLAMA_3_3_70B,
+             "llama 4": cls.LLAMA_4_MAVERICK,
+             "llama 4 maverick": cls.LLAMA_4_MAVERICK,
+             # Cohere aliases
+             "cohere": cls.COHERE_COMMAND_A,
+             "cohere command": cls.COHERE_COMMAND_A,
+             "command a": cls.COHERE_COMMAND_A,
+             # xAI Grok aliases
+             "grok": cls.GROK_4,
+             "grok 3": cls.GROK_3,
+             "grok 3 mini": cls.GROK_3_MINI,
+             "grok 4": cls.GROK_4,
+             "grok 4 fast": cls.GROK_4_FAST_REASONING,
+             "grok code": cls.GROK_CODE_FAST_1,
+             # Moonshot aliases
+             "kimi": cls.KIMI_K2_THINKING,
+             "kimi k2": cls.KIMI_K2_THINKING,
          }
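With the new aliases, friendly names resolve to the new registry entries. A short sketch, assuming `ModelRegistry.from_name` consults `_get_aliases` for non-exact names (its body is not in this diff, but it is called with both aliases and exact model names elsewhere in it):

```python
from spaik_sdk.models.model_registry import ModelRegistry

kimi = ModelRegistry.from_name("kimi")          # alias -> KIMI_K2_THINKING
grok = ModelRegistry.from_name("grok 4 fast")   # alias -> GROK_4_FAST_REASONING
r1 = ModelRegistry.from_name("DeepSeek-R1")     # exact name, as AzureProvider uses
print(kimi.name, grok.name, r1.name)
```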
spaik_sdk/models/providers/azure_provider.py CHANGED
@@ -1,31 +1,101 @@
- from typing import Any, Collection, Dict
+ import os
+ from typing import Any, Collection, Dict, Set

  from langchain_core.language_models.chat_models import BaseChatModel
  from langchain_openai import AzureChatOpenAI

- from spaik_sdk.config.env import env_config
- from spaik_sdk.models.factories.openai_factory import OpenAIModelFactory
  from spaik_sdk.models.llm_config import LLMConfig
  from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.model_registry import ModelRegistry
  from spaik_sdk.models.providers.base_provider import BaseProvider

+ # Model name -> Environment variable for Azure deployment name
+ AZURE_DEPLOYMENT_ENV_VARS: Dict[str, str] = {
+     # OpenAI models
+     "gpt-4.1": "AZURE_GPT_4_1_DEPLOYMENT",
+     "gpt-4.1-mini": "AZURE_GPT_4_1_MINI_DEPLOYMENT",
+     "gpt-4.1-nano": "AZURE_GPT_4_1_NANO_DEPLOYMENT",
+     "gpt-4o": "AZURE_GPT_4O_DEPLOYMENT",
+     "gpt-4o-mini": "AZURE_GPT_4O_MINI_DEPLOYMENT",
+     "o1": "AZURE_O1_DEPLOYMENT",
+     "o1-mini": "AZURE_O1_MINI_DEPLOYMENT",
+     "o3": "AZURE_O3_DEPLOYMENT",
+     "o3-mini": "AZURE_O3_MINI_DEPLOYMENT",
+     "o3-pro": "AZURE_O3_PRO_DEPLOYMENT",
+     "o4-mini": "AZURE_O4_MINI_DEPLOYMENT",
+     "o4-mini-2025-04-16": "AZURE_O4_MINI_2025_04_16_DEPLOYMENT",
+     "codex-mini": "AZURE_CODEX_MINI_DEPLOYMENT",
+     "gpt-5": "AZURE_GPT_5_DEPLOYMENT",
+     "gpt-5-mini": "AZURE_GPT_5_MINI_DEPLOYMENT",
+     "gpt-5-nano": "AZURE_GPT_5_NANO_DEPLOYMENT",
+     "gpt-5-chat": "AZURE_GPT_5_CHAT_DEPLOYMENT",
+     "gpt-5-codex": "AZURE_GPT_5_CODEX_DEPLOYMENT",
+     "gpt-5-pro": "AZURE_GPT_5_PRO_DEPLOYMENT",
+     "gpt-5.1": "AZURE_GPT_5_1_DEPLOYMENT",
+     "gpt-5.1-chat": "AZURE_GPT_5_1_CHAT_DEPLOYMENT",
+     "gpt-5.1-codex": "AZURE_GPT_5_1_CODEX_DEPLOYMENT",
+     "gpt-5.1-codex-mini": "AZURE_GPT_5_1_CODEX_MINI_DEPLOYMENT",
+     "gpt-5.1-codex-max": "AZURE_GPT_5_1_CODEX_MAX_DEPLOYMENT",
+     "gpt-5.2": "AZURE_GPT_5_2_DEPLOYMENT",
+     "gpt-5.2-chat": "AZURE_GPT_5_2_CHAT_DEPLOYMENT",
+     "gpt-5.2-codex": "AZURE_GPT_5_2_CODEX_DEPLOYMENT",
+     "gpt-5.2-pro": "AZURE_GPT_5_2_PRO_DEPLOYMENT",
+     # DeepSeek models (Azure AI Foundry)
+     "DeepSeek-V3-0324": "AZURE_DEEPSEEK_V3_DEPLOYMENT",
+     "DeepSeek-V3.1": "AZURE_DEEPSEEK_V3_1_DEPLOYMENT",
+     "DeepSeek-V3.2": "AZURE_DEEPSEEK_V3_2_DEPLOYMENT",
+     "DeepSeek-V3.2-Speciale": "AZURE_DEEPSEEK_V3_2_SPECIALE_DEPLOYMENT",
+     "DeepSeek-R1": "AZURE_DEEPSEEK_R1_DEPLOYMENT",
+     "DeepSeek-R1-0528": "AZURE_DEEPSEEK_R1_0528_DEPLOYMENT",
+     # Mistral models (Azure AI Foundry)
+     "Mistral-Large-3": "AZURE_MISTRAL_LARGE_3_DEPLOYMENT",
+     # Meta Llama models (Azure AI Foundry)
+     "Llama-4-Maverick-17B-128E-Instruct-FP8": "AZURE_LLAMA_4_MAVERICK_DEPLOYMENT",
+     "Llama-3.3-70B-Instruct": "AZURE_LLAMA_3_3_70B_DEPLOYMENT",
+     # Cohere models (Azure AI Foundry)
+     "Cohere-command-a": "AZURE_COHERE_COMMAND_A_DEPLOYMENT",
+     # xAI Grok models (Azure AI Foundry)
+     "grok-3": "AZURE_GROK_3_DEPLOYMENT",
+     "grok-3-mini": "AZURE_GROK_3_MINI_DEPLOYMENT",
+     "grok-4": "AZURE_GROK_4_DEPLOYMENT",
+     "grok-4-fast-reasoning": "AZURE_GROK_4_FAST_REASONING_DEPLOYMENT",
+     "grok-4-fast-non-reasoning": "AZURE_GROK_4_FAST_NON_REASONING_DEPLOYMENT",
+     "grok-code-fast-1": "AZURE_GROK_CODE_FAST_1_DEPLOYMENT",
+     # Moonshot AI models (Azure AI Foundry)
+     "Kimi-K2-Thinking": "AZURE_KIMI_K2_THINKING_DEPLOYMENT",
+ }
+

  class AzureProvider(BaseProvider):
      def get_supported_models(self) -> Collection[LLMModel]:
-         """Get list of models supported by Azure provider."""
-         return OpenAIModelFactory.MODELS
+         supported: Set[LLMModel] = set()
+         for model_name in AZURE_DEPLOYMENT_ENV_VARS.keys():
+             try:
+                 model = ModelRegistry.from_name(model_name)
+                 supported.add(model)
+             except ValueError:
+                 pass
+         return supported

      def get_model_config(self, config: LLMConfig) -> Dict[str, Any]:
-         """Get Azure AI Foundry provider configuration (provider-specific only)."""
          return {
-             "api_key": env_config.get_azure_keys()["api_key"],
-             "api_version": env_config.get_azure_keys()["api_version"],
-             "azure_endpoint": env_config.get_azure_keys()["endpoint"],
+             "api_key": self._get_required_env("AZURE_API_KEY"),
+             "api_version": self._get_required_env("AZURE_API_VERSION"),
+             "azure_endpoint": self._get_required_env("AZURE_ENDPOINT"),
          }

      def create_langchain_model(self, config: LLMConfig, full_config: Dict[str, Any]) -> BaseChatModel:
-         """Create Azure langchain model with complete configuration and model-specific deployments."""
-         # Add Azure provider-specific deployment configuration
-         full_config["deployment_name"] = env_config.get_azure_keys()[f"{config.model.name.lower()}_deployment"]
-
+         full_config["deployment_name"] = self._get_deployment_name(config.model.name)
          return AzureChatOpenAI(**full_config)
+
+     def _get_deployment_name(self, model_name: str) -> str:
+         env_var = AZURE_DEPLOYMENT_ENV_VARS.get(model_name)
+         if not env_var:
+             raise ValueError(f"Model '{model_name}' not supported on Azure. Add it to AZURE_DEPLOYMENT_ENV_VARS.")
+         return os.environ.get(env_var, model_name)
+
+     def _get_required_env(self, key: str) -> str:
+         value = os.environ.get(key)
+         if not value:
+             raise ValueError(f"Environment variable {key} is required but not set")
+         return value
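Deployment resolution is now table-driven: each model name maps to an environment variable, and an unset variable falls back to the model name itself as the deployment name. The behavior of `_get_deployment_name`, restated standalone with a one-entry table:

```python
import os

AZURE_DEPLOYMENT_ENV_VARS = {"gpt-4o": "AZURE_GPT_4O_DEPLOYMENT"}


def get_deployment_name(model_name: str) -> str:
    env_var = AZURE_DEPLOYMENT_ENV_VARS.get(model_name)
    if not env_var:
        raise ValueError(f"Model '{model_name}' not supported on Azure.")
    # Unset env var -> the model name doubles as the deployment name.
    return os.environ.get(env_var, model_name)


assert get_deployment_name("gpt-4o") == "gpt-4o"          # no override set
os.environ["AZURE_GPT_4O_DEPLOYMENT"] = "prod-gpt4o-eu"
assert get_deployment_name("gpt-4o") == "prod-gpt4o-eu"   # explicit deployment
```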
spaik_sdk/models/providers/base_provider.py CHANGED
@@ -58,5 +58,21 @@ class BaseProvider(ABC):
              from spaik_sdk.models.providers.ollama_provider import OllamaProvider

              return OllamaProvider()
+         elif provider_type == ProviderType.DEEPSEEK:
+             from spaik_sdk.models.providers.deepseek_provider import DeepSeekProvider
+
+             return DeepSeekProvider()
+         elif provider_type == ProviderType.XAI:
+             from spaik_sdk.models.providers.xai_provider import XAIProvider
+
+             return XAIProvider()
+         elif provider_type == ProviderType.COHERE:
+             from spaik_sdk.models.providers.cohere_provider import CohereProvider
+
+             return CohereProvider()
+         elif provider_type == ProviderType.MISTRAL:
+             from spaik_sdk.models.providers.mistral_provider import MistralProvider
+
+             return MistralProvider()
          else:
              raise ValueError(f"Unsupported provider type: {provider_type}")
spaik_sdk/models/providers/cohere_provider.py ADDED
@@ -0,0 +1,23 @@
+ from typing import Any, Collection, Dict
+
+ from langchain_cohere import ChatCohere
+ from langchain_core.language_models.chat_models import BaseChatModel
+
+ from spaik_sdk.config.get_credentials_provider import credentials_provider
+ from spaik_sdk.models.factories.cohere_factory import CohereModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.providers.base_provider import BaseProvider
+
+
+ class CohereProvider(BaseProvider):
+     def get_supported_models(self) -> Collection[LLMModel]:
+         return CohereModelFactory.MODELS
+
+     def get_model_config(self, config: LLMConfig) -> Dict[str, Any]:
+         return {
+             "cohere_api_key": credentials_provider.get_provider_key("cohere"),
+         }
+
+     def create_langchain_model(self, config: LLMConfig, full_config: Dict[str, Any]) -> BaseChatModel:
+         return ChatCohere(**full_config)
spaik_sdk/models/providers/deepseek_provider.py ADDED
@@ -0,0 +1,23 @@
+ from typing import Any, Collection, Dict
+
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_deepseek import ChatDeepSeek
+
+ from spaik_sdk.config.get_credentials_provider import credentials_provider
+ from spaik_sdk.models.factories.deepseek_factory import DeepSeekModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.providers.base_provider import BaseProvider
+
+
+ class DeepSeekProvider(BaseProvider):
+     def get_supported_models(self) -> Collection[LLMModel]:
+         return DeepSeekModelFactory.MODELS
+
+     def get_model_config(self, config: LLMConfig) -> Dict[str, Any]:
+         return {
+             "api_key": credentials_provider.get_provider_key("deepseek"),
+         }
+
+     def create_langchain_model(self, config: LLMConfig, full_config: Dict[str, Any]) -> BaseChatModel:
+         return ChatDeepSeek(**full_config)
spaik_sdk/models/providers/mistral_provider.py ADDED
@@ -0,0 +1,23 @@
+ from typing import Any, Collection, Dict
+
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_mistralai import ChatMistralAI
+
+ from spaik_sdk.config.get_credentials_provider import credentials_provider
+ from spaik_sdk.models.factories.mistral_factory import MistralModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.providers.base_provider import BaseProvider
+
+
+ class MistralProvider(BaseProvider):
+     def get_supported_models(self) -> Collection[LLMModel]:
+         return MistralModelFactory.MODELS
+
+     def get_model_config(self, config: LLMConfig) -> Dict[str, Any]:
+         return {
+             "api_key": credentials_provider.get_provider_key("mistral"),
+         }
+
+     def create_langchain_model(self, config: LLMConfig, full_config: Dict[str, Any]) -> BaseChatModel:
+         return ChatMistralAI(**full_config)
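Each provider contributes only its credential kwargs; the factory's model kwargs arrive in `full_config`, which is splatted into the LangChain chat class. A sketch of the assumed merge (the merging code itself is not part of this diff):

```python
# Factory output (see mistral_factory.py above) plus provider credentials.
factory_config = {"model": "Mistral-Large-3", "temperature": 0.1}
provider_config = {"api_key": "sk-placeholder"}  # credentials_provider.get_provider_key("mistral")

# Assumed to happen in LangChainService before create_langchain_model runs:
full_config = {**factory_config, **provider_config}
# ChatMistralAI(**full_config) then receives model, temperature, and api_key.
print(sorted(full_config))
```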
spaik_sdk/models/providers/provider_type.py CHANGED
@@ -9,6 +9,10 @@ class ProviderType(Enum):
      OPENAI_DIRECT = "openai"
      GOOGLE = "google"
      OLLAMA = "ollama"
+     DEEPSEEK = "deepseek"
+     XAI = "xai"
+     COHERE = "cohere"
+     MISTRAL = "mistral"

      @classmethod
      def from_name(cls, name: str) -> "ProviderType":
@@ -39,6 +43,14 @@ class ProviderType(Enum):
              return cls.GOOGLE
          elif family_lower == "ollama":
              return cls.OLLAMA
+         elif family_lower == "deepseek":
+             return cls.DEEPSEEK
+         elif family_lower == "xai":
+             return cls.XAI
+         elif family_lower == "cohere":
+             return cls.COHERE
+         elif family_lower == "mistral":
+             return cls.MISTRAL
          else:
              raise ValueError(f"Unknown model family: {family}")

@@ -51,6 +63,11 @@ PROVIDER_ALIASES = {
      "openai": ProviderType.OPENAI_DIRECT,
      "google": ProviderType.GOOGLE,
      "gemini": ProviderType.GOOGLE,
+     "deepseek": ProviderType.DEEPSEEK,
+     "xai": ProviderType.XAI,
+     "grok": ProviderType.XAI,
+     "cohere": ProviderType.COHERE,
+     "mistral": ProviderType.MISTRAL,
  }
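Checking the new mappings directly (note that "grok" is an extra alias for xAI, the way "gemini" already aliases Google):

```python
from spaik_sdk.models.providers.provider_type import PROVIDER_ALIASES, ProviderType

# Canonical name and product alias both resolve to the same member.
assert PROVIDER_ALIASES["xai"] is ProviderType.XAI
assert PROVIDER_ALIASES["grok"] is ProviderType.XAI
# from_family covers the four families added to the enum in this release.
assert ProviderType.from_family("deepseek") is ProviderType.DEEPSEEK
assert ProviderType.from_family("mistral") is ProviderType.MISTRAL
```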
spaik_sdk/models/providers/xai_provider.py ADDED
@@ -0,0 +1,23 @@
+ from typing import Any, Collection, Dict
+
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_xai import ChatXAI
+
+ from spaik_sdk.config.get_credentials_provider import credentials_provider
+ from spaik_sdk.models.factories.xai_factory import XAIModelFactory
+ from spaik_sdk.models.llm_config import LLMConfig
+ from spaik_sdk.models.llm_model import LLMModel
+ from spaik_sdk.models.providers.base_provider import BaseProvider
+
+
+ class XAIProvider(BaseProvider):
+     def get_supported_models(self) -> Collection[LLMModel]:
+         return XAIModelFactory.MODELS
+
+     def get_model_config(self, config: LLMConfig) -> Dict[str, Any]:
+         return {
+             "xai_api_key": credentials_provider.get_provider_key("xai"),
+         }
+
+     def create_langchain_model(self, config: LLMConfig, full_config: Dict[str, Any]) -> BaseChatModel:
+         return ChatXAI(**full_config)
spaik_sdk/server/api/routers/audio_router_factory.py CHANGED
@@ -178,7 +178,7 @@ class AudioRouterFactory:
          try:
              audio_bytes = await file.read()
              filename = file.filename or "audio.webm"
-
+
              logger.info(f"STT request: language={language}, filename={filename}, size={len(audio_bytes)}")

              options = STTOptions(
spaik_sdk/server/authorization/base_authorizer.py CHANGED
@@ -54,7 +54,7 @@ class BaseAuthorizer(ABC, Generic[TUser]):

      async def can_read_file(self, user: TUser, file_metadata: "FileMetadata") -> bool:
          """Check if user has permission to read a file.
-
+
          By default, users can read files they own, or files owned by 'system' (agent-generated).
          """
          return file_metadata.owner_id == user.get_id() or file_metadata.owner_id == "system"
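The rule documented here: a user may read files they own plus files owned by 'system' (agent-generated). Restated standalone with a hypothetical FileMeta stand-in:

```python
from dataclasses import dataclass


@dataclass
class FileMeta:  # hypothetical stand-in for FileMetadata
    owner_id: str


def can_read_file(user_id: str, meta: FileMeta) -> bool:
    # Owner or 'system' may read; everyone else is denied by default.
    return meta.owner_id == user_id or meta.owner_id == "system"


assert can_read_file("alice", FileMeta(owner_id="alice"))
assert can_read_file("alice", FileMeta(owner_id="system"))  # agent-generated
assert not can_read_file("alice", FileMeta(owner_id="bob"))
```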
spaik_sdk/tracing/agent_trace.py CHANGED
@@ -73,6 +73,4 @@ class AgentTrace:

      def save(self, name: str) -> None:
          trace_content = self.to_string(include_system_prompt=False)
-         self._trace_sink.save_trace(
-             name, trace_content, self.system_prompt, self.agent_instance_id
-         )
+         self._trace_sink.save_trace(name, trace_content, self.system_prompt, self.agent_instance_id)
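The reflowed call exposes the sink's positional contract: name, trace content, system prompt, agent instance id. A hypothetical in-memory sink matching that shape (the real TraceSink interface lives in spaik_sdk/tracing/trace_sink.py, not shown here):

```python
# Hypothetical sink; only the save_trace call shape is taken from the diff.
class InMemoryTraceSink:
    def __init__(self) -> None:
        self.traces = []

    def save_trace(self, name, trace_content, system_prompt, agent_instance_id) -> None:
        self.traces.append((name, trace_content, system_prompt, agent_instance_id))


sink = InMemoryTraceSink()
sink.save_trace("run-1", "trace body", None, "agent-123")
assert sink.traces[0][0] == "run-1"
```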
{spaik_sdk-0.6.4.dist-info → spaik_sdk-0.6.6.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: spaik-sdk
- Version: 0.6.4
+ Version: 0.6.6
  Summary: Python SDK for building AI agents with multi-LLM support, streaming, and production-ready infrastructure
  Project-URL: Homepage, https://github.com/siilisolutions/spaik-sdk
  Project-URL: Repository, https://github.com/siilisolutions/spaik-sdk
@@ -24,12 +24,16 @@ Requires-Dist: dotenv>=0.9.9
  Requires-Dist: fastapi>=0.115.12
  Requires-Dist: httpx>=0.25.0
  Requires-Dist: langchain-anthropic>=1.3.0
+ Requires-Dist: langchain-cohere>=0.5.0
  Requires-Dist: langchain-core>=1.2.0
+ Requires-Dist: langchain-deepseek>=1.0.0
  Requires-Dist: langchain-google-genai>=4.0.0
  Requires-Dist: langchain-mcp-adapters>=0.2.1
+ Requires-Dist: langchain-mistralai>=0.2.0
  Requires-Dist: langchain-ollama>=0.3.0
  Requires-Dist: langchain-openai>=1.1.0
  Requires-Dist: langchain-tavily>=0.2.15
+ Requires-Dist: langchain-xai>=0.2.0
  Requires-Dist: langchain>=1.2.0
  Requires-Dist: langgraph>=1.0.0
  Requires-Dist: mcp>=1.9.2
{spaik_sdk-0.6.4.dist-info → spaik_sdk-0.6.6.dist-info}/RECORD RENAMED
@@ -1,7 +1,7 @@
  spaik_sdk/__init__.py,sha256=UhJdqPEBVFTlyWHPicbcpcvOuOqmObenwnJv_GkPbVA,576
  spaik_sdk/py.typed,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
  spaik_sdk/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- spaik_sdk/agent/base_agent.py,sha256=sk1Q3ediUHm6hOVIIAv90XwXAL7pSrlLcDIm7qtG6z0,11037
+ spaik_sdk/agent/base_agent.py,sha256=KuIsiLtZlthYNNimA5569BxgUvoMFuYcQZuzIBvYoMU,10920
  spaik_sdk/attachments/__init__.py,sha256=ckqaKkl8rCVg-V8hNkE_RG57peYkt1zMWTszSRgWZeE,678
  spaik_sdk/attachments/builder.py,sha256=WgB14KcZ491KqjY6QMeIYXS18KElqsnvO-XAc7wuP0s,1758
  spaik_sdk/attachments/file_storage_provider.py,sha256=3EKDCCfhKi2iDpLR3BMsKt9KCR8iFvUZz2LV8cMFs3s,692
@@ -21,7 +21,7 @@ spaik_sdk/audio/providers/google_tts.py,sha256=-7gohXszE_A3sFQbiMp3kk1VZBhGFyLkx
  spaik_sdk/audio/providers/openai_stt.py,sha256=ZD44obwqOTmvnmvPcD6RQu4cL6B4rWkMmSmLwlEeoQY,2078
  spaik_sdk/audio/providers/openai_tts.py,sha256=SHfav2hgPnk84Dy784XcJHAGQ7PgUcUuwvq2eU4ceW0,3436
  spaik_sdk/config/credentials_provider.py,sha256=mfaAUb8yRr9VEHqwxxIGlbXv-_v7ZnmaBxqDmq9SRys,299
- spaik_sdk/config/env.py,sha256=H4xdFhRqoE64Rj6J0DM_J_zaq10seh2WpDRldNmxaZI,2754
+ spaik_sdk/config/env.py,sha256=xBEBq7yvOjxy59KhNhBIHGm0DwVelHC1LQeo4Pky4eQ,2208
  spaik_sdk/config/env_credentials_provider.py,sha256=Y4Tti-T3IAAFQDV2rTsBaa3mngZcJ6RBY6Pk1TMmJRM,307
  spaik_sdk/config/get_credentials_provider.py,sha256=D2EF3ezConXlCmNvl9mQTSoR3BsIlDmxKM0a7jgGym8,516
  spaik_sdk/image_gen/__init__.py,sha256=AEujgMjNTj0y0x5EPsbX8IV9MB_UXDRjf3nc3Sc7Ou4,245
@@ -53,25 +53,35 @@ spaik_sdk/llm/streaming/streaming_content_handler.py,sha256=-RLUEczCUKtzHRFatVgX
  spaik_sdk/llm/streaming/streaming_event_handler.py,sha256=6YX1xQnaDEfExQbIIxb_A-x-ETdowm1DeGg5BGngcs0,10629
  spaik_sdk/llm/streaming/streaming_state_manager.py,sha256=Nsfcf9umUk2WAVdsank3a_a0ihi4g94HoYA0lY1wcXc,2249
  spaik_sdk/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- spaik_sdk/models/llm_config.py,sha256=ICIY_y_-loD7X0h-0_sjJUufxPpZgrUKfEQ5uVxA740,1641
- spaik_sdk/models/llm_families.py,sha256=mtsMfih5FbkX52eFMkMSfjiSIpkroZ_uqe8JlreJscQ,169
+ spaik_sdk/models/llm_config.py,sha256=eCp3I7Dbqm5Xoudak0IosEWfzW5a05xxK28zxNq8uZU,1651
+ spaik_sdk/models/llm_families.py,sha256=oyjAVMWV6EePMFIYlDCRSGtYTdxqlrPRlun_-4v0IJw,301
  spaik_sdk/models/llm_model.py,sha256=DO8wlN4Gj_AB_lxTpqzQGDABvDgyOI3JcQda_J-UiKU,440
  spaik_sdk/models/llm_wrapper.py,sha256=CB07qSPJUWScN3hj1SO_9qi8QQ7Zg5p53JLnXFZ4O6A,929
- spaik_sdk/models/model_registry.py,sha256=bzMi-_amifpuE-YNkK2lAPDYOiAPX0Unz2xNzhATEQM,7914
+ spaik_sdk/models/model_registry.py,sha256=U9hUcdeAjNpPAbOixq5aAoai_W7NK8AV7U8fhjs1pn0,12955
  spaik_sdk/models/factories/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- spaik_sdk/models/factories/anthropic_factory.py,sha256=1Dmn-RBTmDHnXVCt2D3xx1Xw9OVMaecTk1kBFur8prs,1330
- spaik_sdk/models/factories/base_model_factory.py,sha256=iLTUhVh7G4l9TiQdFlq9HU-cs-t5uaul4SeoIdSFNww,2845
+ spaik_sdk/models/factories/anthropic_factory.py,sha256=0bnA02fr2iQcwCUXGw_ZXgoogFtkpREJ_vGSak7F_uw,1380
+ spaik_sdk/models/factories/base_model_factory.py,sha256=iZh9DLxIVgXDA6u5Hv4mh_zU0i_ed5ce2DJMWlj1dH0,3535
+ spaik_sdk/models/factories/cohere_factory.py,sha256=Qn4UHV0jKZ8j1SYrj3ziNYRzRXp6UPnwkje-iR9PDHk,884
+ spaik_sdk/models/factories/deepseek_factory.py,sha256=tq6HxumlVI67XJ4h_SFE9lApkVljEfBLCCHVzDPi_G4,1005
  spaik_sdk/models/factories/google_factory.py,sha256=5Xc-I6h_SwPJFaSVu0m87O2xcRgWns0OwURK_hFUpbg,1486
+ spaik_sdk/models/factories/meta_factory.py,sha256=J0sdmRV726OIwCMTXy_L7jgb60u-wYzU1TomdLpPHis,993
+ spaik_sdk/models/factories/mistral_factory.py,sha256=84J12FWWohGRWRpa57kTpCXaiQ680cqCuPjaP_piXyg,1002
+ spaik_sdk/models/factories/moonshot_factory.py,sha256=3S-SSPFAkB6zf3XxOHxcYUdRRuAto0hqnwJLefEJ-QA,1005
  spaik_sdk/models/factories/ollama_factory.py,sha256=7RXPlbF7b-wJA45FluSGkVIzMXdHToxxGQ99KatiLfs,1674
  spaik_sdk/models/factories/openai_factory.py,sha256=LvKx5ueuL3uROfI453BQSq6vuLFHJwMzLVqIbeg4G9s,2489
+ spaik_sdk/models/factories/xai_factory.py,sha256=chuxuemTPDxbip7zZUHBg7QHpdGALL31tP222qKOCzI,990
  spaik_sdk/models/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  spaik_sdk/models/providers/anthropic_provider.py,sha256=NY_K8cbph5PKMRdKWOY4iw2C6wUZtnZ4id8DGwuKv1M,1329
- spaik_sdk/models/providers/azure_provider.py,sha256=Ddzhmi1TCWleAMJx1qoagxxMEfv8Tot46h15eocS-Bg,1453
- spaik_sdk/models/providers/base_provider.py,sha256=RoAyUAVMxHIVrYXgz9mHX2hfCfMj1qI-B9Y8g2YmRVk,2392
+ spaik_sdk/models/providers/azure_provider.py,sha256=5i74zs97gU-kWjls2PgKmnietoTrKhyTTOl_NxVEA2Y,4608
+ spaik_sdk/models/providers/base_provider.py,sha256=qcbigiCMLS4OtRVXnzd-uAhktWWethiCmHfAMo3YC0k,3072
+ spaik_sdk/models/providers/cohere_provider.py,sha256=vsXFWVXNiyTIxsJDigE7ar5s6hwUoq38JbIukDXBv3E,924
+ spaik_sdk/models/providers/deepseek_provider.py,sha256=-B_qynQNZyV56CfXDd4J-vj_vKyaO8kNS3UZdUFqe4s,933
  spaik_sdk/models/providers/google_provider.py,sha256=HVl9tQaAiUX9orM9v5zwTKMhYYGrPLSPRVZ5hso8-_g,1180
+ spaik_sdk/models/providers/mistral_provider.py,sha256=Dc4E-hzjdcSqaDcbthzDOBmouIS0LXL4a7YtVAAEC7U,931
  spaik_sdk/models/providers/ollama_provider.py,sha256=7BnAb5rdeTyc8EKy39OquuOyMexULC53FdxtvhHdHvY,1099
  spaik_sdk/models/providers/openai_provider.py,sha256=mEA1C5wGY01ZiDJyihkhn5eZB2rx9P_ebaSbreHtMqA,1127
- spaik_sdk/models/providers/provider_type.py,sha256=tMu_JwR1MwFDgG9jagQLYlzLLDTraGo8aSOSDK_Ny6o,3141
+ spaik_sdk/models/providers/provider_type.py,sha256=Hj4-oCFC9uhzCAjEn2_8OKa0eoA8Jz_OLdGdODv1a9c,3675
+ spaik_sdk/models/providers/xai_provider.py,sha256=MEZmkax38coElrj7foATzUO5WioMJBdwVvvPBID5RDU,897
  spaik_sdk/orchestration/__init__.py,sha256=SOsO7m686FX81698irjigoDWW2xhMSYvtPLufWdGlwM,582
  spaik_sdk/orchestration/base_orchestrator.py,sha256=ksPkia-WvLYD6byBy5PgmohdgHJr0IZeASQ2dgGbQUc,8705
  spaik_sdk/orchestration/checkpoint.py,sha256=ToiCtD5vP4FTslNyM2MkKMVjIG9YShor2RNPzVFdir0,2416
@@ -92,7 +102,7 @@ spaik_sdk/recording/impl/local_recorder.py,sha256=6v7ARykjseSskenvsaBU5UcgOJ15e8
  spaik_sdk/server/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  spaik_sdk/server/api/routers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  spaik_sdk/server/api/routers/api_builder.py,sha256=svuqLYJJWam7I5Aywg9ncHtB6YS8MStv28-UoLbMYX0,6655
- spaik_sdk/server/api/routers/audio_router_factory.py,sha256=x0W7reFAVerdUqUkKn_rc4WvVyIaMHckbDuESGUmOhY,7369
+ spaik_sdk/server/api/routers/audio_router_factory.py,sha256=5MyUzWSPpYrQr7eAh9DczIs_FyQw-EKaAMWXUKM1Evw,7353
  spaik_sdk/server/api/routers/file_router_factory.py,sha256=0nT-L61AcCr6Z7jsXJgGPPDucho1MzoA8jDYbx5OQHs,4020
  spaik_sdk/server/api/routers/thread_router_factory.py,sha256=h8nCA0-a26WnKHOR9QvaBnQ6ezXn8sIsG21Vf1qfBB0,14183
  spaik_sdk/server/api/streaming/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -100,7 +110,7 @@ spaik_sdk/server/api/streaming/format_sse_event.py,sha256=EEBxbXL_Dbne30Lv31JeNp
  spaik_sdk/server/api/streaming/negotiate_streaming_response.py,sha256=InamT7fTY99tUkcuT-68CmH1JB9Nult0Jp4qzpAeKUk,149
  spaik_sdk/server/api/streaming/streaming_negotiator.py,sha256=xfjgm_GRfh0VV9Fw_5fimz3eyb5VMKSlzlAoVn4euf8,369
  spaik_sdk/server/authorization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- spaik_sdk/server/authorization/base_authorizer.py,sha256=ry_r_uBMoJoI179Fk_THiqjTQovJW-Hw1VjIuDeQ2xw,2628
+ spaik_sdk/server/authorization/base_authorizer.py,sha256=5geuXut1h3OArGWjJRpaknHmV_LBPZQUz3WSfwJ_F0M,2620
  spaik_sdk/server/authorization/base_user.py,sha256=x3bthycXLy_ws3xKH3ZSb_84ho1qymdlOQTWrfqqmXo,274
  spaik_sdk/server/authorization/dummy_authorizer.py,sha256=uaMW916GISBFopzydhmUIU7ClZKb8rS7wiL5LwrD-kM,573
  spaik_sdk/server/job_processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -149,7 +159,7 @@ spaik_sdk/tools/impl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
  spaik_sdk/tools/impl/mcp_tool_provider.py,sha256=g2ULJkTW7J3nyXI43RdrTj-AzFtMiGCP5Aog79vKnMI,2750
  spaik_sdk/tools/impl/search_tool_provider.py,sha256=fi8SBmvC7--n0cCNaTz6PhEe6Bf4RkyrxYkqKzEDlY4,515
  spaik_sdk/tracing/__init__.py,sha256=kmLFmy1Lb7bS2sryIFoFaknxYXlyuswuP_4gHmwEtv0,526
- spaik_sdk/tracing/agent_trace.py,sha256=ORxHCiilgElebx7O5CyZtwgeWTiruB7sFky98xgsf_M,3039
+ spaik_sdk/tracing/agent_trace.py,sha256=9G2WHQOiVL_HOnlD8Qz2DHtd1Trldqg4jqcFDPGPf_w,3017
  spaik_sdk/tracing/get_trace_sink.py,sha256=ZPg8pVLS1BbY0PwXJXC-O8qSvUvQSzmDb4SWRtsRSSc,2573
  spaik_sdk/tracing/local_trace_sink.py,sha256=QTqkzDv8S0cLtRvScwPmejnj6EpccYaHFjd7KkP9Xrk,984
  spaik_sdk/tracing/noop_trace_sink.py,sha256=AxImIYh8MPzISTp6qDp8ShtWyPLVqLRwsh7yyAVDSjs,540
@@ -157,6 +167,6 @@ spaik_sdk/tracing/trace_sink.py,sha256=LU6aF848Kz2hMZuz0q6l-4IaD0sC-ex0AKFk8mVTS
  spaik_sdk/tracing/trace_sink_mode.py,sha256=74qiL4P3sNVGM3_DUkWKlqlu9UvT928NLKTskD_vxgk,791
  spaik_sdk/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  spaik_sdk/utils/init_logger.py,sha256=htxNtHMxRXVNAXBbS9f6Wmd0aET7kl3ClJ062b3YHmQ,791
- spaik_sdk-0.6.4.dist-info/METADATA,sha256=Si8vJmRo18czibAqovNT87HsVqPl9nxqz-ztBYG90DM,10085
- spaik_sdk-0.6.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- spaik_sdk-0.6.4.dist-info/RECORD,,
+ spaik_sdk-0.6.6.dist-info/METADATA,sha256=yOAYCAPgoLuONrEw2lCgVHJbm40DaQpAxl0hHtkPjNA,10243
+ spaik_sdk-0.6.6.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ spaik_sdk-0.6.6.dist-info/RECORD,,