donkit-llm 0.1.7__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff shows the content of publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between the two published versions.
donkit/llm/__init__.py CHANGED
@@ -26,6 +26,13 @@ from .factory import ModelFactory
 from .gemini_model import GeminiModel, GeminiEmbeddingModel
 from .donkit_model import DonkitModel

+import importlib.util
+
+if importlib.util.find_spec("donkit.llm_gate.client") is not None:
+    from .llm_gate_model import LLMGateModel
+else:
+    LLMGateModel = None
+
 __all__ = [
     "ModelFactory",
     # Abstract base
@@ -58,3 +65,6 @@ __all__ = [
     "GeminiEmbeddingModel",
     "DonkitModel",
 ]
+
+if LLMGateModel is not None:
+    __all__.append("LLMGateModel")
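
Note the fallback: when donkit-llm-gate-client is absent, the module still binds LLMGateModel to None rather than raising, and simply leaves it out of __all__. A minimal consumer-side sketch of the resulting guard (the RuntimeError and its message are illustrative, not part of the package):

from donkit.llm import LLMGateModel

# LLMGateModel is None when the optional donkit-llm-gate-client
# dependency is not installed, so a None check is the portable guard.
if LLMGateModel is None:
    raise RuntimeError(
        "llm_gate provider unavailable; install donkit-llm-gate-client"
    )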
donkit/llm/factory.py CHANGED
@@ -4,6 +4,13 @@ from .claude_model import ClaudeModel
 from .claude_model import ClaudeVertexModel
 from .donkit_model import DonkitModel
 from .gemini_model import GeminiModel
+
+import importlib.util
+
+if importlib.util.find_spec("donkit.llm_gate.client") is not None:
+    from .llm_gate_model import LLMGateModel
+else:
+    LLMGateModel = None
 from .model_abstract import LLMModelAbstract
 from .openai_model import AzureOpenAIEmbeddingModel
 from .openai_model import AzureOpenAIModel
@@ -174,6 +181,30 @@ class ModelFactory:
             model_name=model_name,
         )

+    @staticmethod
+    def create_llm_gate_model(
+        model_name: str | None,
+        base_url: str,
+        provider: str = "default",
+        embedding_provider: str | None = None,
+        embedding_model_name: str | None = None,
+        user_id: str | None = None,
+        project_id: str | None = None,
+    ) -> LLMGateModel:
+        if LLMGateModel is None:
+            raise ImportError(
+                "Provider 'llm_gate' requires optional dependency 'donkit-llm-gate-client'"
+            )
+        return LLMGateModel(
+            base_url=base_url,
+            provider=provider,
+            model_name=model_name,
+            embedding_provider=embedding_provider,
+            embedding_model_name=embedding_model_name,
+            user_id=user_id,
+            project_id=project_id,
+        )
+
     @staticmethod
     def create_model(
         provider: Literal[
@@ -184,6 +215,7 @@ class ModelFactory:
            "vertex",
            "ollama",
            "donkit",
+            "llm_gate",
        ],
        model_name: str | None,
        credentials: dict,
@@ -198,6 +230,7 @@ class ModelFactory:
                "vertex": "gemini-2.5-flash",
                "ollama": "mistral",
                "donkit": None,
+                "llm_gate": None,
            }
            model_name = default_models.get(provider, "default")
        if provider == "openai":
@@ -258,5 +291,15 @@ class ModelFactory:
                api_key=credentials["api_key"],
                base_url=credentials["base_url"],
            )
+        elif provider == "llm_gate":
+            return ModelFactory.create_llm_gate_model(
+                model_name=model_name,
+                base_url=credentials["base_url"],
+                provider=credentials.get("provider", "default"),
+                embedding_provider=credentials.get("embedding_provider"),
+                embedding_model_name=credentials.get("embedding_model_name"),
+                user_id=credentials.get("user_id"),
+                project_id=credentials.get("project_id"),
+            )
        else:
            raise ValueError(f"Unknown provider: {provider}")
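
With this wiring, an llm_gate model is requested through the same create_model entry point as every other provider; only "base_url" is mandatory in credentials, and the remaining keys are read with .get(). A usage sketch (URL and identifier values are placeholders):

from donkit.llm import ModelFactory

# Sketch of the routing added above; with model_name=None the factory
# falls back to the llm_gate default model name (None).
model = ModelFactory.create_model(
    provider="llm_gate",
    model_name=None,
    credentials={
        "base_url": "http://localhost:8002",  # placeholder gateway URL
        "provider": "openai",                 # hypothetical upstream provider
        "user_id": "user-123",                # hypothetical identifiers
        "project_id": "proj-456",
    },
)

The new module it dispatches to follows.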
donkit/llm/llm_gate_model.py ADDED
@@ -0,0 +1,210 @@
+from typing import Any, AsyncIterator
+
+from .model_abstract import (
+    EmbeddingRequest,
+    EmbeddingResponse,
+    FunctionCall,
+    GenerateRequest,
+    GenerateResponse,
+    LLMModelAbstract,
+    Message,
+    ModelCapability,
+    StreamChunk,
+    Tool,
+    ToolCall,
+)
+
+
+class LLMGateModel(LLMModelAbstract):
+    name = "llm_gate"
+
+    @staticmethod
+    def _get_client() -> type:
+        try:
+            from donkit.llm_gate.client import LLMGate
+
+            return LLMGate
+        except Exception as e:
+            raise ImportError(
+                "LLMGateModel requires 'donkit-llm-gate-client' to be installed"
+            ) from e
+
+    def __init__(
+        self,
+        base_url: str = "http://localhost:8002",
+        provider: str = "default",
+        model_name: str | None = None,
+        embedding_provider: str | None = None,
+        embedding_model_name: str | None = None,
+        user_id: str | None = None,
+        project_id: str | None = None,
+    ):
+        self.base_url = base_url
+        self.provider = provider
+        self._model_name = model_name
+        self.embedding_provider = embedding_provider
+        self.embedding_model_name = embedding_model_name
+        self.user_id = user_id
+        self.project_id = project_id
+        self._capabilities = self._determine_capabilities()
+
+    @property
+    def model_name(self) -> str:
+        return self._model_name or "default"
+
+    @model_name.setter
+    def model_name(self, value: str):
+        self._model_name = value
+        self._capabilities = self._determine_capabilities()
+
+    @property
+    def capabilities(self) -> ModelCapability:
+        return self._capabilities
+
+    def _determine_capabilities(self) -> ModelCapability:
+        caps = (
+            ModelCapability.TEXT_GENERATION
+            | ModelCapability.STREAMING
+            | ModelCapability.STRUCTURED_OUTPUT
+            | ModelCapability.TOOL_CALLING
+            | ModelCapability.MULTIMODAL_INPUT
+            | ModelCapability.EMBEDDINGS
+        )
+        return caps
+
+    def _convert_message(self, msg: Message) -> dict:
+        result: dict[str, Any] = {"role": msg.role}
+        if isinstance(msg.content, str):
+            result["content"] = msg.content
+        else:
+            content_parts = []
+            for part in msg.content if msg.content else []:
+                content_parts.append(part.model_dump(exclude_none=True))
+            result["content"] = content_parts
+        if msg.tool_calls:
+            result["tool_calls"] = [tc.model_dump() for tc in msg.tool_calls]
+        if msg.tool_call_id:
+            result["tool_call_id"] = msg.tool_call_id
+        if msg.name:
+            result["name"] = msg.name
+        return result
+
+    def _convert_tools(self, tools: list[Tool]) -> list[dict]:
+        return [tool.model_dump(exclude_none=True) for tool in tools]
+
+    def _prepare_generate_kwargs(self, request: GenerateRequest) -> dict:
+        messages = [self._convert_message(msg) for msg in request.messages]
+        tools_payload = self._convert_tools(request.tools) if request.tools else None
+
+        kwargs: dict[str, Any] = {
+            "provider": self.provider,
+            "model_name": self.model_name,
+            "messages": messages,
+            "user_id": self.user_id,
+            "project_id": self.project_id,
+        }
+
+        if request.temperature is not None:
+            kwargs["temperature"] = request.temperature
+        if request.max_tokens is not None:
+            kwargs["max_tokens"] = request.max_tokens
+        if request.top_p is not None:
+            kwargs["top_p"] = request.top_p
+        if request.stop:
+            kwargs["stop"] = request.stop
+        if tools_payload:
+            kwargs["tools"] = tools_payload
+        if request.tool_choice:
+            if isinstance(request.tool_choice, (str, dict)):
+                kwargs["tool_choice"] = request.tool_choice
+            else:
+                kwargs["tool_choice"] = "auto"
+        if request.response_format:
+            kwargs["response_format"] = request.response_format
+
+        return kwargs
+
+    async def generate(self, request: GenerateRequest) -> GenerateResponse:
+        await self.validate_request(request)
+
+        kwargs = self._prepare_generate_kwargs(request)
+
+        llm_gate = self._get_client()
+
+        async with llm_gate(base_url=self.base_url) as client:
+            response = await client.generate(**kwargs)
+
+        tool_calls = None
+        if response.tool_calls:
+            tool_calls = [
+                ToolCall(
+                    id=tc.id,
+                    type=tc.type,
+                    function=FunctionCall(
+                        name=tc.function.name,
+                        arguments=tc.function.arguments,
+                    ),
+                )
+                for tc in response.tool_calls
+            ]
+
+        return GenerateResponse(
+            content=response.content,
+            tool_calls=tool_calls,
+            finish_reason=response.finish_reason,
+            usage=response.usage,
+            metadata=response.metadata,
+        )
+
+    async def generate_stream(
+        self, request: GenerateRequest
+    ) -> AsyncIterator[StreamChunk]:
+        await self.validate_request(request)
+
+        kwargs = self._prepare_generate_kwargs(request)
+
+        llm_gate = self._get_client()
+
+        async with llm_gate(base_url=self.base_url) as client:
+            async for chunk in client.generate_stream(**kwargs):
+                tool_calls = None
+                if chunk.tool_calls:
+                    tool_calls = [
+                        ToolCall(
+                            id=tc.id,
+                            type=tc.type,
+                            function=FunctionCall(
+                                name=tc.function.name,
+                                arguments=tc.function.arguments,
+                            ),
+                        )
+                        for tc in chunk.tool_calls
+                    ]
+
+                yield StreamChunk(
+                    content=chunk.content,
+                    tool_calls=tool_calls,
+                    finish_reason=chunk.finish_reason,
+                )
+
+    async def embed(self, request: EmbeddingRequest) -> EmbeddingResponse:
+        provider = self.embedding_provider or self.provider
+        model_name = self.embedding_model_name
+
+        llm_gate = self._get_client()
+
+        async with llm_gate(base_url=self.base_url) as client:
+            response = await client.embeddings(
+                provider=provider,
+                input=request.input,
+                model_name=model_name,
+                dimensions=request.dimensions,
+                user_id=self.user_id,
+                project_id=self.project_id,
+            )
+
+        return EmbeddingResponse(
+            embeddings=response.embeddings,
+            usage=response.usage,
+            metadata=response.metadata,
+        )
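
End to end, the model opens a short-lived LLMGate client per call and maps the gateway response back onto the shared donkit.llm types. A usage sketch, assuming GenerateRequest and Message accept the field names referenced by _prepare_generate_kwargs and _convert_message above (provider and model id values are hypothetical):

import asyncio

from donkit.llm import LLMGateModel
from donkit.llm.model_abstract import GenerateRequest, Message


async def main() -> None:
    model = LLMGateModel(
        base_url="http://localhost:8002",  # constructor default shown above
        provider="openai",                 # hypothetical upstream provider
        model_name="gpt-4o-mini",          # hypothetical model id
    )
    request = GenerateRequest(
        messages=[Message(role="user", content="Say hello.")],
        temperature=0.2,
        max_tokens=64,
    )
    response = await model.generate(request)
    print(response.content)


asyncio.run(main())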
donkit_llm-0.1.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: donkit-llm
-Version: 0.1.7
+Version: 0.1.8
 Summary: Unified LLM model implementations for Donkit (OpenAI, Azure OpenAI, Claude, Vertex AI, Ollama)
 License: MIT
 Author: Donkit AI
donkit_llm-0.1.8.dist-info/RECORD CHANGED
@@ -1,11 +1,12 @@
-donkit/llm/__init__.py,sha256=0w5hPdaJDpzL1EpnBZm-7WV1Rz1OUdcCMcnxNRpXdiM,1357
+donkit/llm/__init__.py,sha256=OXVV7y6uOdkU5CTjrodfS4IBt8vP1ac2S6IZ6UHXmfE,1588
 donkit/llm/claude_model.py,sha256=9UjNkACc6wHFus2gOXLKOi9yjX2FkP3cpQ9zFZEcXWU,16650
 donkit/llm/donkit_model.py,sha256=rEPxBW6k_BhIBF4XkgLzibVfwW6OJHiX89yMadcJkY4,8497
-donkit/llm/factory.py,sha256=KoZ9bD6FsZjU3ldKL7szznDSB8gI1slnI1jGGwKIuVY,9195
+donkit/llm/factory.py,sha256=zYhnBrgurif1uTrtZip0d8gb09amvGW6u1enHk5UGIc,10751
 donkit/llm/gemini_model.py,sha256=2uLoZr9HjUf1wxiZRGLQFcURCutsB2SV9f-1VaR6kGI,14413
+donkit/llm/llm_gate_model.py,sha256=BQc4w1oJ0HqrI_Ng3qnynfZ-vcHcMqjXJmbMwd7u34Q,6958
 donkit/llm/model_abstract.py,sha256=aOgYh3I96PsxSxnkIJ1ETx5UFeRxozCD1c44wiKoBSs,8191
 donkit/llm/openai_model.py,sha256=uS1COq1Ctys7tUmt2IJcMyzu-U_eQkDHLwCzfSlGQ1k,30489
 donkit/llm/vertex_model.py,sha256=LcdWBdx4JYzom2IsXxhNGEsrYf0N6JmwuRc3sqfKIos,29350
-donkit_llm-0.1.7.dist-info/METADATA,sha256=k8mkXHvxvcAJSLlvtUSFoVQFzSTnJM69DYtEty5IOgQ,742
-donkit_llm-0.1.7.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-donkit_llm-0.1.7.dist-info/RECORD,,
+donkit_llm-0.1.8.dist-info/METADATA,sha256=tMSUh1sR6TzIwW_PhXrY1GkC6Vve0jvaA_B2QEuxKR4,742
+donkit_llm-0.1.8.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+donkit_llm-0.1.8.dist-info/RECORD,,