pygpt-net 2.5.18-py3-none-any.whl → 2.5.20-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. pygpt_net/CHANGELOG.txt +13 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +8 -4
  4. pygpt_net/container.py +3 -3
  5. pygpt_net/controller/chat/command.py +4 -4
  6. pygpt_net/controller/chat/input.py +3 -3
  7. pygpt_net/controller/chat/stream.py +6 -2
  8. pygpt_net/controller/config/placeholder.py +28 -14
  9. pygpt_net/controller/lang/custom.py +2 -2
  10. pygpt_net/controller/mode/__init__.py +22 -1
  11. pygpt_net/controller/model/__init__.py +2 -2
  12. pygpt_net/controller/model/editor.py +6 -63
  13. pygpt_net/controller/model/importer.py +9 -7
  14. pygpt_net/controller/presets/editor.py +8 -8
  15. pygpt_net/core/agents/legacy.py +2 -2
  16. pygpt_net/core/bridge/__init__.py +6 -3
  17. pygpt_net/core/bridge/worker.py +5 -2
  18. pygpt_net/core/command/__init__.py +10 -8
  19. pygpt_net/core/debug/presets.py +2 -2
  20. pygpt_net/core/experts/__init__.py +2 -2
  21. pygpt_net/core/idx/chat.py +7 -20
  22. pygpt_net/core/idx/llm.py +27 -28
  23. pygpt_net/core/llm/__init__.py +25 -3
  24. pygpt_net/core/models/__init__.py +83 -9
  25. pygpt_net/core/modes/__init__.py +2 -2
  26. pygpt_net/core/presets/__init__.py +3 -3
  27. pygpt_net/core/prompt/__init__.py +5 -5
  28. pygpt_net/core/tokens/__init__.py +3 -3
  29. pygpt_net/core/updater/__init__.py +5 -3
  30. pygpt_net/data/config/config.json +8 -3
  31. pygpt_net/data/config/models.json +1051 -2605
  32. pygpt_net/data/config/modes.json +4 -10
  33. pygpt_net/data/config/settings.json +94 -0
  34. pygpt_net/data/locale/locale.en.ini +17 -2
  35. pygpt_net/item/model.py +56 -33
  36. pygpt_net/plugin/base/plugin.py +6 -5
  37. pygpt_net/provider/core/config/patch.py +23 -1
  38. pygpt_net/provider/core/model/json_file.py +7 -7
  39. pygpt_net/provider/core/model/patch.py +60 -7
  40. pygpt_net/provider/core/preset/json_file.py +4 -4
  41. pygpt_net/provider/gpt/__init__.py +18 -15
  42. pygpt_net/provider/gpt/chat.py +91 -21
  43. pygpt_net/provider/gpt/responses.py +58 -21
  44. pygpt_net/provider/llms/anthropic.py +2 -1
  45. pygpt_net/provider/llms/azure_openai.py +11 -7
  46. pygpt_net/provider/llms/base.py +3 -2
  47. pygpt_net/provider/llms/deepseek_api.py +3 -1
  48. pygpt_net/provider/llms/google.py +2 -1
  49. pygpt_net/provider/llms/hugging_face.py +8 -5
  50. pygpt_net/provider/llms/hugging_face_api.py +3 -1
  51. pygpt_net/provider/llms/local.py +2 -1
  52. pygpt_net/provider/llms/ollama.py +8 -6
  53. pygpt_net/provider/llms/openai.py +11 -7
  54. pygpt_net/provider/llms/perplexity.py +109 -0
  55. pygpt_net/provider/llms/x_ai.py +108 -0
  56. pygpt_net/ui/dialog/about.py +5 -5
  57. pygpt_net/ui/dialog/preset.py +5 -5
  58. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/METADATA +65 -178
  59. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/RECORD +62 -60
  60. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/LICENSE +0 -0
  61. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/WHEEL +0 -0
  62. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/core/preset/json_file.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import json
@@ -189,7 +189,7 @@ class JsonFileProvider(BaseProvider):
             MODE_COMPLETION: item.completion,
             MODE_IMAGE: item.img,
             MODE_VISION: item.vision,
-            MODE_LANGCHAIN: item.langchain,
+            # MODE_LANGCHAIN: item.langchain,
             MODE_ASSISTANT: item.assistant,
             MODE_LLAMA_INDEX: item.llama_index,
             MODE_AGENT: item.agent,
@@ -224,8 +224,8 @@ class JsonFileProvider(BaseProvider):
             item.img = data[MODE_IMAGE]
         if MODE_VISION in data:
             item.vision = data[MODE_VISION]
-        if MODE_LANGCHAIN in data:
-            item.langchain = data[MODE_LANGCHAIN]
+        # if MODE_LANGCHAIN in data:
+        #     item.langchain = data[MODE_LANGCHAIN]
         if MODE_ASSISTANT in data:
             item.assistant = data[MODE_ASSISTANT]
         if MODE_LLAMA_INDEX in data:
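Worth noting about the two commented-out branches above: because the deserializer only copies keys it explicitly checks for, dropping the MODE_LANGCHAIN branch stays backward compatible. Presets saved by 2.5.18 still contain the "langchain" key; 2.5.20 now simply skips it. A standalone sketch of that guarded-read pattern (illustrative, not the package's code):

MODE_VISION = "vision"

def deserialize(data: dict) -> dict:
    # guarded-read pattern from JsonFileProvider: keys without a branch are ignored
    item = {}
    if MODE_VISION in data:
        item["vision"] = data[MODE_VISION]
    # the MODE_LANGCHAIN branch is commented out in 2.5.20, so the key falls through
    return item

old_preset = {"vision": True, "langchain": True}  # as written by 2.5.18
print(deserialize(old_preset))  # {'vision': True}: loads cleanly, stale key ignored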
pygpt_net/provider/gpt/__init__.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.25 02:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import base64
@@ -34,6 +34,7 @@ from .responses import Responses
 from .store import Store
 from .summarizer import Summarizer
 from .vision import Vision
+from pygpt_net.item.model import ModelItem


 class Gpt:
@@ -54,11 +55,16 @@ class Gpt:
         self.summarizer = Summarizer(window)
         self.vision = Vision(window)

-    def get_client(self, mode: str = MODE_CHAT) -> OpenAI:
+    def get_client(
+            self,
+            mode: str = MODE_CHAT,
+            model: ModelItem = None
+    ) -> OpenAI:
         """
         Return OpenAI client

         :param mode: Mode
+        :param model: Model
         :return: OpenAI client
         """
         args = {
@@ -79,15 +85,8 @@ class Gpt:
             transport=transport,
         )

-        # research mode endpoint - Perplexity
-        if mode == MODE_RESEARCH:
-            if self.window.core.config.has('api_key_perplexity'):
-                args["api_key"] = self.window.core.config.get('api_key_perplexity')
-            if self.window.core.config.has('api_endpoint_perplexity'):
-                endpoint = self.window.core.config.get('api_endpoint_perplexity')
-                if endpoint:
-                    args["base_url"] = endpoint
-
+        # update client args by mode and model
+        args = self.window.core.models.prepare_client_args(args, mode, model)
         return OpenAI(**args)

     def call(self, context: BridgeContext, extra: dict = None) -> bool:
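The Perplexity branch removed above did not disappear; it moved behind self.window.core.models.prepare_client_args(args, mode, model), so per-provider client arguments now live in core/models (see the +83 -9 change to pygpt_net/core/models/__init__.py in the file list). A rough sketch of the dispatch shape; only the two Perplexity config keys are confirmed by this diff, everything else is an assumption:

MODE_RESEARCH = "research"

# Hedged sketch, not the actual core.models implementation: it centralizes the
# api_key/base_url overrides that get_client() previously applied inline.
def prepare_client_args(config: dict, args: dict, mode: str, model=None) -> dict:
    if mode == MODE_RESEARCH:
        # these two keys come from the branch removed in get_client()
        if config.get("api_key_perplexity"):
            args["api_key"] = config["api_key_perplexity"]
        if config.get("api_endpoint_perplexity"):
            args["base_url"] = config["api_endpoint_perplexity"]
    # 2.5.20 also ships new perplexity.py and x_ai.py providers, so routing by
    # the model's provider is plausible here as well (assumption, not confirmed)
    return args

args = prepare_client_args(
    {"api_key_perplexity": "pplx-...", "api_endpoint_perplexity": "https://api.perplexity.ai"},
    {"api_key": "sk-..."},
    MODE_RESEARCH,
)
print(args["base_url"])  # https://api.perplexity.ai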
@@ -113,9 +112,6 @@ class Gpt:

         # --- Responses API ---- /beta/
         use_responses_api = False
-        if mode == MODE_CHAT:
-            use_responses_api = True  # use responses API for chat, audio, research modes
-        ctx.use_responses_api = use_responses_api  # set in context

         # get model id
         model_id = None
@@ -124,6 +120,12 @@ class Gpt:
         if max_tokens > model.tokens:  # check max output tokens
             max_tokens = model.tokens

+        if model.is_gpt():
+            if mode == MODE_CHAT and self.window.core.config.get('api_use_responses', False):
+                use_responses_api = True  # use responses API for chat mode, only OpenAI models
+
+        ctx.use_responses_api = use_responses_api  # set in context
+
         response = None
         used_tokens = 0
         context.max_tokens = max_tokens  # update max output tokens
@@ -234,6 +236,7 @@ class Gpt:
             )
         else:
             if response.choices[0]:
+                print(response.choices[0].message)
                 if response.choices[0].message.content:
                     output = response.choices[0].message.content.strip()
                 elif response.choices[0].message.tool_calls:
@@ -304,7 +307,7 @@ class Gpt:
         if model is None:
             model = self.window.core.models.from_defaults()

-        client = self.get_client(mode)
+        client = self.get_client(mode, model)
         messages = []
         messages.append({"role": "system", "content": system_prompt})
         messages.append({"role": "user", "content": prompt})
pygpt_net/provider/gpt/chat.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.02.26 23:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import json
@@ -17,7 +17,6 @@ from pygpt_net.core.types import (
     MODE_CHAT,
     MODE_VISION,
     MODE_AUDIO,
-    MODE_RESEARCH,
 )
 from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.ctx import CtxItem
@@ -67,7 +66,7 @@ class Chat:
         user_name = ctx.input_name  # from ctx
         ai_name = ctx.output_name  # from ctx

-        client = self.window.core.gpt.get_client(mode)
+        client = self.window.core.gpt.get_client(mode, context.model)

         # build chat messages
         messages = self.build(
@@ -113,7 +112,10 @@ class Chat:
         })

         # fix: o1 compatibility
-        if model.id is not None and not model.id.startswith("o1") and not model.id.startswith("o3"):
+        if (model.id is not None
+                and not model.id.startswith("o1")
+                and not model.id.startswith("o3")
+                and model.is_gpt()):
             response_kwargs['presence_penalty'] = self.window.core.config.get('presence_penalty')
             response_kwargs['frequency_penalty'] = self.window.core.config.get('frequency_penalty')
             response_kwargs['temperature'] = self.window.core.config.get('temperature')
@@ -184,6 +186,8 @@ class Chat:

         # tokens config
         mode = MODE_CHAT
+        is_tool_output = False
+        tool_call_native_enabled = self.window.core.config.get('func_call.native', False)
         allowed_system = True
         if (model.id is not None
                 and model.id in ["o1-mini", "o1-preview"]):
@@ -257,24 +261,90 @@ class Chat:
                 }
                 messages.append(msg)

-            # use vision and audio if available in current model
-            content = str(prompt)
-            if MODE_VISION in model.mode:
-                content = self.window.core.gpt.vision.build_content(
-                    content=content,
-                    attachments=attachments,
-                )
-            if MODE_AUDIO in model.mode:
-                content = self.window.core.gpt.audio.build_content(
-                    content=content,
-                    multimodal_ctx=multimodal_ctx,
-                )
+                # ---- tool output ----
+                is_tool_output = False
+                if tool_call_native_enabled and item.extra and isinstance(item.extra, dict):
+                    if "tool_calls" in item.extra and isinstance(item.extra["tool_calls"], list):
+                        for tool_call in item.extra["tool_calls"]:
+                            if "function" in tool_call:
+                                if "id" not in tool_call or "name" not in tool_call["function"]:
+                                    continue
+                                if tool_call["id"] and tool_call["function"]["name"]:
+                                    if "tool_output" in item.extra and isinstance(item.extra["tool_output"], list):
+                                        for tool_output in item.extra["tool_output"]:
+                                            if ("cmd" in tool_output
+                                                    and tool_output["cmd"] == tool_call["function"]["name"]):
+                                                msg = {
+                                                    "role": "tool",
+                                                    "tool_call_id": tool_call["id"],
+                                                    "content": str(tool_output),
+                                                }
+                                                last_msg = messages[-1] if messages else None
+                                                if last_msg and last_msg.get("role") == "assistant":
+                                                    last_msg["tool_calls"] = []
+                                                    for call in item.extra["tool_calls"]:
+                                                        last_msg["tool_calls"].append(
+                                                            {
+                                                                "id": call["id"],
+                                                                "type": "function",
+                                                                "function": {
+                                                                    "name": call["function"]["name"],
+                                                                    "arguments": json.dumps(call["function"]["arguments"]),
+                                                                }
+                                                            }
+                                                        )
+                                                    last_msg["content"] = ""
+                                                messages.append(msg)
+                                                is_tool_output = True
+                                                break
+                                            elif "result" in tool_output:
+                                                # if result is present, append it as function call output
+                                                msg = {
+                                                    "role": "tool",
+                                                    "tool_call_id": tool_call["id"],
+                                                    "content": str(tool_output["result"]),
+                                                }
+                                                last_msg = messages[-1] if messages else None
+                                                if last_msg and last_msg.get("role") == "assistant":
+                                                    last_msg["tool_calls"] = []
+                                                    for call in item.extra["tool_calls"]:
+                                                        last_msg["tool_calls"].append(
+                                                            {
+                                                                "id": call["id"],
+                                                                "type": "function",
+                                                                "function": {
+                                                                    "name": call["function"]["name"],
+                                                                    "arguments": json.dumps(call["function"]["arguments"]),
+                                                                }
+                                                            }
+                                                        )
+                                                    last_msg["content"] = ""
+                                                messages.append(msg)
+                                                is_tool_output = True
+                                                break

-            # append current prompt
-            messages.append({
-                "role": "user",
-                "content": content,
-            })
+        # use vision and audio if available in current model
+        if not is_tool_output:  # append current prompt only if not tool output
+            content = str(prompt)
+            if MODE_VISION in model.mode:
+                content = self.window.core.gpt.vision.build_content(
+                    content=content,
+                    attachments=attachments,
+                )
+            if MODE_AUDIO in model.mode:
+                content = self.window.core.gpt.audio.build_content(
+                    content=content,
+                    multimodal_ctx=multimodal_ctx,
+                )
+
+            # append current prompt
+            messages.append({
+                "role": "user",
+                "content": content,
+            })

         # input tokens: update
         self.input_tokens += self.window.core.tokens.from_messages(
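Net effect of the new block in build(): with native function calling enabled (func_call.native) and a history item carrying tool results, the history is rebuilt the way the Chat Completions API expects: the preceding assistant message gets a tool_calls list, one role "tool" message is appended per output and keyed by tool_call_id, and the current user prompt is skipped for that turn. A standalone illustration of the resulting message shapes (ids and arguments invented):

import json

# invented example data; the shapes mirror the block above
tool_call = {"id": "call_1", "function": {"name": "read_file", "arguments": {"path": "a.txt"}}}
tool_output = {"cmd": "read_file", "result": "file contents"}

messages = [{"role": "assistant", "content": "I'll read the file."}]

# attach the call to the last assistant message, as the new code does
messages[-1]["tool_calls"] = [{
    "id": tool_call["id"],
    "type": "function",
    "function": {
        "name": tool_call["function"]["name"],
        "arguments": json.dumps(tool_call["function"]["arguments"]),
    },
}]
messages[-1]["content"] = ""

# append the tool result, keyed by the matching tool_call_id
messages.append({
    "role": "tool",
    "tool_call_id": tool_call["id"],
    "content": str(tool_output["result"]),
})

print(json.dumps(messages, indent=2))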
pygpt_net/provider/gpt/responses.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.26 18:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import json
@@ -66,7 +66,7 @@ class Responses:
         user_name = ctx.input_name  # from ctx
         ai_name = ctx.output_name  # from ctx

-        client = self.window.core.gpt.get_client(mode)
+        client = self.window.core.gpt.get_client(mode, model)

         # build chat messages
         messages = self.build(
@@ -175,9 +175,11 @@ class Responses:
         """
         messages = []
         self.prev_response_id = None  # reset
+        is_tool_output = False  # reset

         # tokens config
         mode = MODE_CHAT
+        tool_call_native_enabled = self.window.core.config.get('func_call.native', False)
         allowed_system = True
         if (model.id is not None
                 and model.id in ["o1-mini", "o1-preview"]):
@@ -243,28 +245,63 @@ class Responses:
                 }
                 messages.append(msg)

-            if item.msg_id and (item.cmds is None or len(item.cmds) == 0):  # if no cmds before
+            # ---- tool output ----
+            is_tool_output = False  # reset tool output flag
+            if tool_call_native_enabled and item.extra and isinstance(item.extra, dict):
+                if "tool_calls" in item.extra and isinstance(item.extra["tool_calls"], list):
+                    for tool_call in item.extra["tool_calls"]:
+                        if "function" in tool_call:
+                            if "call_id" not in tool_call or "name" not in tool_call["function"]:
+                                continue
+                            if tool_call["call_id"] and tool_call["function"]["name"]:
+                                if "tool_output" in item.extra and isinstance(item.extra["tool_output"], list):
+                                    for tool_output in item.extra["tool_output"]:
+                                        if ("cmd" in tool_output
+                                                and tool_output["cmd"] == tool_call["function"]["name"]):
+                                            msg = {
+                                                "type": "function_call_output",
+                                                "call_id": tool_call["call_id"],
+                                                "output": str(tool_output),
+                                            }
+                                            is_tool_output = True
+                                            messages.append(msg)
+                                            break
+                                        elif "result" in tool_output:
+                                            # if result is present, append it as function call output
+                                            msg = {
+                                                "type": "function_call_output",
+                                                "call_id": tool_call["call_id"],
+                                                "output": str(tool_output["result"]),
+                                            }
+                                            is_tool_output = True
+                                            messages.append(msg)
+                                            break
+
+            # --- previous message ID ---
+            if (item.msg_id
+                    and ((item.cmds is None or len(item.cmds) == 0) or is_tool_output)):  # if no cmds before or tool output
                 self.prev_response_id = item.msg_id  # previous response ID to use in current input

         # use vision and audio if available in current model
-        content = str(prompt)
-        if MODE_VISION in model.mode:
-            content = self.window.core.gpt.vision.build_content(
-                content=content,
-                attachments=attachments,
-                responses_api=True,
-            )
-        if MODE_AUDIO in model.mode:
-            content = self.window.core.gpt.audio.build_content(
-                content=content,
-                multimodal_ctx=multimodal_ctx,
-            )
-
-        # append current prompt
-        messages.append({
-            "role": "user",
-            "content": content,
-        })
+        if not is_tool_output:  # append current prompt only if not tool output
+            content = str(prompt)
+            if MODE_VISION in model.mode:
+                content = self.window.core.gpt.vision.build_content(
+                    content=content,
+                    attachments=attachments,
+                    responses_api=True,
+                )
+            if MODE_AUDIO in model.mode:
+                content = self.window.core.gpt.audio.build_content(
+                    content=content,
+                    multimodal_ctx=multimodal_ctx,
+                )
+
+            # append current prompt
+            messages.append({
+                "role": "user",
+                "content": content,
+            })

         # input tokens: update
         self.input_tokens += self.window.core.tokens.from_messages(
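The Responses API counterpart is flatter: instead of a role "tool" message paired with an assistant tool_calls entry, build() appends a single function_call_output item keyed by call_id (note the key is call_id here, not id as in chat.py), and prev_response_id now survives tool turns, so the server-side conversation thread continues. The appended item, with invented values:

# invented values; the item shape matches the block above
tool_call = {"call_id": "call_abc123", "function": {"name": "read_file"}}
tool_output = {"cmd": "read_file", "result": "file contents"}

messages = [{
    "type": "function_call_output",
    "call_id": tool_call["call_id"],
    "output": str(tool_output["result"]),
}]
# with is_tool_output True, the current user prompt is NOT appended, and
# item.msg_id is kept as prev_response_id, continuing the same response thread
print(messages[0])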
pygpt_net/provider/llms/anthropic.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.26 16:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from llama_index.llms.anthropic import Anthropic
@@ -30,6 +30,7 @@ class AnthropicLLM(BaseLLM):
         - api_key: API key for Anthropic API
         """
         self.id = "anthropic"
+        self.name = "Anthropic"
         self.type = [MODE_LLAMA_INDEX]

     def llama(
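This is the first of several identical additions in this release: every provider in pygpt_net/provider/llms/ gains a human-readable self.name next to its machine self.id (including the two new providers, perplexity.py and x_ai.py). Presumably the name feeds provider choice lists in the refreshed model editor; that use is an assumption. The recurring pattern, reduced to a sketch:

MODE_LLAMA_INDEX = "llama_index"

class BaseLLM:
    def __init__(self, *args, **kwargs):
        self.id = ""      # machine identifier, e.g. "anthropic"
        self.name = ""    # display name, new in 2.5.20
        self.type = []    # supported modes

class AnthropicLLM(BaseLLM):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.id = "anthropic"
        self.name = "Anthropic"
        self.type = [MODE_LLAMA_INDEX]

providers = {p.id: p for p in (AnthropicLLM(),)}
print(providers["anthropic"].name)  # Anthropic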
pygpt_net/provider/llms/azure_openai.py
@@ -6,13 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from typing import Optional, List, Dict

-from langchain_openai import AzureOpenAI
-from langchain_openai import AzureChatOpenAI
+# from langchain_openai import AzureOpenAI
+# from langchain_openai import AzureChatOpenAI

 from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
 from llama_index.core.base.embeddings.base import BaseEmbedding
@@ -20,7 +20,6 @@ from llama_index.llms.azure_openai import AzureOpenAI as LlamaAzureOpenAI
 from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding

 from pygpt_net.core.types import (
-    MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
 )
 from pygpt_net.provider.llms.base import BaseLLM
@@ -39,7 +38,8 @@ class AzureOpenAILLM(BaseLLM):
         - api_key: API key for Azure OpenAI API
         """
         self.id = "azure_openai"
-        self.type = [MODE_LANGCHAIN, MODE_LLAMA_INDEX, "embeddings"]
+        self.name = "Azure OpenAI"
+        self.type = [MODE_LLAMA_INDEX, "embeddings"]

     def completion(
         self,
@@ -54,9 +54,11 @@ class AzureOpenAILLM(BaseLLM):
         :param model: model instance
         :param stream: stream mode
         :return: LLM provider instance
-        """
+
         args = self.parse_args(model.langchain)
         return AzureOpenAI(**args)
+        """
+        pass

     def chat(
         self,
@@ -71,9 +73,11 @@ class AzureOpenAILLM(BaseLLM):
         :param model: model instance
         :param stream: stream mode
         :return: LLM provider instance
-        """
+
         args = self.parse_args(model.langchain)
         return AzureChatOpenAI(**args)
+        """
+        pass

     def llama(
         self,
pygpt_net/provider/llms/base.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import os
@@ -48,7 +48,8 @@ class BaseLLM:
         """
         options = {}
         if mode == MODE_LANGCHAIN:
-            options = model.langchain
+            pass
+            # options = model.langchain
         elif mode == MODE_LLAMA_INDEX:
             options = model.llama_index
         if 'env' in options:
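With the MODE_LANGCHAIN branch stubbed out, parse_args() only ever reads model.llama_index options now. The 'env' check visible above suggests the options carry environment entries exported before the client is built; a rough sketch under that assumption (the exact schema of the 'args' and 'env' entries is inferred, not confirmed by this diff):

import os

def parse_args(options: dict) -> dict:
    # assumed schema: {"args": [{"name": ..., "value": ...}], "env": [{"name": ..., "value": ...}]}
    args = {}
    for arg in options.get("args", []):
        args[arg["name"]] = arg["value"]
    if 'env' in options:
        for env in options["env"]:
            os.environ[env["name"]] = str(env["value"])  # export to the process env
    return args

print(parse_args({"args": [{"name": "model", "value": "llama3"}],
                  "env": [{"name": "OLLAMA_API_BASE", "value": "http://localhost:11434"}]}))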
pygpt_net/provider/llms/deepseek_api.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.01.31 19:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from pygpt_net.core.types import (
@@ -14,6 +14,7 @@ from pygpt_net.core.types import (
 )
 from llama_index.llms.deepseek import DeepSeek
 from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
+
 from pygpt_net.provider.llms.base import BaseLLM
 from pygpt_net.item.model import ModelItem

@@ -22,6 +23,7 @@ class DeepseekApiLLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(DeepseekApiLLM, self).__init__(*args, **kwargs)
         self.id = "deepseek_api"
+        self.name = "Deepseek API"
         self.type = [MODE_LLAMA_INDEX]

     def llama(
pygpt_net/provider/llms/google.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.26 16:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from typing import Optional, List, Dict
@@ -34,6 +34,7 @@ class GoogleLLM(BaseLLM):
         - api_key: API key for Google API
         """
         self.id = "google"
+        self.name = "Google"
         self.type = [MODE_LLAMA_INDEX, "embeddings"]

     def llama(
pygpt_net/provider/llms/hugging_face.py
@@ -6,10 +6,10 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.26 16:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

-from langchain_community.llms import HuggingFaceHub
+# from langchain_community.llms import HuggingFaceHub

 from pygpt_net.core.types import (
     MODE_LANGCHAIN,
@@ -22,7 +22,8 @@ class HuggingFaceLLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(HuggingFaceLLM, self).__init__(*args, **kwargs)
         self.id = "huggingface"
-        self.type = [MODE_LANGCHAIN]
+        self.name = "HuggingFace"
+        self.type = []

     def completion(
         self,
@@ -37,11 +38,13 @@ class HuggingFaceLLM(BaseLLM):
         :param model: model instance
         :param stream: stream mode
         :return: LLM provider instance
-        """
+
         args = self.parse_args(model.langchain)
         if "model" not in args:
             args["model"] = model.id
         return HuggingFaceHub(**args)
+        """
+        pass

     def chat(
         self,
@@ -57,4 +60,4 @@ class HuggingFaceLLM(BaseLLM):
         :param stream: stream mode
         :return: LLM provider instance
         """
-        return None
+        pass
pygpt_net/provider/llms/hugging_face_api.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.26 16:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import os
@@ -19,6 +19,7 @@ from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding as HuggingFaceAPIEmbedding
 from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
 from llama_index.core.base.embeddings.base import BaseEmbedding
+
 from pygpt_net.provider.llms.base import BaseLLM
 from pygpt_net.item.model import ModelItem

@@ -27,6 +28,7 @@ class HuggingFaceApiLLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(HuggingFaceApiLLM, self).__init__(*args, **kwargs)
         self.id = "huggingface_api"
+        self.name = "HuggingFace API"
         self.type = [MODE_LLAMA_INDEX, "embeddings"]

     def llama(
pygpt_net/provider/llms/local.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from llama_index.llms.openai_like import OpenAILike
@@ -23,6 +23,7 @@ class LocalLLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(LocalLLM, self).__init__(*args, **kwargs)
         self.id = "local_ai"
+        self.name = "Local model (OpenAI API compatible)"
         self.type = [MODE_LLAMA_INDEX]

     def llama(