pygpt-net 2.6.24__py3-none-any.whl → 2.6.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. pygpt_net/CHANGELOG.txt +10 -0
  2. pygpt_net/__init__.py +1 -1
  3. pygpt_net/app.py +3 -1
  4. pygpt_net/controller/config/placeholder.py +3 -1
  5. pygpt_net/controller/model/importer.py +42 -5
  6. pygpt_net/core/agents/runners/loop.py +36 -3
  7. pygpt_net/core/models/models.py +5 -1
  8. pygpt_net/core/types/openai.py +2 -1
  9. pygpt_net/data/config/config.json +34 -3
  10. pygpt_net/data/config/models.json +2 -2
  11. pygpt_net/data/config/settings.json +72 -1
  12. pygpt_net/data/locale/locale.en.ini +10 -0
  13. pygpt_net/provider/core/config/patch.py +46 -1
  14. pygpt_net/provider/llms/anthropic.py +33 -3
  15. pygpt_net/provider/llms/azure_openai.py +9 -4
  16. pygpt_net/provider/llms/deepseek_api.py +36 -3
  17. pygpt_net/provider/llms/google.py +7 -1
  18. pygpt_net/provider/llms/hugging_face_api.py +9 -3
  19. pygpt_net/provider/llms/hugging_face_router.py +17 -3
  20. pygpt_net/provider/llms/llama_index/x_ai/__init__.py +0 -0
  21. pygpt_net/provider/llms/llama_index/x_ai/embedding.py +71 -0
  22. pygpt_net/provider/llms/local.py +31 -1
  23. pygpt_net/provider/llms/mistral.py +29 -1
  24. pygpt_net/provider/llms/ollama.py +3 -1
  25. pygpt_net/provider/llms/open_router.py +104 -0
  26. pygpt_net/provider/llms/openai.py +7 -2
  27. pygpt_net/provider/llms/x_ai.py +19 -3
  28. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/METADATA +51 -28
  29. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/RECORD +32 -29
  30. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/LICENSE +0 -0
  31. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/WHEEL +0 -0
  32. {pygpt_net-2.6.24.dist-info → pygpt_net-2.6.26.dist-info}/entry_points.txt +0 -0
@@ -6,9 +6,12 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
- from typing import List, Dict
11
+
12
+ from typing import List, Dict, Optional
13
+
14
+ from llama_index.core.base.embeddings.base import BaseEmbedding
12
15
 
13
16
  from pygpt_net.core.types import (
14
17
  MODE_LLAMA_INDEX,
@@ -24,7 +27,7 @@ class DeepseekApiLLM(BaseLLM):
24
27
  super(DeepseekApiLLM, self).__init__(*args, **kwargs)
25
28
  self.id = "deepseek_api"
26
29
  self.name = "Deepseek API"
27
- self.type = [MODE_LLAMA_INDEX]
30
+ self.type = [MODE_LLAMA_INDEX, "embeddings"]
28
31
 
29
32
  def llama(
30
33
  self,
@@ -42,8 +45,38 @@ class DeepseekApiLLM(BaseLLM):
42
45
  """
43
46
  from llama_index.llms.deepseek import DeepSeek
44
47
  args = self.parse_args(model.llama_index, window)
48
+ if "model" not in args:
49
+ args["model"] = model.id
50
+ if "api_key" not in args or args["api_key"] == "":
51
+ args["api_key"] = window.core.config.get("api_key_deepseek", "")
45
52
  return DeepSeek(**args)
46
53
 
54
+ def get_embeddings_model(
55
+ self,
56
+ window,
57
+ config: Optional[List[Dict]] = None
58
+ ) -> BaseEmbedding:
59
+ """
60
+ Return provider instance for embeddings
61
+
62
+ :param window: window instance
63
+ :param config: config keyword arguments list
64
+ :return: Embedding provider instance
65
+ """
66
+ from llama_index.embeddings.voyageai import VoyageEmbedding
67
+ args = {}
68
+ if config is not None:
69
+ args = self.parse_args({
70
+ "args": config,
71
+ }, window)
72
+ if "api_key" in args:
73
+ args["voyage_api_key"] = args.pop("api_key")
74
+ if "voyage_api_key" not in args or args["voyage_api_key"] == "":
75
+ args["voyage_api_key"] = window.core.config.get("api_key_voyage", "")
76
+ if "model" in args and "model_name" not in args:
77
+ args["model_name"] = args.pop("model")
78
+ return VoyageEmbedding(**args)
79
+
47
80
  def get_models(
48
81
  self,
49
82
  window,
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List, Dict
@@ -53,6 +53,8 @@ class GoogleLLM(BaseLLM):
53
53
  args = self.parse_args(model.llama_index, window)
54
54
  if "model" not in args:
55
55
  args["model"] = model.id
56
+ if "api_key" not in args or args["api_key"] == "":
57
+ args["api_key"] = window.core.config.get("api_key_google", "")
56
58
  return GoogleGenAI(**args)
57
59
 
58
60
  def get_embeddings_model(
@@ -73,6 +75,10 @@ class GoogleLLM(BaseLLM):
73
75
  args = self.parse_args({
74
76
  "args": config,
75
77
  }, window)
78
+ if "api_key" not in args or args["api_key"] == "":
79
+ args["api_key"] = window.core.config.get("api_key_google", "")
80
+ if "model" in args and "model_name" not in args:
81
+ args["model_name"] = args.pop("model")
76
82
  return GoogleGenAIEmbedding(**args)
77
83
 
78
84
  def get_models(
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -47,6 +47,8 @@ class HuggingFaceApiLLM(BaseLLM):
47
47
  args = self.parse_args(model.llama_index, window)
48
48
  if "model" not in args:
49
49
  args["model"] = model.id
50
+ if "api_key" not in args or args["api_key"] == "":
51
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
50
52
  return HuggingFaceInferenceAPI(**args)
51
53
 
52
54
  def get_embeddings_model(
@@ -61,13 +63,17 @@ class HuggingFaceApiLLM(BaseLLM):
61
63
  :param config: config keyword arguments list
62
64
  :return: Embedding provider instance
63
65
  """
64
- from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding as HuggingFaceAPIEmbedding
66
+ from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding
65
67
  args = {}
66
68
  if config is not None:
67
69
  args = self.parse_args({
68
70
  "args": config,
69
71
  }, window)
70
- return HuggingFaceAPIEmbedding(**args)
72
+ if "api_key" not in args or args["api_key"] == "":
73
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
74
+ if "model" in args and "model_name" not in args:
75
+ args["model_name"] = args.pop("model")
76
+ return HuggingFaceInferenceAPIEmbedding(**args)
71
77
 
72
78
  def init_embeddings(
73
79
  self,
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.08 19:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List, Dict
@@ -28,7 +28,7 @@ class HuggingFaceRouterLLM(BaseLLM):
28
28
  super(HuggingFaceRouterLLM, self).__init__(*args, **kwargs)
29
29
  self.id = "huggingface_router"
30
30
  self.name = "HuggingFace Router"
31
- self.type = [MODE_CHAT, MODE_LLAMA_INDEX]
31
+ self.type = [MODE_CHAT, MODE_LLAMA_INDEX, "embeddings"]
32
32
 
33
33
  def completion(
34
34
  self,
@@ -78,6 +78,10 @@ class HuggingFaceRouterLLM(BaseLLM):
78
78
  """
79
79
  from llama_index.llms.openai_like import OpenAILike
80
80
  args = self.parse_args(model.llama_index, window)
81
+ if "model" not in args:
82
+ args["model"] = model.id
83
+ if "api_key" not in args or args["api_key"] == "":
84
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
81
85
  return OpenAILike(**args)
82
86
 
83
87
  def llama_multimodal(
@@ -108,7 +112,17 @@ class HuggingFaceRouterLLM(BaseLLM):
108
112
  :param config: config keyword arguments list
109
113
  :return: Embedding provider instance
110
114
  """
111
- pass
115
+ from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding
116
+ args = {}
117
+ if config is not None:
118
+ args = self.parse_args({
119
+ "args": config,
120
+ }, window)
121
+ if "api_key" not in args or args["api_key"] == "":
122
+ args["api_key"] = window.core.config.get("api_key_hugging_face", "")
123
+ if "model" in args and "model_name" not in args:
124
+ args["model_name"] = args.pop("model")
125
+ return HuggingFaceInferenceAPIEmbedding(**args)
112
126
 
113
127
  def get_models(
114
128
  self,
File without changes
@@ -0,0 +1,71 @@
1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================== #
# This file is a part of PYGPT package               #
# Website: https://pygpt.net                         #
# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
# MIT License                                        #
# Created By  : Marcin Szczygliński                  #
# Updated Date: 2025.08.26 19:00:00                  #
# ================================================== #

import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import Any, List, Optional

import xai_sdk
from llama_index.core.embeddings import BaseEmbedding


class XAIEmbedding(BaseEmbedding):
    """
    LlamaIndex xAI Embedding SDK wrapper.

    Bridges the async-only xai_sdk embedder to the sync + async
    BaseEmbedding interface expected by LlamaIndex.
    """

    def __init__(
        self,
        model_name: str,
        api_key: Optional[str] = None,
        api_host: str = "api.x.ai",
        **kwargs: Any,
    ) -> None:
        """
        :param model_name: xAI embedding model name
        :param api_key: xAI API key
        :param api_host: xAI API host
        """
        super().__init__(model_name=model_name, **kwargs)
        # NOTE(review): BaseEmbedding is pydantic-based — confirm these
        # underscore attributes are accepted without PrivateAttr declarations.
        self._api_key = api_key
        self._api_host = api_host
        self._client = xai_sdk.Client(api_key=api_key, api_host=api_host)

    def _run_async(self, coro):
        """
        Drive *coro* to completion from synchronous code and return its result.

        Fix: the previous implementation scheduled the coroutine with
        asyncio.run_coroutine_threadsafe(coro, loop) and then blocked on
        .result() while executing in the SAME thread as the running loop —
        the loop can never process the scheduled task, so the call deadlocks.
        When a loop is already running in this thread, we now execute the
        coroutine on a fresh event loop in a worker thread instead.
        """
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # no running loop in this thread: safe to own one directly
            return asyncio.run(coro)
        # a loop is running in this thread — run the coroutine elsewhere
        with ThreadPoolExecutor(max_workers=1) as pool:
            return pool.submit(asyncio.run, coro).result()

    async def _aembed_many(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of texts via the xAI SDK (async), preserving order."""
        embeddings: List[List[float]] = []
        async for (values, _shape) in self._client.embedder.embed(
            texts=texts, model_name=self.model_name
        ):
            embeddings.append(list(values))
        return embeddings

    def _get_query_embedding(self, query: str) -> List[float]:
        return self._run_async(self._aembed_many([query]))[0]

    async def _aget_query_embedding(self, query: str) -> List[float]:
        return (await self._aembed_many([query]))[0]

    def _get_text_embedding(self, text: str) -> List[float]:
        return self._run_async(self._aembed_many([text]))[0]

    async def _aget_text_embedding(self, text: str) -> List[float]:
        return (await self._aembed_many([text]))[0]

    def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        return self._run_async(self._aembed_many(texts))

    async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        return await self._aembed_many(texts)
@@ -8,7 +8,9 @@
8
8
  # Created By : Marcin Szczygliński #
9
9
  # Updated Date: 2025.08.06 01:00:00 #
10
10
  # ================================================== #
11
+ from typing import Optional, Dict, List
11
12
 
13
+ from llama_index.core.base.embeddings.base import BaseEmbedding
12
14
  from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
13
15
 
14
16
  from pygpt_net.core.types import (
@@ -23,7 +25,29 @@ class LocalLLM(BaseLLM):
23
25
  super(LocalLLM, self).__init__(*args, **kwargs)
24
26
  self.id = "local_ai"
25
27
  self.name = "Local model (OpenAI API compatible)"
26
- self.type = [MODE_LLAMA_INDEX]
28
+ self.type = [MODE_LLAMA_INDEX, "embeddings"]
29
+
30
+ def get_embeddings_model(
31
+ self,
32
+ window,
33
+ config: Optional[List[Dict]] = None
34
+ ) -> BaseEmbedding:
35
+ """
36
+ Return provider instance for embeddings
37
+
38
+ :param window: window instance
39
+ :param config: config keyword arguments list
40
+ :return: Embedding provider instance
41
+ """
42
+ from llama_index.embeddings.openai_like import OpenAILikeEmbedding
43
+ args = {}
44
+ if config is not None:
45
+ args = self.parse_args({
46
+ "args": config,
47
+ }, window)
48
+ if "model" in args and "model_name" not in args:
49
+ args["model_name"] = args.pop("model")
50
+ return OpenAILikeEmbedding(**args)
27
51
 
28
52
  def llama(
29
53
  self,
@@ -41,4 +65,10 @@ class LocalLLM(BaseLLM):
41
65
  """
42
66
  from llama_index.llms.openai_like import OpenAILike
43
67
  args = self.parse_args(model.llama_index, window)
68
+ if "model" not in args:
69
+ args["model"] = model.id
70
+ if "is_chat_model" not in args:
71
+ args["is_chat_model"] = True
72
+ if "is_function_calling_model" not in args:
73
+ args["is_function_calling_model"] = model.tool_calls
44
74
  return OpenAILike(**args)
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -47,6 +47,8 @@ class MistralAILLM(BaseLLM):
47
47
  args = self.parse_args(model.llama_index, window)
48
48
  if "model" not in args:
49
49
  args["model"] = model.id
50
+ if "api_key" not in args or args["api_key"] == "":
51
+ args["api_key"] = window.core.config.get("api_key_mistral", "")
50
52
  return MistralAI(**args)
51
53
 
52
54
  def get_embeddings_model(
@@ -67,6 +69,10 @@ class MistralAILLM(BaseLLM):
67
69
  args = self.parse_args({
68
70
  "args": config,
69
71
  }, window)
72
+ if "api_key" not in args or args["api_key"] == "":
73
+ args["api_key"] = window.core.config.get("api_key_mistral", "")
74
+ if "model" in args and "model_name" not in args:
75
+ args["model_name"] = args.pop("model")
70
76
  return MistralAIEmbedding(**args)
71
77
 
72
78
  def init_embeddings(
@@ -87,3 +93,25 @@ class MistralAILLM(BaseLLM):
87
93
  if ('OPENAI_API_KEY' not in os.environ
88
94
  and (window.core.config.get('api_key') is None or window.core.config.get('api_key') == "")):
89
95
  os.environ['OPENAI_API_KEY'] = "_"
96
+
97
+ def get_models(
98
+ self,
99
+ window,
100
+ ) -> List[Dict]:
101
+ """
102
+ Return list of models for the provider
103
+
104
+ :param window: window instance
105
+ :return: list of models
106
+ """
107
+ items = []
108
+ client = self.get_client(window)
109
+ models_list = client.models.list()
110
+ if models_list.data:
111
+ for item in models_list.data:
112
+ id = item.id
113
+ items.append({
114
+ "id": id,
115
+ "name": id,
116
+ })
117
+ return items
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -118,6 +118,8 @@ class OllamaLLM(BaseLLM):
118
118
  if 'OLLAMA_API_BASE' in os.environ:
119
119
  if "base_url" not in args:
120
120
  args["base_url"] = os.environ['OLLAMA_API_BASE']
121
+ if "model" in args and "model_name" not in args:
122
+ args["model_name"] = args.pop("model")
121
123
  return OllamaEmbedding(**args)
122
124
 
123
125
  def init_embeddings(
@@ -0,0 +1,104 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.08.26 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from typing import Optional, Dict, List
13
+
14
+ from llama_index.core.base.embeddings.base import BaseEmbedding
15
+ from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
16
+
17
+ from pygpt_net.core.types import (
18
+ MODE_LLAMA_INDEX,
19
+ )
20
+ from pygpt_net.provider.llms.base import BaseLLM
21
+ from pygpt_net.item.model import ModelItem
22
+
23
+
24
class OpenRouterLLM(BaseLLM):
    """
    OpenRouter provider (OpenAI-compatible API): chat via LlamaIndex
    OpenAILike and embeddings via the OpenAI-like embeddings endpoint.
    """

    def __init__(self, *args, **kwargs):
        super(OpenRouterLLM, self).__init__(*args, **kwargs)
        self.id = "open_router"
        self.name = "OpenRouter"
        self.type = [MODE_LLAMA_INDEX, "embeddings"]

    def get_embeddings_model(
            self,
            window,
            config: Optional[List[Dict]] = None
    ) -> BaseEmbedding:
        """
        Return provider instance for embeddings

        :param window: window instance
        :param config: config keyword arguments list
        :return: Embedding provider instance
        """
        from llama_index.embeddings.openai_like import OpenAILikeEmbedding
        args = {}
        if config is not None:
            args = self.parse_args({
                "args": config,
            }, window)
        # fall back to app config when key/endpoint is missing OR blank
        # (empty-string check added for consistency with the other providers)
        if "api_key" not in args or args["api_key"] == "":
            args["api_key"] = window.core.config.get("api_key_open_router", "")
        if "api_base" not in args or args["api_base"] == "":
            args["api_base"] = window.core.config.get("api_endpoint_open_router", "")
        # OpenAILikeEmbedding expects "model_name"; accept "model" as an alias
        if "model" in args and "model_name" not in args:
            args["model_name"] = args.pop("model")
        return OpenAILikeEmbedding(**args)

    def llama(
            self,
            window,
            model: ModelItem,
            stream: bool = False
    ) -> LlamaBaseLLM:
        """
        Return LLM provider instance for llama

        :param window: window instance
        :param model: model instance
        :param stream: stream mode
        :return: LLM provider instance
        """
        from llama_index.llms.openai_like import OpenAILike
        args = self.parse_args(model.llama_index, window)
        if "model" not in args:
            args["model"] = model.id
        # fall back to app config when key/endpoint is missing OR blank
        if "api_key" not in args or args["api_key"] == "":
            args["api_key"] = window.core.config.get("api_key_open_router", "")
        if "api_base" not in args or args["api_base"] == "":
            args["api_base"] = window.core.config.get("api_endpoint_open_router", "")
        if "is_chat_model" not in args:
            args["is_chat_model"] = True
        if "is_function_calling_model" not in args:
            args["is_function_calling_model"] = model.tool_calls
        return OpenAILike(**args)

    def get_models(
            self,
            window,
    ) -> List[Dict]:
        """
        Return list of models for the provider

        :param window: window instance
        :return: list of models
        """
        items = []
        client = self.get_client(window)
        models_list = client.models.list()
        if models_list.data:
            for item in models_list.data:
                items.append({
                    "id": item.id,
                    "name": item.id,
                })
        return items
@@ -6,10 +6,9 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
- import json
13
12
  from typing import Optional, List, Dict
14
13
 
15
14
  # from langchain_openai import OpenAI
@@ -93,6 +92,8 @@ class OpenAILLM(BaseLLM):
93
92
  from .llama_index.openai import OpenAI as LlamaOpenAI
94
93
  from .llama_index.openai import OpenAIResponses as LlamaOpenAIResponses
95
94
  args = self.parse_args(model.llama_index, window)
95
+ if "api_key" not in args:
96
+ args["api_key"] = window.core.config.get("api_key", "")
96
97
  if "model" not in args:
97
98
  args["model"] = model.id
98
99
 
@@ -148,6 +149,10 @@ class OpenAILLM(BaseLLM):
148
149
  args = self.parse_args({
149
150
  "args": config,
150
151
  }, window)
152
+ if "api_key" not in args:
153
+ args["api_key"] = window.core.config.get("api_key", "")
154
+ if "model" in args and "model_name" not in args:
155
+ args["model_name"] = args.pop("model")
151
156
  return OpenAIEmbedding(**args)
152
157
 
153
158
  def get_models(
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List, Dict
@@ -28,7 +28,7 @@ class xAILLM(BaseLLM):
28
28
  super(xAILLM, self).__init__(*args, **kwargs)
29
29
  self.id = "x_ai"
30
30
  self.name = "xAI"
31
- self.type = [MODE_CHAT, MODE_LLAMA_INDEX]
31
+ self.type = [MODE_CHAT, MODE_LLAMA_INDEX, "embeddings"]
32
32
 
33
33
  def completion(
34
34
  self,
@@ -78,6 +78,12 @@ class xAILLM(BaseLLM):
78
78
  """
79
79
  from llama_index.llms.openai_like import OpenAILike
80
80
  args = self.parse_args(model.llama_index, window)
81
+ if "model" not in args:
82
+ args["model"] = model.id
83
+ if "api_key" not in args or args["api_key"] == "":
84
+ args["api_key"] = window.core.config.get("api_key_xai", "")
85
+ if "api_base" not in args or args["api_base"] == "":
86
+ args["api_base"] = window.core.config.get("api_endpoint_xai", "https://api.x.ai/v1")
81
87
  return OpenAILike(**args)
82
88
 
83
89
  def llama_multimodal(
@@ -108,7 +114,17 @@ class xAILLM(BaseLLM):
108
114
  :param config: config keyword arguments list
109
115
  :return: Embedding provider instance
110
116
  """
111
- pass
117
+ from .llama_index.x_ai.embedding import XAIEmbedding
118
+ args = {}
119
+ if config is not None:
120
+ args = self.parse_args({
121
+ "args": config,
122
+ }, window)
123
+ if "api_key" not in args or args["api_key"] == "":
124
+ args["api_key"] = window.core.config.get("api_key_xai", "")
125
+ if "model" in args and "model_name" not in args:
126
+ args["model_name"] = args.pop("model")
127
+ return XAIEmbedding(**args)
112
128
 
113
129
  def get_models(
114
130
  self,