homeassistant 2025.7.0b8-py3-none-any.whl → 2025.7.0b9-py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their public registry; it is provided for informational purposes only.
- homeassistant/components/ai_task/translations/nl.json +3 -0
- homeassistant/components/alexa_devices/manifest.json +1 -1
- homeassistant/components/alexa_devices/translations/de.json +2 -1
- homeassistant/components/alexa_devices/translations/es.json +2 -1
- homeassistant/components/alexa_devices/translations/et.json +2 -1
- homeassistant/components/alexa_devices/translations/ga.json +11 -0
- homeassistant/components/alexa_devices/translations/lt.json +29 -2
- homeassistant/components/alexa_devices/translations/mk.json +8 -0
- homeassistant/components/alexa_devices/translations/sk.json +2 -1
- homeassistant/components/alexa_devices/translations/zh-Hant.json +2 -1
- homeassistant/components/assist_satellite/translations/nl.json +7 -0
- homeassistant/components/devolo_home_control/translations/lt.json +10 -0
- homeassistant/components/ecovacs/manifest.json +1 -1
- homeassistant/components/emoncms/translations/ga.json +1 -0
- homeassistant/components/enphase_envoy/translations/et.json +1 -0
- homeassistant/components/enphase_envoy/translations/lt.json +1 -0
- homeassistant/components/enphase_envoy/translations/nl.json +2 -1
- homeassistant/components/ezviz/translations/lt.json +10 -0
- homeassistant/components/frontend/manifest.json +1 -1
- homeassistant/components/google_generative_ai_conversation/translations/lt.json +28 -0
- homeassistant/components/google_travel_time/translations/it.json +7 -0
- homeassistant/components/hassio/ingress.py +6 -2
- homeassistant/components/home_connect/translations/it.json +3 -0
- homeassistant/components/homeassistant/translations/it.json +24 -0
- homeassistant/components/homeassistant_hardware/translations/et.json +1 -0
- homeassistant/components/homeassistant_hardware/translations/ga.json +4 -0
- homeassistant/components/homeassistant_hardware/translations/lt.json +2 -0
- homeassistant/components/homeassistant_hardware/translations/mk.json +3 -0
- homeassistant/components/homeassistant_sky_connect/translations/et.json +2 -0
- homeassistant/components/homeassistant_sky_connect/translations/ga.json +8 -0
- homeassistant/components/homeassistant_sky_connect/translations/lt.json +4 -0
- homeassistant/components/homeassistant_yellow/translations/et.json +1 -0
- homeassistant/components/homeassistant_yellow/translations/ga.json +4 -0
- homeassistant/components/homeassistant_yellow/translations/lt.json +3 -0
- homeassistant/components/immich/translations/nl.json +5 -0
- homeassistant/components/lcn/translations/cs.json +0 -3
- homeassistant/components/lcn/translations/de.json +1 -1
- homeassistant/components/lcn/translations/el.json +0 -3
- homeassistant/components/lcn/translations/en-GB.json +0 -3
- homeassistant/components/lcn/translations/en.json +1 -1
- homeassistant/components/lcn/translations/es.json +1 -1
- homeassistant/components/lcn/translations/et.json +0 -3
- homeassistant/components/lcn/translations/hu.json +0 -3
- homeassistant/components/lcn/translations/ja.json +0 -3
- homeassistant/components/lcn/translations/lt.json +0 -3
- homeassistant/components/lcn/translations/pt-BR.json +0 -3
- homeassistant/components/lcn/translations/pt.json +0 -3
- homeassistant/components/lcn/translations/ru.json +0 -3
- homeassistant/components/lcn/translations/sk.json +0 -3
- homeassistant/components/lcn/translations/sv.json +0 -3
- homeassistant/components/lcn/translations/tr.json +0 -3
- homeassistant/components/lcn/translations/zh-Hans.json +0 -3
- homeassistant/components/lcn/translations/zh-Hant.json +0 -3
- homeassistant/components/lg_thinq/translations/et.json +3 -1
- homeassistant/components/lg_thinq/translations/lt.json +3 -1
- homeassistant/components/litterrobot/translations/et.json +1 -0
- homeassistant/components/litterrobot/translations/lt.json +1 -0
- homeassistant/components/litterrobot/translations/sk.json +1 -0
- homeassistant/components/lyric/translations/it.json +3 -0
- homeassistant/components/miele/translations/it.json +3 -0
- homeassistant/components/miele/translations/sk.json +1 -0
- homeassistant/components/mqtt/translations/bg.json +10 -0
- homeassistant/components/mqtt/translations/ga.json +10 -0
- homeassistant/components/mqtt/translations/it.json +7 -1
- homeassistant/components/mqtt/translations/lt.json +37 -0
- homeassistant/components/mqtt/translations/mk.json +9 -0
- homeassistant/components/nordpool/coordinator.py +3 -0
- homeassistant/components/ollama/__init__.py +30 -4
- homeassistant/components/ollama/config_flow.py +164 -135
- homeassistant/components/ollama/conversation.py +8 -243
- homeassistant/components/ollama/entity.py +261 -0
- homeassistant/components/ollama/strings.json +12 -10
- homeassistant/components/ollama/translations/bg.json +0 -1
- homeassistant/components/ollama/translations/ca.json +0 -10
- homeassistant/components/ollama/translations/cs.json +1 -10
- homeassistant/components/ollama/translations/de.json +1 -10
- homeassistant/components/ollama/translations/el.json +1 -10
- homeassistant/components/ollama/translations/en-GB.json +0 -10
- homeassistant/components/ollama/translations/en.json +12 -10
- homeassistant/components/ollama/translations/es.json +1 -10
- homeassistant/components/ollama/translations/et.json +1 -10
- homeassistant/components/ollama/translations/fi.json +0 -7
- homeassistant/components/ollama/translations/fr.json +1 -10
- homeassistant/components/ollama/translations/ga.json +0 -3
- homeassistant/components/ollama/translations/hu.json +1 -10
- homeassistant/components/ollama/translations/it.json +0 -8
- homeassistant/components/ollama/translations/ja.json +1 -10
- homeassistant/components/ollama/translations/lt.json +1 -10
- homeassistant/components/ollama/translations/pt-BR.json +0 -3
- homeassistant/components/ollama/translations/pt.json +1 -10
- homeassistant/components/ollama/translations/ru.json +1 -10
- homeassistant/components/ollama/translations/sk.json +1 -10
- homeassistant/components/ollama/translations/sv.json +1 -10
- homeassistant/components/ollama/translations/tr.json +0 -10
- homeassistant/components/ollama/translations/zh-Hans.json +1 -10
- homeassistant/components/ollama/translations/zh-Hant.json +1 -10
- homeassistant/components/opentherm_gw/translations/bg.json +3 -0
- homeassistant/components/opentherm_gw/translations/et.json +3 -0
- homeassistant/components/opentherm_gw/translations/ga.json +3 -0
- homeassistant/components/opentherm_gw/translations/lt.json +3 -0
- homeassistant/components/opentherm_gw/translations/mk.json +7 -0
- homeassistant/components/overkiz/translations/et.json +1 -0
- homeassistant/components/overkiz/translations/lt.json +1 -0
- homeassistant/components/pegel_online/translations/lt.json +2 -1
- homeassistant/components/playstation_network/translations/cs.json +0 -3
- homeassistant/components/playstation_network/translations/el.json +0 -3
- homeassistant/components/playstation_network/translations/en.json +1 -1
- homeassistant/components/playstation_network/translations/et.json +1 -1
- homeassistant/components/playstation_network/translations/ga.json +0 -3
- homeassistant/components/playstation_network/translations/hu.json +0 -3
- homeassistant/components/playstation_network/translations/it.json +0 -3
- homeassistant/components/playstation_network/translations/lt.json +29 -0
- homeassistant/components/playstation_network/translations/mk.json +0 -3
- homeassistant/components/playstation_network/translations/nl.json +0 -3
- homeassistant/components/playstation_network/translations/pl.json +0 -3
- homeassistant/components/playstation_network/translations/pt.json +0 -3
- homeassistant/components/playstation_network/translations/sk.json +1 -1
- homeassistant/components/playstation_network/translations/sv.json +0 -3
- homeassistant/components/playstation_network/translations/zh-Hans.json +0 -3
- homeassistant/components/playstation_network/translations/zh-Hant.json +1 -1
- homeassistant/components/proximity/translations/lt.json +25 -1
- homeassistant/components/proximity/translations/nl.json +3 -0
- homeassistant/components/reolink/translations/it.json +3 -0
- homeassistant/components/russound_rio/translations/nl.json +7 -0
- homeassistant/components/scrape/translations/it.json +1 -0
- homeassistant/components/sensibo/translations/pt-BR.json +4 -1
- homeassistant/components/sensibo/translations/sv.json +3 -3
- homeassistant/components/sensor/translations/it.json +1 -0
- homeassistant/components/shelly/__init__.py +8 -1
- homeassistant/components/shelly/const.py +3 -0
- homeassistant/components/shelly/repairs.py +87 -4
- homeassistant/components/shelly/strings.json +14 -0
- homeassistant/components/shelly/translations/bg.json +7 -0
- homeassistant/components/shelly/translations/de.json +14 -0
- homeassistant/components/shelly/translations/en.json +14 -0
- homeassistant/components/shelly/translations/es.json +14 -0
- homeassistant/components/shelly/translations/et.json +11 -0
- homeassistant/components/sma/config_flow.py +30 -1
- homeassistant/components/smarla/translations/it.json +9 -0
- homeassistant/components/sonos/translations/lt.json +9 -0
- homeassistant/components/sql/translations/it.json +1 -0
- homeassistant/components/squeezebox/translations/it.json +5 -0
- homeassistant/components/subaru/translations/lt.json +2 -0
- homeassistant/components/subaru/translations/mk.json +12 -0
- homeassistant/components/switchbot/translations/et.json +1 -0
- homeassistant/components/switchbot/translations/ga.json +13 -0
- homeassistant/components/switchbot/translations/lt.json +54 -0
- homeassistant/components/switchbot/translations/nl.json +1 -0
- homeassistant/components/switchbot/translations/sk.json +1 -0
- homeassistant/components/switchbot/translations/zh-Hant.json +1 -0
- homeassistant/components/telegram_bot/translations/bg.json +10 -0
- homeassistant/components/telegram_bot/translations/ga.json +28 -0
- homeassistant/components/telegram_bot/translations/it.json +12 -0
- homeassistant/components/telegram_bot/translations/lt.json +30 -0
- homeassistant/components/telegram_bot/translations/mk.json +5 -0
- homeassistant/components/template/translations/it.json +1 -0
- homeassistant/components/tesla_fleet/translations/et.json +3 -0
- homeassistant/components/tesla_fleet/translations/lt.json +3 -0
- homeassistant/components/tesla_fleet/translations/mk.json +3 -0
- homeassistant/components/tesla_fleet/translations/nl.json +3 -0
- homeassistant/components/thermopro/manifest.json +1 -1
- homeassistant/components/thermopro/translations/lt.json +7 -0
- homeassistant/components/tuya/translations/it.json +13 -0
- homeassistant/components/unifiprotect/config_flow.py +1 -1
- homeassistant/components/uptimerobot/translations/it.json +5 -0
- homeassistant/components/weatherflow_cloud/translations/ga.json +1 -0
- homeassistant/components/webdav/translations/et.json +1 -0
- homeassistant/components/webdav/translations/ga.json +1 -0
- homeassistant/components/webdav/translations/lt.json +1 -0
- homeassistant/components/webdav/translations/mk.json +7 -0
- homeassistant/components/whirlpool/translations/et.json +1 -0
- homeassistant/components/whirlpool/translations/lt.json +1 -0
- homeassistant/components/withings/translations/it.json +3 -0
- homeassistant/components/zha/translations/it.json +10 -2
- homeassistant/components/zha/translations/lt.json +50 -0
- homeassistant/components/zwave_js/strings.json +11 -11
- homeassistant/components/zwave_js/translations/bg.json +6 -20
- homeassistant/components/zwave_js/translations/cs.json +0 -22
- homeassistant/components/zwave_js/translations/de.json +0 -23
- homeassistant/components/zwave_js/translations/el.json +0 -23
- homeassistant/components/zwave_js/translations/en-GB.json +0 -23
- homeassistant/components/zwave_js/translations/en.json +11 -11
- homeassistant/components/zwave_js/translations/es.json +0 -23
- homeassistant/components/zwave_js/translations/et.json +0 -23
- homeassistant/components/zwave_js/translations/fi.json +0 -11
- homeassistant/components/zwave_js/translations/ga.json +16 -22
- homeassistant/components/zwave_js/translations/hu.json +0 -23
- homeassistant/components/zwave_js/translations/it.json +0 -12
- homeassistant/components/zwave_js/translations/lt.json +32 -22
- homeassistant/components/zwave_js/translations/mk.json +16 -7
- homeassistant/components/zwave_js/translations/pt.json +0 -23
- homeassistant/components/zwave_js/translations/ru.json +0 -21
- homeassistant/components/zwave_js/translations/sk.json +0 -23
- homeassistant/components/zwave_js/translations/sv.json +0 -23
- homeassistant/components/zwave_js/translations/tr.json +0 -13
- homeassistant/components/zwave_js/translations/zh-Hans.json +0 -23
- homeassistant/components/zwave_js/translations/zh-Hant.json +0 -23
- homeassistant/const.py +1 -1
- homeassistant/package_constraints.txt +1 -1
- {homeassistant-2025.7.0b8.dist-info → homeassistant-2025.7.0b9.dist-info}/METADATA +1 -1
- {homeassistant-2025.7.0b8.dist-info → homeassistant-2025.7.0b9.dist-info}/RECORD +206 -201
- homeassistant/components/zwave_js/translations/sq.json +0 -7
- {homeassistant-2025.7.0b8.dist-info → homeassistant-2025.7.0b9.dist-info}/WHEEL +0 -0
- {homeassistant-2025.7.0b8.dist-info → homeassistant-2025.7.0b9.dist-info}/entry_points.txt +0 -0
- {homeassistant-2025.7.0b8.dist-info → homeassistant-2025.7.0b9.dist-info}/licenses/LICENSE.md +0 -0
- {homeassistant-2025.7.0b8.dist-info → homeassistant-2025.7.0b9.dist-info}/licenses/homeassistant/backports/LICENSE.Python +0 -0
- {homeassistant-2025.7.0b8.dist-info → homeassistant-2025.7.0b9.dist-info}/top_level.txt +0 -0

```diff
--- a/homeassistant/components/ollama/conversation.py
+++ b/homeassistant/components/ollama/conversation.py
@@ -2,41 +2,18 @@
 
 from __future__ import annotations
 
-from collections.abc import AsyncGenerator, AsyncIterator, Callable
-import json
-import logging
-from typing import Any, Literal
-
-import ollama
-from voluptuous_openapi import convert
+from typing import Literal
 
 from homeassistant.components import assist_pipeline, conversation
 from homeassistant.config_entries import ConfigEntry, ConfigSubentry
 from homeassistant.const import CONF_LLM_HASS_API, MATCH_ALL
 from homeassistant.core import HomeAssistant
-from homeassistant.exceptions import HomeAssistantError
-from homeassistant.helpers import device_registry as dr, intent, llm
+from homeassistant.helpers import intent
 from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
 
 from . import OllamaConfigEntry
-from .const import (
-    CONF_KEEP_ALIVE,
-    CONF_MAX_HISTORY,
-    CONF_MODEL,
-    CONF_NUM_CTX,
-    CONF_PROMPT,
-    CONF_THINK,
-    DEFAULT_KEEP_ALIVE,
-    DEFAULT_MAX_HISTORY,
-    DEFAULT_NUM_CTX,
-    DOMAIN,
-)
-from .models import MessageHistory, MessageRole
-
-# Max number of back and forth with the LLM to generate a response
-MAX_TOOL_ITERATIONS = 10
-
-_LOGGER = logging.getLogger(__name__)
+from .const import CONF_PROMPT, DOMAIN
+from .entity import OllamaBaseLLMEntity
 
 
 async def async_setup_entry(
@@ -55,129 +32,10 @@ async def async_setup_entry(
     )
 
 
-def _format_tool(
-    tool: llm.Tool, custom_serializer: Callable[[Any], Any] | None
-) -> dict[str, Any]:
-    """Format tool specification."""
-    tool_spec = {
-        "name": tool.name,
-        "parameters": convert(tool.parameters, custom_serializer=custom_serializer),
-    }
-    if tool.description:
-        tool_spec["description"] = tool.description
-    return {"type": "function", "function": tool_spec}
-
-
-def _fix_invalid_arguments(value: Any) -> Any:
-    """Attempt to repair incorrectly formatted json function arguments.
-
-    Small models (for example llama3.1 8B) may produce invalid argument values
-    which we attempt to repair here.
-    """
-    if not isinstance(value, str):
-        return value
-    if (value.startswith("[") and value.endswith("]")) or (
-        value.startswith("{") and value.endswith("}")
-    ):
-        try:
-            return json.loads(value)
-        except json.decoder.JSONDecodeError:
-            pass
-    return value
-
-
-def _parse_tool_args(arguments: dict[str, Any]) -> dict[str, Any]:
-    """Rewrite ollama tool arguments.
-
-    This function improves tool use quality by fixing common mistakes made by
-    small local tool use models. This will repair invalid json arguments and
-    omit unnecessary arguments with empty values that will fail intent parsing.
-    """
-    return {k: _fix_invalid_arguments(v) for k, v in arguments.items() if v}
-
-
-def _convert_content(
-    chat_content: (
-        conversation.Content
-        | conversation.ToolResultContent
-        | conversation.AssistantContent
-    ),
-) -> ollama.Message:
-    """Create tool response content."""
-    if isinstance(chat_content, conversation.ToolResultContent):
-        return ollama.Message(
-            role=MessageRole.TOOL.value,
-            content=json.dumps(chat_content.tool_result),
-        )
-    if isinstance(chat_content, conversation.AssistantContent):
-        return ollama.Message(
-            role=MessageRole.ASSISTANT.value,
-            content=chat_content.content,
-            tool_calls=[
-                ollama.Message.ToolCall(
-                    function=ollama.Message.ToolCall.Function(
-                        name=tool_call.tool_name,
-                        arguments=tool_call.tool_args,
-                    )
-                )
-                for tool_call in chat_content.tool_calls or ()
-            ],
-        )
-    if isinstance(chat_content, conversation.UserContent):
-        return ollama.Message(
-            role=MessageRole.USER.value,
-            content=chat_content.content,
-        )
-    if isinstance(chat_content, conversation.SystemContent):
-        return ollama.Message(
-            role=MessageRole.SYSTEM.value,
-            content=chat_content.content,
-        )
-    raise TypeError(f"Unexpected content type: {type(chat_content)}")
-
-
-async def _transform_stream(
-    result: AsyncIterator[ollama.ChatResponse],
-) -> AsyncGenerator[conversation.AssistantContentDeltaDict]:
-    """Transform the response stream into HA format.
-
-    An Ollama streaming response may come in chunks like this:
-
-    response: message=Message(role="assistant", content="Paris")
-    response: message=Message(role="assistant", content=".")
-    response: message=Message(role="assistant", content=""), done: True, done_reason: "stop"
-    response: message=Message(role="assistant", tool_calls=[...])
-    response: message=Message(role="assistant", content=""), done: True, done_reason: "stop"
-
-    This generator conforms to the chatlog delta stream expectations in that it
-    yields deltas, then the role only once the response is done.
-    """
-
-    new_msg = True
-    async for response in result:
-        _LOGGER.debug("Received response: %s", response)
-        response_message = response["message"]
-        chunk: conversation.AssistantContentDeltaDict = {}
-        if new_msg:
-            new_msg = False
-            chunk["role"] = "assistant"
-        if (tool_calls := response_message.get("tool_calls")) is not None:
-            chunk["tool_calls"] = [
-                llm.ToolInput(
-                    tool_name=tool_call["function"]["name"],
-                    tool_args=_parse_tool_args(tool_call["function"]["arguments"]),
-                )
-                for tool_call in tool_calls
-            ]
-        if (content := response_message.get("content")) is not None:
-            chunk["content"] = content
-        if response_message.get("done"):
-            new_msg = True
-        yield chunk
-
-
 class OllamaConversationEntity(
-    conversation.ConversationEntity, conversation.AbstractConversationAgent
+    conversation.ConversationEntity,
+    conversation.AbstractConversationAgent,
+    OllamaBaseLLMEntity,
 ):
     """Ollama conversation agent."""
 
```
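
The `_transform_stream` helper removed above (and re-added unchanged in the new entity.py below) follows a small contract: the assistant role is emitted only on the first chunk of each message, and later chunks carry only content or tool-call deltas. A minimal runnable sketch of that contract, using a hand-written fake stream in place of a live Ollama server (the payload values are invented for illustration):

```python
# Sketch of the chunk-assembly contract used by _transform_stream,
# driven by a fake stream instead of a real Ollama response.
import asyncio

async def fake_stream():
    yield {"message": {"role": "assistant", "content": "Paris"}}
    yield {"message": {"role": "assistant", "content": "."}}
    yield {"message": {"role": "assistant", "content": "", "done": True}}

async def transform(result):
    new_msg = True
    async for response in result:
        message = response["message"]
        chunk = {}
        if new_msg:
            new_msg = False
            chunk["role"] = "assistant"  # role only on the first chunk
        if (content := message.get("content")) is not None:
            chunk["content"] = content
        if message.get("done"):
            new_msg = True  # next chunk starts a new message
        yield chunk

async def main():
    async for chunk in transform(fake_stream()):
        print(chunk)

asyncio.run(main())
# {'role': 'assistant', 'content': 'Paris'}
# {'content': '.'}
# {'content': ''}
```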
homeassistant/components/ollama/conversation.py (continued):

```diff
@@ -185,17 +43,7 @@ class OllamaConversationEntity(
 
     def __init__(self, entry: OllamaConfigEntry, subentry: ConfigSubentry) -> None:
         """Initialize the agent."""
-        self.entry = entry
-        self.subentry = subentry
-        self._attr_name = subentry.title
-        self._attr_unique_id = subentry.subentry_id
-        self._attr_device_info = dr.DeviceInfo(
-            identifiers={(DOMAIN, subentry.subentry_id)},
-            name=subentry.title,
-            manufacturer="Ollama",
-            model=entry.data[CONF_MODEL],
-            entry_type=dr.DeviceEntryType.SERVICE,
-        )
+        super().__init__(entry, subentry)
         if self.subentry.data.get(CONF_LLM_HASS_API):
             self._attr_supported_features = (
                 conversation.ConversationEntityFeature.CONTROL
```
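
After the refactor, `OllamaConversationEntity.__init__` delegates the shared setup to `OllamaBaseLLMEntity` through `super().__init__(entry, subentry)`. A simplified sketch with stand-in classes (the real class also mixes in `conversation.AbstractConversationAgent`; this only illustrates where the call lands in the method resolution order):

```python
# Stand-in classes, not the real Home Assistant ones: demonstrates that
# super().__init__ in the subclass dispatches to OllamaBaseLLMEntity.
class Entity:  # stand-in for homeassistant.helpers.entity.Entity
    pass

class OllamaBaseLLMEntity(Entity):
    def __init__(self, entry, subentry):
        self.entry = entry
        self.subentry = subentry

class ConversationEntity(Entity):  # defines no __init__ of its own
    pass

class OllamaConversationEntity(ConversationEntity, OllamaBaseLLMEntity):
    def __init__(self, entry, subentry):
        # ConversationEntity has no __init__, so this resolves to
        # OllamaBaseLLMEntity.__init__ along the MRO.
        super().__init__(entry, subentry)

agent = OllamaConversationEntity("entry", "subentry")
print([c.__name__ for c in type(agent).__mro__])
# ['OllamaConversationEntity', 'ConversationEntity',
#  'OllamaBaseLLMEntity', 'Entity', 'object']
```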
homeassistant/components/ollama/conversation.py (continued):

```diff
@@ -255,89 +103,6 @@ class OllamaConversationEntity(
             continue_conversation=chat_log.continue_conversation,
         )
 
-    async def _async_handle_chat_log(
-        self,
-        chat_log: conversation.ChatLog,
-    ) -> None:
-        """Generate an answer for the chat log."""
-        settings = {**self.entry.data, **self.subentry.data}
-
-        client = self.entry.runtime_data
-        model = settings[CONF_MODEL]
-
-        tools: list[dict[str, Any]] | None = None
-        if chat_log.llm_api:
-            tools = [
-                _format_tool(tool, chat_log.llm_api.custom_serializer)
-                for tool in chat_log.llm_api.tools
-            ]
-
-        message_history: MessageHistory = MessageHistory(
-            [_convert_content(content) for content in chat_log.content]
-        )
-        max_messages = int(settings.get(CONF_MAX_HISTORY, DEFAULT_MAX_HISTORY))
-        self._trim_history(message_history, max_messages)
-
-        # Get response
-        # To prevent infinite loops, we limit the number of iterations
-        for _iteration in range(MAX_TOOL_ITERATIONS):
-            try:
-                response_generator = await client.chat(
-                    model=model,
-                    # Make a copy of the messages because we mutate the list later
-                    messages=list(message_history.messages),
-                    tools=tools,
-                    stream=True,
-                    # keep_alive requires specifying unit. In this case, seconds
-                    keep_alive=f"{settings.get(CONF_KEEP_ALIVE, DEFAULT_KEEP_ALIVE)}s",
-                    options={CONF_NUM_CTX: settings.get(CONF_NUM_CTX, DEFAULT_NUM_CTX)},
-                    think=settings.get(CONF_THINK),
-                )
-            except (ollama.RequestError, ollama.ResponseError) as err:
-                _LOGGER.error("Unexpected error talking to Ollama server: %s", err)
-                raise HomeAssistantError(
-                    f"Sorry, I had a problem talking to the Ollama server: {err}"
-                ) from err
-
-            message_history.messages.extend(
-                [
-                    _convert_content(content)
-                    async for content in chat_log.async_add_delta_content_stream(
-                        self.entity_id, _transform_stream(response_generator)
-                    )
-                ]
-            )
-
-            if not chat_log.unresponded_tool_results:
-                break
-
-    def _trim_history(self, message_history: MessageHistory, max_messages: int) -> None:
-        """Trims excess messages from a single history.
-
-        This sets the max history to allow a configurable size history may take
-        up in the context window.
-
-        Note that some messages in the history may not be from ollama only, and
-        may come from other agents, so the assumptions here may not strictly hold,
-        but generally should be effective.
-        """
-        if max_messages < 1:
-            # Keep all messages
-            return
-
-        # Ignore the in progress user message
-        num_previous_rounds = message_history.num_user_messages - 1
-        if num_previous_rounds >= max_messages:
-            # Trim history but keep system prompt (first message).
-            # Every other message should be an assistant message, so keep 2x
-            # message objects. Also keep the last in progress user message
-            num_keep = 2 * max_messages + 1
-            drop_index = len(message_history.messages) - num_keep
-            message_history.messages = [
-                message_history.messages[0],
-                *message_history.messages[drop_index:],
-            ]
-
     async def _async_entry_update_listener(
         self, hass: HomeAssistant, entry: ConfigEntry
     ) -> None:
```
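
The `_trim_history` arithmetic deleted here (and moved into entity.py below) keeps the system prompt plus `num_keep = 2 * max_messages + 1` trailing messages: one user and one assistant message per retained round, plus the in-progress user message. A worked example on a toy history, with the message strings invented for illustration:

```python
# Worked example of the _trim_history arithmetic with max_messages=2.
messages = ["sys", "u1", "a1", "u2", "a2", "u3", "a3", "u4"]
max_messages = 2

num_user_messages = sum(1 for m in messages if m.startswith("u"))
num_previous_rounds = num_user_messages - 1  # ignore the in-progress "u4"
if num_previous_rounds >= max_messages:
    # Keep the system prompt plus 2 * 2 + 1 = 5 trailing messages:
    # two full user/assistant rounds and the in-progress user message.
    num_keep = 2 * max_messages + 1
    drop_index = len(messages) - num_keep
    messages = [messages[0], *messages[drop_index:]]

print(messages)  # ['sys', 'u2', 'a2', 'u3', 'a3', 'u4']
```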
```diff
--- /dev/null
+++ b/homeassistant/components/ollama/entity.py
@@ -0,0 +1,261 @@
+"""Base entity for the Ollama integration."""
+
+from __future__ import annotations
+
+from collections.abc import AsyncGenerator, AsyncIterator, Callable
+import json
+import logging
+from typing import Any
+
+import ollama
+from voluptuous_openapi import convert
+
+from homeassistant.components import conversation
+from homeassistant.config_entries import ConfigSubentry
+from homeassistant.exceptions import HomeAssistantError
+from homeassistant.helpers import device_registry as dr, llm
+from homeassistant.helpers.entity import Entity
+
+from . import OllamaConfigEntry
+from .const import (
+    CONF_KEEP_ALIVE,
+    CONF_MAX_HISTORY,
+    CONF_MODEL,
+    CONF_NUM_CTX,
+    CONF_THINK,
+    DEFAULT_KEEP_ALIVE,
+    DEFAULT_MAX_HISTORY,
+    DEFAULT_NUM_CTX,
+    DOMAIN,
+)
+from .models import MessageHistory, MessageRole
+
+# Max number of back and forth with the LLM to generate a response
+MAX_TOOL_ITERATIONS = 10
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def _format_tool(
+    tool: llm.Tool, custom_serializer: Callable[[Any], Any] | None
+) -> dict[str, Any]:
+    """Format tool specification."""
+    tool_spec = {
+        "name": tool.name,
+        "parameters": convert(tool.parameters, custom_serializer=custom_serializer),
+    }
+    if tool.description:
+        tool_spec["description"] = tool.description
+    return {"type": "function", "function": tool_spec}
+
+
+def _fix_invalid_arguments(value: Any) -> Any:
+    """Attempt to repair incorrectly formatted json function arguments.
+
+    Small models (for example llama3.1 8B) may produce invalid argument values
+    which we attempt to repair here.
+    """
+    if not isinstance(value, str):
+        return value
+    if (value.startswith("[") and value.endswith("]")) or (
+        value.startswith("{") and value.endswith("}")
+    ):
+        try:
+            return json.loads(value)
+        except json.decoder.JSONDecodeError:
+            pass
+    return value
+
+
+def _parse_tool_args(arguments: dict[str, Any]) -> dict[str, Any]:
+    """Rewrite ollama tool arguments.
+
+    This function improves tool use quality by fixing common mistakes made by
+    small local tool use models. This will repair invalid json arguments and
+    omit unnecessary arguments with empty values that will fail intent parsing.
+    """
+    return {k: _fix_invalid_arguments(v) for k, v in arguments.items() if v}
+
+
+def _convert_content(
+    chat_content: (
+        conversation.Content
+        | conversation.ToolResultContent
+        | conversation.AssistantContent
+    ),
+) -> ollama.Message:
+    """Create tool response content."""
+    if isinstance(chat_content, conversation.ToolResultContent):
+        return ollama.Message(
+            role=MessageRole.TOOL.value,
+            content=json.dumps(chat_content.tool_result),
+        )
+    if isinstance(chat_content, conversation.AssistantContent):
+        return ollama.Message(
+            role=MessageRole.ASSISTANT.value,
+            content=chat_content.content,
+            tool_calls=[
+                ollama.Message.ToolCall(
+                    function=ollama.Message.ToolCall.Function(
+                        name=tool_call.tool_name,
+                        arguments=tool_call.tool_args,
+                    )
+                )
+                for tool_call in chat_content.tool_calls or ()
+            ],
+        )
+    if isinstance(chat_content, conversation.UserContent):
+        return ollama.Message(
+            role=MessageRole.USER.value,
+            content=chat_content.content,
+        )
+    if isinstance(chat_content, conversation.SystemContent):
+        return ollama.Message(
+            role=MessageRole.SYSTEM.value,
+            content=chat_content.content,
+        )
+    raise TypeError(f"Unexpected content type: {type(chat_content)}")
+
+
+async def _transform_stream(
+    result: AsyncIterator[ollama.ChatResponse],
+) -> AsyncGenerator[conversation.AssistantContentDeltaDict]:
+    """Transform the response stream into HA format.
+
+    An Ollama streaming response may come in chunks like this:
+
+    response: message=Message(role="assistant", content="Paris")
+    response: message=Message(role="assistant", content=".")
+    response: message=Message(role="assistant", content=""), done: True, done_reason: "stop"
+    response: message=Message(role="assistant", tool_calls=[...])
+    response: message=Message(role="assistant", content=""), done: True, done_reason: "stop"
+
+    This generator conforms to the chatlog delta stream expectations in that it
+    yields deltas, then the role only once the response is done.
+    """
+
+    new_msg = True
+    async for response in result:
+        _LOGGER.debug("Received response: %s", response)
+        response_message = response["message"]
+        chunk: conversation.AssistantContentDeltaDict = {}
+        if new_msg:
+            new_msg = False
+            chunk["role"] = "assistant"
+        if (tool_calls := response_message.get("tool_calls")) is not None:
+            chunk["tool_calls"] = [
+                llm.ToolInput(
+                    tool_name=tool_call["function"]["name"],
+                    tool_args=_parse_tool_args(tool_call["function"]["arguments"]),
+                )
+                for tool_call in tool_calls
+            ]
+        if (content := response_message.get("content")) is not None:
+            chunk["content"] = content
+        if response_message.get("done"):
+            new_msg = True
+        yield chunk
+
+
+class OllamaBaseLLMEntity(Entity):
+    """Ollama base LLM entity."""
+
+    def __init__(self, entry: OllamaConfigEntry, subentry: ConfigSubentry) -> None:
+        """Initialize the entity."""
+        self.entry = entry
+        self.subentry = subentry
+        self._attr_name = subentry.title
+        self._attr_unique_id = subentry.subentry_id
+
+        model, _, version = subentry.data[CONF_MODEL].partition(":")
+        self._attr_device_info = dr.DeviceInfo(
+            identifiers={(DOMAIN, subentry.subentry_id)},
+            name=subentry.title,
+            manufacturer="Ollama",
+            model=model,
+            sw_version=version or "latest",
+            entry_type=dr.DeviceEntryType.SERVICE,
+        )
+
+    async def _async_handle_chat_log(
+        self,
+        chat_log: conversation.ChatLog,
+    ) -> None:
+        """Generate an answer for the chat log."""
+        settings = {**self.entry.data, **self.subentry.data}
+
+        client = self.entry.runtime_data
+        model = settings[CONF_MODEL]
+
+        tools: list[dict[str, Any]] | None = None
+        if chat_log.llm_api:
+            tools = [
+                _format_tool(tool, chat_log.llm_api.custom_serializer)
+                for tool in chat_log.llm_api.tools
+            ]
+
+        message_history: MessageHistory = MessageHistory(
+            [_convert_content(content) for content in chat_log.content]
+        )
+        max_messages = int(settings.get(CONF_MAX_HISTORY, DEFAULT_MAX_HISTORY))
+        self._trim_history(message_history, max_messages)
+
+        # Get response
+        # To prevent infinite loops, we limit the number of iterations
+        for _iteration in range(MAX_TOOL_ITERATIONS):
+            try:
+                response_generator = await client.chat(
+                    model=model,
+                    # Make a copy of the messages because we mutate the list later
+                    messages=list(message_history.messages),
+                    tools=tools,
+                    stream=True,
+                    # keep_alive requires specifying unit. In this case, seconds
+                    keep_alive=f"{settings.get(CONF_KEEP_ALIVE, DEFAULT_KEEP_ALIVE)}s",
+                    options={CONF_NUM_CTX: settings.get(CONF_NUM_CTX, DEFAULT_NUM_CTX)},
+                    think=settings.get(CONF_THINK),
+                )
+            except (ollama.RequestError, ollama.ResponseError) as err:
+                _LOGGER.error("Unexpected error talking to Ollama server: %s", err)
+                raise HomeAssistantError(
+                    f"Sorry, I had a problem talking to the Ollama server: {err}"
+                ) from err
+
+            message_history.messages.extend(
+                [
+                    _convert_content(content)
+                    async for content in chat_log.async_add_delta_content_stream(
+                        self.entity_id, _transform_stream(response_generator)
+                    )
+                ]
+            )
+
+            if not chat_log.unresponded_tool_results:
+                break
+
+    def _trim_history(self, message_history: MessageHistory, max_messages: int) -> None:
+        """Trims excess messages from a single history.
+
+        This sets the max history to allow a configurable size history may take
+        up in the context window.
+
+        Note that some messages in the history may not be from ollama only, and
+        may come from other agents, so the assumptions here may not strictly hold,
+        but generally should be effective.
+        """
+        if max_messages < 1:
+            # Keep all messages
+            return
+
+        # Ignore the in progress user message
+        num_previous_rounds = message_history.num_user_messages - 1
+        if num_previous_rounds >= max_messages:
+            # Trim history but keep system prompt (first message).
+            # Every other message should be an assistant message, so keep 2x
+            # message objects. Also keep the last in progress user message
+            num_keep = 2 * max_messages + 1
+            drop_index = len(message_history.messages) - num_keep
+            message_history.messages = [
+                message_history.messages[0],
+                *message_history.messages[drop_index:],
+            ]
```
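
A small behavioral change hides in the new device info: the model is now read from the subentry rather than the config entry, and the configured model string is split into name and tag so the tag can be reported as `sw_version` (falling back to "latest"). The split is plain `str.partition`:

```python
# str.partition(":") returns (text before the first ":", the separator,
# the rest); an untagged model yields "" for the tag, so sw_version
# falls back to "latest".
for model_string in ("llama3.2:3b", "llama3.2"):
    model, _, version = model_string.partition(":")
    print(model, version or "latest")
# llama3.2 3b
# llama3.2 latest
```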
```diff
--- a/homeassistant/components/ollama/strings.json
+++ b/homeassistant/components/ollama/strings.json
@@ -3,24 +3,17 @@
     "step": {
       "user": {
         "data": {
-          "url": "[%key:common::config_flow::data::url%]",
-          "model": "Model"
+          "url": "[%key:common::config_flow::data::url%]"
         }
-      },
-      "download": {
-        "title": "Downloading model"
       }
     },
     "abort": {
-      "download_failed": "Model downloading failed",
       "already_configured": "[%key:common::config_flow::abort::already_configured_service%]"
     },
     "error": {
+      "invalid_url": "[%key:common::config_flow::error::invalid_host%]",
       "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
       "unknown": "[%key:common::config_flow::error::unknown%]"
-    },
-    "progress": {
-      "download": "Please wait while the model is downloaded, which may take a very long time. Check your Ollama server logs for more details."
     }
   },
   "config_subentries": {
@@ -33,6 +26,7 @@
       "step": {
         "set_options": {
           "data": {
+            "model": "Model",
             "name": "[%key:common::config_flow::data::name%]",
             "prompt": "Instructions",
             "llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]",
@@ -47,11 +41,19 @@
             "num_ctx": "Maximum number of text tokens the model can process. Lower to reduce Ollama RAM, or increase for a large number of exposed entities.",
             "think": "If enabled, the LLM will think before responding. This can improve response quality but may increase latency."
           }
+        },
+        "download": {
+          "title": "Downloading model"
         }
       },
       "abort": {
         "reconfigure_successful": "[%key:common::config_flow::abort::reconfigure_successful%]",
-        "entry_not_loaded": "Cannot add things while the configuration is disabled."
+        "entry_not_loaded": "Failed to add agent. The configuration is disabled.",
+        "download_failed": "Model downloading failed",
+        "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]"
+      },
+      "progress": {
+        "download": "Please wait while the model is downloaded, which may take a very long time. Check your Ollama server logs for more details."
       }
     }
   }
```
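
Values of the form `[%key:...%]` in strings.json are not literal UI text; they are references into Home Assistant's shared translation strings, resolved by HA's own tooling at build time. A toy resolver, only to show the shape of the convention (the `COMMON` table and `resolve` helper here are hypothetical, not HA's implementation):

```python
# Illustration only: resolve "[%key:...%]" references against an assumed
# table of common strings.
import re

COMMON = {
    "common::config_flow::data::url": "URL",
    "common::config_flow::error::cannot_connect": "Failed to connect",
}

def resolve(value: str) -> str:
    match = re.fullmatch(r"\[%key:(.+)%\]", value)
    return COMMON[match.group(1)] if match else value

print(resolve("[%key:common::config_flow::data::url%]"))  # -> URL
```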
```diff
--- a/homeassistant/components/ollama/translations/ca.json
+++ b/homeassistant/components/ollama/translations/ca.json
@@ -1,22 +1,12 @@
 {
   "config": {
-    "abort": {
-      "download_failed": "Ha fallat la baixada del model"
-    },
     "error": {
       "cannot_connect": "Ha fallat la connexi\u00f3",
       "unknown": "Error inesperat"
     },
-    "progress": {
-      "download": "Espera mentre es baixa el model, aix\u00f2 pot trigar molt temps. Consulta els registres del teu servidor Ollama per a m\u00e9s detalls."
-    },
     "step": {
-      "download": {
-        "title": "Baixant model"
-      },
       "user": {
         "data": {
-          "model": "Model",
           "url": "URL"
         }
       }
--- a/homeassistant/components/ollama/translations/cs.json
+++ b/homeassistant/components/ollama/translations/cs.json
@@ -1,23 +1,15 @@
 {
   "config": {
     "abort": {
-      "already_configured": "Slu\u017eba je ji\u017e nastavena",
-      "download_failed": "Sta\u017een\u00ed modelu se nezda\u0159ilo"
+      "already_configured": "Slu\u017eba je ji\u017e nastavena"
     },
     "error": {
       "cannot_connect": "Nepoda\u0159ilo se p\u0159ipojit",
       "unknown": "Neo\u010dek\u00e1van\u00e1 chyba"
     },
-    "progress": {
-      "download": "Po\u010dkejte, ne\u017e se model st\u00e1hne, co\u017e m\u016f\u017ee trvat velmi dlouho. Dal\u0161\u00ed podrobnosti naleznete v protokolech serveru Ollama."
-    },
     "step": {
-      "download": {
-        "title": "Stahov\u00e1n\u00ed modelu"
-      },
       "user": {
         "data": {
-          "model": "Model",
           "url": "URL"
         }
       }
@@ -26,7 +18,6 @@
   "config_subentries": {
     "conversation": {
       "abort": {
-        "entry_not_loaded": "Nelze p\u0159id\u00e1vat polo\u017eky, pokud je nastaven\u00ed zak\u00e1z\u00e1no.",
         "reconfigure_successful": "P\u0159enastaven\u00ed bylo \u00fasp\u011b\u0161n\u00e9"
       },
       "entry_type": "Agent pro konverzaci",
```