hassl 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hassl/__init__.py +0 -0
- hassl/ast/__init__.py +0 -0
- hassl/ast/nodes.py +34 -0
- hassl/cli.py +42 -0
- hassl/codegen/__init__.py +22 -0
- hassl/codegen/package.py +335 -0
- hassl/codegen/rules_min.py +663 -0
- hassl/codegen/yaml_emit.py +77 -0
- hassl/parser/__init__.py +0 -0
- hassl/parser/transform.py +272 -0
- hassl/semantics/__init__.py +0 -0
- hassl/semantics/analyzer.py +145 -0
- hassl/semantics/domains.py +8 -0
- hassl-0.2.0.dist-info/METADATA +167 -0
- hassl-0.2.0.dist-info/RECORD +19 -0
- hassl-0.2.0.dist-info/WHEEL +5 -0
- hassl-0.2.0.dist-info/entry_points.txt +2 -0
- hassl-0.2.0.dist-info/licenses/LICENSE +21 -0
- hassl-0.2.0.dist-info/top_level.txt +1 -0
hassl/__init__.py
ADDED
|
File without changes
|
hassl/ast/__init__.py
ADDED
|
File without changes
|
hassl/ast/nodes.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
from dataclasses import dataclass, asdict, field
|
|
2
|
+
from typing import List, Any, Dict
|
|
3
|
+
|
|
4
|
+
@dataclass
class Alias:
    """A named shorthand for a Home Assistant entity id."""
    name: str
    entity: str


@dataclass
class Sync:
    """A group of entities whose state is kept in lockstep."""
    kind: str
    members: List[str]
    name: str
    invert: List[str] = field(default_factory=list)


@dataclass
class IfClause:
    """One condition/actions pair inside a Rule."""
    condition: Dict[str, Any]
    actions: List[Dict[str, Any]]


@dataclass
class Rule:
    """A named rule made up of ordered if-clauses."""
    name: str
    clauses: List[IfClause]


@dataclass
class Program:
    """Root AST node: the ordered list of parsed statements."""
    statements: List[object]

    def to_dict(self):
        """Serialize to plain dicts; AST nodes gain a 'type' discriminator key."""
        def enc(node):
            # Non-AST values (numbers, strings, plain dicts) pass through as-is.
            if not isinstance(node, (Alias, Sync, Rule, IfClause)):
                return node
            encoded = asdict(node)
            encoded["type"] = type(node).__name__
            return encoded

        return {"type": "Program", "statements": [enc(stmt) for stmt in self.statements]}
|
hassl/cli.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import os, json
|
|
3
|
+
from .parser.transform import HasslTransformer
|
|
4
|
+
from .ast.nodes import Program
|
|
5
|
+
from lark import Lark
|
|
6
|
+
from .semantics.analyzer import analyze
|
|
7
|
+
from .codegen.package import emit_package
|
|
8
|
+
from .codegen import generate as codegen_generate
|
|
9
|
+
|
|
10
|
+
# The .lark grammar ships inside the package, next to the parser module.
GRAMMAR_PATH = os.path.join(os.path.dirname(__file__), "parser", "hassl.lark")
|
|
11
|
+
|
|
12
|
+
def parse_hassl(text: str) -> Program:
    """Parse HASSL source text into a Program AST."""
    with open(GRAMMAR_PATH) as grammar_file:
        grammar_text = grammar_file.read()
    # LALR parser; maybe_placeholders disabled so optional rules don't yield None tokens.
    lalr = Lark(grammar_text, start="start", parser="lalr", maybe_placeholders=False)
    return HasslTransformer().transform(lalr.parse(text))
|
|
19
|
+
|
|
20
|
+
def main():
    """CLI entry point: compile a .hassl file into a Home Assistant package.

    Pipeline: parse -> analyze (IR) -> codegen (helpers/scripts/automations),
    plus a DEBUG_ir.json dump next to the generated package.
    """
    ap = argparse.ArgumentParser(prog="hasslc", description="HASSL Compiler")
    ap.add_argument("input", help="Input .hassl file")
    ap.add_argument("-o", "--out", default="./packages/out", help="Output directory for HA package")
    args = ap.parse_args()

    with open(args.input) as f:
        src = f.read()

    program = parse_hassl(src)
    print("[hasslc] AST:", program.to_dict())
    ir = analyze(program)
    # Convert once; analyze() may already return a plain dict.
    ir_dict = ir.to_dict() if hasattr(ir, "to_dict") else ir
    print("[hasslc] IR:", ir_dict)

    os.makedirs(args.out, exist_ok=True)
    # Pass the IR *object*: generate() runs both passes itself —
    # emit_package (which reads the object's .syncs attribute) and
    # generate_rules (which converts to a dict on its own). Previously
    # emit_package was also invoked directly here, emitting the package twice
    # and printing the completion message twice.
    codegen_generate(ir, args.out)

    # Dump the IR alongside the package to ease debugging of generated YAML.
    with open(os.path.join(args.out, "DEBUG_ir.json"), "w") as dbg:
        json.dump(ir_dict, dbg, indent=2)
    print(f"[hasslc] Package written to {args.out}")
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
from .package import emit_package
|
|
3
|
+
from .rules_min import generate_rules
|
|
4
|
+
|
|
5
|
+
def generate(ir_obj, outdir):
    """
    Orchestrate codegen in a merge-safe order:
      1) emit_package: writes/merges helpers, scripts, and sync automations
      2) generate_rules: writes rules automations & merges gate booleans into helpers.yaml

    ``ir_obj`` may be the IR object (exposing ``to_dict()``) or an
    already-converted dict; each pass is handed the shape it expects.
    Returns True on completion.
    """
    Path(outdir).mkdir(parents=True, exist_ok=True)

    # 1) Sync & helpers first (merge-safe via yaml_emit._dump_yaml)
    try:
        emit_package(ir_obj, outdir)
    except Exception as exc:
        # Best-effort: still emit rules even if the sync pass fails, but
        # report the failure instead of silently swallowing it.
        print(f"[hassl.codegen] emit_package failed: {exc!r}")

    # 2) Rules last (adds gate booleans; also merge-safe)
    ir_dict = ir_obj if isinstance(ir_obj, dict) else getattr(ir_obj, "to_dict", lambda: ir_obj)()
    generate_rules(ir_dict, outdir)
    return True
|
hassl/codegen/package.py
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
from typing import Dict, List
|
|
2
|
+
import os, re
|
|
3
|
+
from ..semantics.analyzer import IRProgram, IRSync
|
|
4
|
+
from .yaml_emit import _dump_yaml, ensure_dir
|
|
5
|
+
|
|
6
|
+
# Property configuration for proxies and services.
# Each entry maps a HASSL sync property to:
#   proxy:    the helper type (plus numeric bounds) used as the sync proxy
#   upstream: the device attribute read when mirroring device -> proxy
#   service:  the HA service + data key used when writing proxy -> device
# "onoff" has no upstream/service entry: on/off is handled with plain
# turn_on/turn_off calls, not an attribute write.
PROP_CONFIG = {
    "onoff": {"proxy": {"type": "input_boolean"}},
    "brightness": {
        "proxy": {"type": "input_number", "min": 0, "max": 255, "step": 1},
        "upstream": {"attr": "brightness"},
        "service": {"domain": "light", "service": "light.turn_on", "data_key": "brightness"}
    },
    "color_temp": {
        # Mireds (150–500 covers common bulbs).
        "proxy": {"type": "input_number", "min": 150, "max": 500, "step": 1},
        "upstream": {"attr": "color_temp"},
        "service": {"domain": "light", "service": "light.turn_on", "data_key": "color_temp"}
    },
    "kelvin": {
        # Typical usable range; adjust if your bulbs differ (e.g., 2000–6500K)
        "proxy": {"type": "input_number", "min": 2000, "max": 6500, "step": 50},
        # Newer HA exposes color_temp_kelvin; we prefer that for upstream reads
        "upstream": {"attr": "color_temp_kelvin"},
        # Downstream: HA light.turn_on supports 'kelvin' directly
        "service": {"domain": "light", "service": "light.turn_on", "data_key": "kelvin"}
    },
    "hs_color": {
        # hs_color is a [hue, sat] list; stored as a JSON string in input_text.
        "proxy": {"type": "input_text"},
        "upstream": {"attr": "hs_color"},
        "service": {"domain": "light", "service": "light.turn_on", "data_key": "hs_color"}
    },
    "percentage": {
        "proxy": {"type": "input_number", "min": 0, "max": 100, "step": 1},
        "upstream": {"attr": "percentage"},
        "service": {"domain": "fan", "service": "fan.set_percentage", "data_key": "percentage"}
    },
    "preset_mode": {
        "proxy": {"type": "input_text"},
        "upstream": {"attr": "preset_mode"},
        "service": {"domain": "fan", "service": "fan.set_preset_mode", "data_key": "preset_mode"}
    },
    "volume": {
        # HA volume_level is a 0.0–1.0 float.
        "proxy": {"type": "input_number", "min": 0, "max": 1, "step": 0.01},
        "upstream": {"attr": "volume_level"},
        "service": {"domain": "media_player", "service": "media_player.volume_set", "data_key": "volume_level"}
    },
    "mute": {
        "proxy": {"type": "input_boolean"},
        "upstream": {"attr": "is_volume_muted"},
        "service": {"domain": "media_player", "service": "media_player.volume_mute", "data_key": "is_volume_muted"}
    }
}
|
|
53
|
+
|
|
54
|
+
def _safe(name: str) -> str:
|
|
55
|
+
return name.replace(".", "_")
|
|
56
|
+
|
|
57
|
+
def _pkg_slug(outdir: str) -> str:
|
|
58
|
+
base = os.path.basename(os.path.abspath(outdir))
|
|
59
|
+
s = re.sub(r'[^a-z0-9]+', '_', base.lower()).strip('_')
|
|
60
|
+
return s or "pkg"
|
|
61
|
+
|
|
62
|
+
def _proxy_entity(sync_name: str, prop: str) -> str:
    """Return the proxy helper entity id for a (sync, property) pair.

    The domain must match the helper actually created by emit_package:
    'onoff' and other boolean-proxied props (e.g. 'mute') live in
    input_boolean, input_text-proxied props in input_text, and everything
    else — including unconfigured props, which default to a number slider —
    in input_number.
    """
    if prop == "onoff":
        return f"input_boolean.hassl_{_safe(sync_name)}_onoff"
    ptype = PROP_CONFIG.get(prop, {}).get("proxy", {}).get("type")
    if ptype == "input_boolean":
        # Fix: boolean proxies (mute) previously fell through to input_text,
        # which never matches the input_boolean helper emit_package creates.
        return f"input_boolean.hassl_{_safe(sync_name)}_{prop}"
    if ptype == "input_text":
        return f"input_text.hassl_{_safe(sync_name)}_{prop}"
    # input_number, and the default for props with no PROP_CONFIG entry
    # (emit_package creates an input_number slider in that case).
    return f"input_number.hassl_{_safe(sync_name)}_{prop}"
|
|
66
|
+
|
|
67
|
+
def _context_entity(entity: str, prop: str = None) -> str:
    """Entity id of the input_text storing the last HASSL-written context id.

    Non-onoff properties get their own per-property context store.
    """
    base = f"input_text.hassl_ctx_{_safe(entity)}"
    if prop and prop != "onoff":
        return f"{base}_{prop}"
    return base
|
|
71
|
+
|
|
72
|
+
def _domain(entity: str) -> str:
|
|
73
|
+
return entity.split(".", 1)[0]
|
|
74
|
+
|
|
75
|
+
def _turn_service(domain: str, state_on: bool) -> str:
|
|
76
|
+
if domain in ("light","switch","fan","media_player","cover"):
|
|
77
|
+
return f"{domain}.turn_on" if state_on else f"{domain}.turn_off"
|
|
78
|
+
return "homeassistant.turn_on" if state_on else "homeassistant.turn_off"
|
|
79
|
+
|
|
80
|
+
def emit_package(ir: IRProgram, outdir: str):
    """Emit the sync portion of a HASSL package into *outdir*.

    Writes three merge-safe YAML artifacts:
      * helpers_<pkg>.yaml      - input_text context stores + per-sync proxy helpers
      * scripts_<pkg>.yaml      - writer scripts that stamp a context id and
                                  forward values to member devices (the
                                  feedback-loop breaker)
      * sync_<pkg>_<name>.yaml  - upstream (device -> proxy) and downstream
                                  (proxy -> device) automations, one file per sync
    """
    ensure_dir(outdir)
    helpers: Dict = {"input_text": {}, "input_boolean": {}, "input_number": {}}
    scripts: Dict = {"script": {}}
    automations: List[Dict] = []

    def _proxy_for(sync_name: str, prop: str) -> str:
        # Resolve the proxy entity id from PROP_CONFIG so its domain always
        # matches the helper created below. Fixes 'mute': its helper is an
        # input_boolean, but the old inline expression produced an
        # input_number.* id, so input_boolean.turn_on/off targeted a
        # non-existent entity.
        ptype = PROP_CONFIG.get(prop, {}).get("proxy", {}).get("type")
        if prop == "onoff" or ptype == "input_boolean":
            return f"input_boolean.hassl_{_safe(sync_name)}_{prop}"
        if ptype == "input_text":
            return f"input_text.hassl_{_safe(sync_name)}_{prop}"
        return f"input_number.hassl_{_safe(sync_name)}_{prop}"

    # Context helpers for entities & per-prop contexts: each stores the
    # context id of the last HASSL-initiated write so upstream automations
    # can distinguish our own writes from external changes.
    sync_entities = set()
    entity_props = {}
    for s in ir.syncs:
        for m in s.members:
            sync_entities.add(m)
            entity_props.setdefault(m, set())
            for p in s.properties:
                entity_props[m].add(p.name)

    for e in sorted(sync_entities):
        helpers["input_text"][f"hassl_ctx_{_safe(e)}"] = {"name": f"HASSL Ctx {e}", "max": 64}
        for prop in sorted(entity_props[e]):
            if prop != "onoff":
                helpers["input_text"][f"hassl_ctx_{_safe(e)}_{prop}"] = {
                    "name": f"HASSL Ctx {e} {prop}", "max": 64
                }

    # Proxy helpers: one per (sync, property).
    for s in ir.syncs:
        for p in s.properties:
            cfg = PROP_CONFIG.get(p.name, {})
            proxy = cfg.get("proxy", {"type": "input_number", "min": 0, "max": 255, "step": 1})
            if p.name == "onoff" or proxy.get("type") == "input_boolean":
                helpers["input_boolean"][f"hassl_{_safe(s.name)}_{p.name}"] = {"name": f"HASSL Proxy {s.name} {p.name}"}
            elif proxy.get("type") == "input_text":
                helpers["input_text"][f"hassl_{_safe(s.name)}_{p.name}"] = {"name": f"HASSL Proxy {s.name} {p.name}", "max": 120}
            else:
                helpers["input_number"][f"hassl_{_safe(s.name)}_{p.name}"] = {
                    "name": f"HASSL Proxy {s.name} {p.name}", "min": proxy.get("min", 0), "max": proxy.get("max", 255),
                    "step": proxy.get("step", 1), "mode": "slider"
                }

    # Writer scripts per (sync, member, prop)
    for s in ir.syncs:
        # be defensive in case props/members are empty
        if not getattr(s, "properties", None):
            continue
        if not getattr(s, "members", None):
            continue

        for p in s.properties:
            prop = getattr(p, "name", None) or (p.get("name") if isinstance(p, dict) else None)
            if not prop:
                continue

            for m in s.members:
                dom = _domain(m)
                script_key = f"hassl_write_sync_{_safe(s.name)}_{_safe(m)}_{prop}_set"

                # Step 1: always stamp context to block feedback loops
                seq = [{
                    "service": "input_text.set_value",
                    "data": {
                        "entity_id": _context_entity(m, prop if prop != "onoff" else None),
                        "value": "{{ this.context.id }}"
                    }
                }]

                # Step 2: for non-onoff, forward the value to the actual device
                if prop == "hs_color":
                    # value is a JSON string; HA expects a list
                    seq.append({
                        "service": "light.turn_on",
                        "target": {"entity_id": m},
                        "data": {"hs_color": "{{ value | from_json }}"}
                    })
                elif prop != "onoff":
                    svc = PROP_CONFIG.get(prop, {}).get("service", {})
                    service = svc.get("service", f"{dom}.turn_on")
                    data_key = svc.get("data_key", prop)
                    seq.append({
                        "service": service,
                        "target": {"entity_id": m},
                        "data": {data_key: "{{ value }}"}
                    })

                # actually register the script
                scripts["script"][script_key] = {
                    "alias": f"HASSL write (sync {s.name} → {m} {prop})",
                    "mode": "single",
                    "sequence": seq
                }

    # Upstream automations: device change -> proxy helper.
    for s in ir.syncs:
        for p in s.properties:
            prop = p.name
            triggers = []
            conditions = []
            actions = []

            if prop == "onoff":
                for m in s.members:
                    triggers.append({"platform": "state", "entity_id": m})

                # Ignore state changes whose parent context is the one we
                # stamped ourselves (i.e. HASSL-initiated writes).
                conditions.append({"condition": "template",
                                   "value_template": (
                                       "{{ trigger.to_state.context.parent_id != "
                                       "states('input_text.hassl_ctx_' ~ trigger.entity_id|replace('.','_')) }}"
                                   )
                                   })
                actions = [{
                    "choose": [
                        {"conditions": [{"condition": "template", "value_template": "{{ trigger.to_state.state == 'on' }}"}],
                         "sequence": [{"service": "input_boolean.turn_on", "target": {"entity_id": f"input_boolean.hassl_{_safe(s.name)}_onoff"}}]
                         },
                        {"conditions": [{"condition": "template", "value_template": "{{ trigger.to_state.state != 'on' }}"}],
                         "sequence": [{"service": "input_boolean.turn_off", "target": {"entity_id": f"input_boolean.hassl_{_safe(s.name)}_onoff"}}]
                         }
                    ]
                }]
            else:
                cfg = PROP_CONFIG.get(prop, {})
                attr = cfg.get("upstream", {}).get("attr", prop)

                # state trigger on attribute
                for m in s.members:
                    triggers.append({"platform": "state", "entity_id": m, "attribute": attr})
                suffix = f"_{prop}" if prop != "onoff" else ""
                conditions.append({
                    "condition": "template",
                    "value_template": (
                        "{{ trigger.to_state.context.parent_id != "
                        "states('input_text.hassl_ctx_' ~ trigger.entity_id|replace('.', '_') ~ '" + suffix + "') }}"
                    )
                })

                proxy_e = _proxy_for(s.name, prop)

                if prop == "mute":
                    actions = [{
                        "choose": [
                            {
                                "conditions": [{"condition": "template", "value_template": f"{{{{ state_attr(trigger.entity_id, '{attr}') | bool }}}}"}],
                                "sequence": [{"service": "input_boolean.turn_on", "target": {"entity_id": proxy_e}}]
                            },
                            {
                                "conditions": [{"condition": "template", "value_template": f"{{{{ not (state_attr(trigger.entity_id, '{attr}') | bool) }}}}"}],
                                "sequence": [{"service": "input_boolean.turn_off", "target": {"entity_id": proxy_e}}]
                            }
                        ]
                    }]
                elif prop == "preset_mode":
                    actions = [{"service": "input_text.set_value", "data": {"entity_id": proxy_e, "value": f"{{{{ state_attr(trigger.entity_id, '{attr}') }}}}"}}]
                elif prop == "hs_color":
                    # Store JSON so we can send a real list back later
                    actions = [{"service": "input_text.set_value", "data": {"entity_id": proxy_e, "value": f"{{{{ state_attr(trigger.entity_id, '{attr}') | to_json }}}}"}}]
                else:
                    actions = [{"service": "input_number.set_value", "data": {"entity_id": proxy_e, "value": f"{{{{ state_attr(trigger.entity_id, '{attr}') }}}}"}}]

            if triggers:
                automations.append({
                    "alias": f"HASSL sync {s.name} upstream {prop}",
                    "mode": "restart",
                    "trigger": triggers,
                    "condition": conditions,
                    "action": actions
                })

    # Downstream automations: proxy helper change -> member devices.
    for s in ir.syncs:
        for p in s.properties:
            prop = p.name
            if prop == "onoff":
                trigger = [{"platform": "state", "entity_id": f"input_boolean.hassl_{_safe(s.name)}_onoff"}]
                actions = []
                for m in s.members:
                    dom = _domain(m)
                    # Only act when the member disagrees with the proxy.
                    cond_tpl = "{{ is_state('%s','on') != is_state('%s','on') }}" % (f"input_boolean.hassl_{_safe(s.name)}_onoff", m)
                    service_on = _turn_service(dom, True)
                    service_off = _turn_service(dom, False)
                    actions.append({
                        "choose": [
                            {
                                "conditions": [
                                    {"condition": "template", "value_template": cond_tpl},
                                    {"condition": "state", "entity_id": f"input_boolean.hassl_{_safe(s.name)}_onoff", "state": "on"}
                                ],
                                "sequence": [
                                    {"service": "script.%s" % f"hassl_write_sync_{_safe(s.name)}_{_safe(m)}_onoff_set"},
                                    {"service": service_on, "target": {"entity_id": m}}
                                ]
                            },
                            {
                                "conditions": [
                                    {"condition": "template", "value_template": cond_tpl},
                                    {"condition": "state", "entity_id": f"input_boolean.hassl_{_safe(s.name)}_onoff", "state": "off"}
                                ],
                                "sequence": [
                                    {"service": "script.%s" % f"hassl_write_sync_{_safe(s.name)}_{_safe(m)}_onoff_set"},
                                    {"service": service_off, "target": {"entity_id": m}}
                                ]
                            }
                        ]
                    })
                automations.append({"alias": f"HASSL sync {s.name} downstream onoff", "mode": "queued", "max": 10, "trigger": trigger, "action": actions})
            else:
                proxy_e = _proxy_for(s.name, prop)
                trigger = [{"platform": "state", "entity_id": proxy_e}]
                actions = []
                cfg = PROP_CONFIG.get(prop, {})
                attr = cfg.get("upstream", {}).get("attr", prop)

                for m in s.members:
                    if prop == "mute":
                        diff_tpl = "{{ (states('%s') == 'on') != (state_attr('%s','%s') | bool) }}" % (proxy_e, m, attr)
                        val_expr = "{{ iif(states('%s') == 'on', true, false) }}" % (proxy_e)
                    elif prop == "preset_mode":
                        diff_tpl = "{{ (states('%s') != state_attr('%s','%s') ) }}" % (proxy_e, m, attr)
                        val_expr = "{{ states('%s') }}" % (proxy_e)
                    elif prop == "hs_color":
                        # compare JSON string vs current attr rendered to JSON
                        diff_tpl = "{{ states('%s') != (state_attr('%s','%s') | to_json) }}" % (proxy_e, m, attr)
                        # pass JSON string to script; script converts with from_json
                        val_expr = "{{ states('%s') }}" % (proxy_e)
                    else:
                        diff_tpl = "{{ (states('%s') | float) != (state_attr('%s','%s') | float) }}" % (proxy_e, m, attr)
                        val_expr = "{{ states('%s') }}" % (proxy_e)

                    actions.append({
                        "choose": [
                            {
                                "conditions": [{"condition": "template", "value_template": diff_tpl}],
                                "sequence": [
                                    {"service": "script.%s" % f"hassl_write_sync_{_safe(s.name)}_{_safe(m)}_{prop}_set", "data": {"value": val_expr}}
                                ]
                            }
                        ]
                    })
                automations.append({"alias": f"HASSL sync {s.name} downstream {prop}", "mode": "queued", "max": 10, "trigger": trigger, "action": actions})

    pkg = _pkg_slug(outdir)

    # Write helpers.yaml / scripts.yaml
    _dump_yaml(os.path.join(outdir, f"helpers_{pkg}.yaml"), helpers, ensure_sections=True)
    _dump_yaml(os.path.join(outdir, f"scripts_{pkg}.yaml"), scripts)

    # Write automations per sync
    for s in ir.syncs:
        doc = [a for a in automations if a["alias"].startswith(f"HASSL sync {s.name}")]
        if doc:
            _dump_yaml(os.path.join(outdir, f"sync_{pkg}_{_safe(s.name)}.yaml"), {"automation": doc})
|