homesec-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. homesec/__init__.py +20 -0
  2. homesec/app.py +393 -0
  3. homesec/cli.py +159 -0
  4. homesec/config/__init__.py +18 -0
  5. homesec/config/loader.py +109 -0
  6. homesec/config/validation.py +82 -0
  7. homesec/errors.py +71 -0
  8. homesec/health/__init__.py +5 -0
  9. homesec/health/server.py +226 -0
  10. homesec/interfaces.py +249 -0
  11. homesec/logging_setup.py +176 -0
  12. homesec/maintenance/__init__.py +1 -0
  13. homesec/maintenance/cleanup_clips.py +632 -0
  14. homesec/models/__init__.py +79 -0
  15. homesec/models/alert.py +32 -0
  16. homesec/models/clip.py +71 -0
  17. homesec/models/config.py +362 -0
  18. homesec/models/events.py +184 -0
  19. homesec/models/filter.py +62 -0
  20. homesec/models/source.py +77 -0
  21. homesec/models/storage.py +12 -0
  22. homesec/models/vlm.py +99 -0
  23. homesec/pipeline/__init__.py +6 -0
  24. homesec/pipeline/alert_policy.py +5 -0
  25. homesec/pipeline/core.py +639 -0
  26. homesec/plugins/__init__.py +62 -0
  27. homesec/plugins/alert_policies/__init__.py +80 -0
  28. homesec/plugins/alert_policies/default.py +111 -0
  29. homesec/plugins/alert_policies/noop.py +60 -0
  30. homesec/plugins/analyzers/__init__.py +126 -0
  31. homesec/plugins/analyzers/openai.py +446 -0
  32. homesec/plugins/filters/__init__.py +124 -0
  33. homesec/plugins/filters/yolo.py +317 -0
  34. homesec/plugins/notifiers/__init__.py +80 -0
  35. homesec/plugins/notifiers/mqtt.py +189 -0
  36. homesec/plugins/notifiers/multiplex.py +106 -0
  37. homesec/plugins/notifiers/sendgrid_email.py +228 -0
  38. homesec/plugins/storage/__init__.py +116 -0
  39. homesec/plugins/storage/dropbox.py +272 -0
  40. homesec/plugins/storage/local.py +108 -0
  41. homesec/plugins/utils.py +63 -0
  42. homesec/py.typed +0 -0
  43. homesec/repository/__init__.py +5 -0
  44. homesec/repository/clip_repository.py +552 -0
  45. homesec/sources/__init__.py +17 -0
  46. homesec/sources/base.py +224 -0
  47. homesec/sources/ftp.py +209 -0
  48. homesec/sources/local_folder.py +238 -0
  49. homesec/sources/rtsp.py +1251 -0
  50. homesec/state/__init__.py +10 -0
  51. homesec/state/postgres.py +501 -0
  52. homesec/storage_paths.py +46 -0
  53. homesec/telemetry/__init__.py +0 -0
  54. homesec/telemetry/db/__init__.py +1 -0
  55. homesec/telemetry/db/log_table.py +16 -0
  56. homesec/telemetry/db_log_handler.py +246 -0
  57. homesec/telemetry/postgres_settings.py +42 -0
  58. homesec-0.1.0.dist-info/METADATA +446 -0
  59. homesec-0.1.0.dist-info/RECORD +62 -0
  60. homesec-0.1.0.dist-info/WHEEL +4 -0
  61. homesec-0.1.0.dist-info/entry_points.txt +2 -0
  62. homesec-0.1.0.dist-info/licenses/LICENSE +201 -0
homesec/plugins/analyzers/openai.py
@@ -0,0 +1,446 @@
+ """OpenAI-compatible VLM analyzer plugin."""
+
+ from __future__ import annotations
+
+ import asyncio
+ import base64
+ import json
+ import logging
+ import os
+ from pathlib import Path
+
+ import aiohttp
+ import cv2
+ from PIL import Image
+ from pydantic import BaseModel
+
+ from homesec.models.filter import FilterResult
+ from homesec.models.vlm import (
+     AnalysisResult,
+     OpenAILLMConfig,
+     SequenceAnalysis,
+     VLMConfig,
+     VLMPreprocessConfig,
+ )
+ from homesec.interfaces import VLMAnalyzer
+
+ logger = logging.getLogger(__name__)
+
+ DEFAULT_SYSTEM_PROMPT = """Analyze this residential security camera footage frame-by-frame and identify key security events.
+
+ CRITICAL INSTRUCTIONS:
+ 1. Carefully examine EACH frame to identify when entities appear and disappear
+ 2. Detect ALL entities: every person, vehicle, animal, package - even if they're far away or in the background
+ 3. Record the FIRST timestamp where you see each person/vehicle
+ 4. Record the LAST timestamp where you see each person/vehicle
+ 5. Use ONLY the exact timestamps shown in frame labels - never guess or extrapolate
+
+ Focus on KEY EVENTS ONLY:
+ - Person approaching/departing property
+ - Doorbell ring, door interaction, window checking
+ - Suspicious behaviors: loitering, concealing face, multiple passes
+ - Package delivery or theft
+ - Vehicles stopping, driving past, or unusual patterns
+
+ Keep observations list concise (short bullet points of security-relevant actions)."""
+
+
+ def _create_json_schema_format(
+     schema_model: type[BaseModel], schema_name: str
+ ) -> dict[str, object]:
+     """Create OpenAI JSON schema format configuration."""
+     return {
+         "type": "json_schema",
+         "json_schema": {
+             "name": schema_name,
+             "schema": schema_model.model_json_schema(),
+             "strict": True,
+         },
+     }
+
+
+ class OpenAIVLM(VLMAnalyzer):
+     """OpenAI-compatible VLM analyzer plugin.
+
+     Uses aiohttp for async HTTP calls to OpenAI API.
+     Supports structured output with Pydantic schemas.
+     """
+
+     def __init__(self, config: VLMConfig) -> None:
+         """Initialize OpenAI VLM with config validation.
+
+         Required config:
+             llm.api_key_env: Env var name with API key
+             llm.model: Model name (e.g., gpt-4o)
+
+         Optional config:
+             llm.base_url: API base URL (default: https://api.openai.com/v1)
+             llm.token_param: max_tokens or max_completion_tokens
+             llm.max_completion_tokens/max_tokens: Token limits
+             llm.temperature: Temperature (None to omit)
+             preprocessing.max_frames: Maximum frames to send (default: 10)
+             preprocessing.max_size: Max image dimension (default: 1024)
+             preprocessing.quality: JPEG quality (default: 85)
+         """
+         if not isinstance(config.llm, OpenAILLMConfig):
+             raise ValueError("OpenAIVLM requires llm=OpenAILLMConfig")
+         llm = config.llm
+         preprocess = config.preprocessing
+
+         # Get API key from env
+         self._api_key_env = llm.api_key_env
+         self.api_key = os.getenv(self._api_key_env)
+         if not self.api_key:
+             raise ValueError(f"API key not found in env: {self._api_key_env}")
+
+         self.model = llm.model
+         self.base_url = llm.base_url
+         self.system_prompt = DEFAULT_SYSTEM_PROMPT
+         self.temperature = llm.temperature
+         self.token_param = llm.token_param
+         self.max_tokens = self._resolve_token_limit(llm)
+         self.request_timeout = float(llm.request_timeout)
+
+         # HTTP session (created lazily in _ensure_session)
+         self._session: aiohttp.ClientSession | None = None
+         self._shutdown_called = False
+
+         logger.info(
+             "OpenAIVLM initialized: model=%s, max_frames=%d, token_param=%s, temperature=%s",
+             self.model,
+             preprocess.max_frames,
+             self.token_param,
+             self.temperature if self.temperature is not None else "default",
+         )
+
+     async def _ensure_session(self) -> aiohttp.ClientSession:
+         """Lazy-create aiohttp session with timeout."""
+         if self._session is None:
+             timeout = aiohttp.ClientTimeout(total=self.request_timeout)
+             self._session = aiohttp.ClientSession(timeout=timeout)
+         return self._session
+
+     async def analyze(
+         self,
+         video_path: Path,
+         filter_result: FilterResult,
+         config: VLMConfig,
+     ) -> AnalysisResult:
+         """Analyze video clip using OpenAI VLM.
+
+         Extracts frames, encodes as base64, and calls OpenAI API
+         with structured output schema.
+         """
+         if self._shutdown_called:
+             raise RuntimeError("VLM has been shut down")
+
+         start_time = asyncio.get_running_loop().time()
+
+         # Extract frames
+         frames = await self._extract_frames_async(video_path, config.preprocessing)
+
+         if not frames:
+             raise ValueError(f"No frames extracted from {video_path}")
+
+         messages = self._build_messages(frames, filter_result)
+         payload = self._build_payload(messages)
+         headers = self._build_headers()
+
+         data = await self._call_api(payload, headers)
+         usage = data.get("usage", {})
+         if not isinstance(usage, dict):
+             usage = {}
+         self._log_usage(usage, start_time, video_path)
+         prompt_tokens = usage.get("prompt_tokens")
+         completion_tokens = usage.get("completion_tokens")
+         prompt_token_count = prompt_tokens if isinstance(prompt_tokens, int) else None
+         completion_token_count = (
+             completion_tokens if isinstance(completion_tokens, int) else None
+         )
+
+         # Parse response
+         content = self._extract_content(data)
+         analysis = self._parse_sequence_analysis(content)
+         return AnalysisResult(
+             risk_level=analysis.max_risk_level,
+             activity_type=analysis.primary_activity,
+             summary=analysis.sequence_description,
+             analysis=analysis,
+             prompt_tokens=prompt_token_count,
+             completion_tokens=completion_token_count,
+         )
+
+     async def _extract_frames_async(
+         self, video_path: Path, preprocessing: VLMPreprocessConfig
+     ) -> list[tuple[str, str]]:
+         return await asyncio.to_thread(
+             self._extract_frames,
+             video_path,
+             preprocessing.max_frames,
+             preprocessing.max_size,
+             preprocessing.quality,
+         )
+
+     def _build_messages(
+         self,
+         frames: list[tuple[str, str]],
+         filter_result: FilterResult,
+     ) -> list[dict[str, object]]:
+         frame_count = len(frames)
+         start_ts = frames[0][1]
+         end_ts = frames[-1][1]
+         detected = ", ".join(filter_result.detected_classes) or "none"
+         user_content: list[dict[str, object]] = [
+             {
+                 "type": "text",
+                 "text": (
+                     f"Analyze these {frame_count} frames from security camera footage. "
+                     f"Detected objects: {detected}."
+                 ),
+             },
+             {
+                 "type": "text",
+                 "text": (
+                     "TIMESTAMP CONSTRAINT: This video spans from "
+                     f"{start_ts} to {end_ts}. You MUST use ONLY these exact timestamps "
+                     "shown in frame labels below. Do not invent timestamps outside this range."
+                 ),
+             },
+         ]
+         for idx, (frame_b64, timestamp) in enumerate(frames, start=1):
+             user_content.append(
+                 {
+                     "type": "text",
+                     "text": f"Frame at {timestamp} ({idx} of {frame_count}):",
+                 }
+             )
+             user_content.append(
+                 {
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f"data:image/jpeg;base64,{frame_b64}",
+                         "detail": "high",
+                     },
+                 }
+             )
+
+         return [
+             {"role": "system", "content": self.system_prompt},
+             {
+                 "role": "user",
+                 "content": user_content,
+             },
+         ]
+
+     def _build_payload(self, messages: list[dict[str, object]]) -> dict[str, object]:
+         payload: dict[str, object] = {
+             "model": self.model,
+             "messages": messages,
+             "response_format": _create_json_schema_format(
+                 SequenceAnalysis, "sequence_analysis"
+             ),
+         }
+         if self.temperature is not None:
+             payload["temperature"] = self.temperature
+         payload[self.token_param] = self.max_tokens
+         return payload
+
+     def _build_headers(self) -> dict[str, str]:
+         return {
+             "Authorization": f"Bearer {self.api_key}",
+             "Content-Type": "application/json",
+         }
+
+     async def _call_api(
+         self, payload: dict[str, object], headers: dict[str, str]
+     ) -> dict[str, object]:
+         session = await self._ensure_session()
+         url = f"{self.base_url}/chat/completions"
+
+         async with session.post(url, json=payload, headers=headers) as resp:
+             if resp.status != 200:
+                 error_text = await resp.text()
+                 raise RuntimeError(
+                     f"OpenAI API error {resp.status}: {error_text}"
+                 )
+
+             data = await resp.json()
+             if not isinstance(data, dict):
+                 raise TypeError("OpenAI API response is not a JSON object")
+             return data
+
+     def _log_usage(
+         self, usage: dict[str, object], start_time: float, video_path: Path
+     ) -> None:
+         elapsed_s = asyncio.get_running_loop().time() - start_time
+         logger.info(
+             "VLM token usage",
+             extra={
+                 "event_type": "vlm_usage",
+                 "provider": "openai",
+                 "model": self.model,
+                 "token_param": self.token_param,
+                 "clip_id": video_path.stem,
+                 "temperature": self.temperature,
+                 "prompt_tokens": usage.get("prompt_tokens"),
+                 "completion_tokens": usage.get("completion_tokens"),
+                 "total_tokens": usage.get("total_tokens"),
+                 "elapsed_s": round(elapsed_s, 3),
+             },
+         )
+
+     def _extract_content(self, data: dict[str, object]) -> object:
+         choices = data.get("choices")
+         if not isinstance(choices, list) or not choices:
+             raise TypeError("OpenAI API response missing choices")
+         first = choices[0]
+         if not isinstance(first, dict):
+             raise TypeError("OpenAI API response choice is not an object")
+         message = first.get("message")
+         if not isinstance(message, dict):
+             raise TypeError("OpenAI API response message is not an object")
+         return message.get("content")
+
+     def _parse_sequence_analysis(self, content: object) -> SequenceAnalysis:
+         try:
+             response_dict = content
+             if isinstance(content, str):
+                 response_dict = json.loads(content)
+             if not isinstance(response_dict, dict):
+                 raise TypeError(f"Expected JSON object, got {type(response_dict).__name__}")
+             return SequenceAnalysis.model_validate(response_dict)
+         except json.JSONDecodeError as e:
+             raise json.JSONDecodeError(
+                 f"VLM response is not valid JSON: {e.msg}. Raw response: {content}",
+                 e.doc,
+                 e.pos,
+             ) from e
+         except ValueError as e:
+             raise ValueError(
+                 f"VLM response does not match SequenceAnalysis schema: {e}. "
+                 f"Raw response: {content}"
+             ) from e
+
+     def _resolve_token_limit(self, llm: OpenAILLMConfig) -> int:
+         if self.token_param == "max_completion_tokens":
+             value = llm.max_completion_tokens or llm.max_tokens or 1000
+         else:
+             value = llm.max_tokens or llm.max_completion_tokens or 1000
+         return int(value)
+
+     def _extract_frames(
+         self,
+         video_path: Path,
+         max_frames: int,
+         max_size: int,
+         quality: int,
+     ) -> list[tuple[str, str]]:
+         """Extract and encode frames from video.
+
+         Returns list of (base64 JPEG, timestamp) tuples.
+         """
+         cap = cv2.VideoCapture(str(video_path))
+         total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+         if total_frames == 0:
+             cap.release()
+             return []
+
+         # Calculate frame indices to sample
+         if total_frames <= max_frames:
+             frame_indices = list(range(total_frames))
+         else:
+             step = total_frames / max_frames
+             frame_indices = [int(i * step) for i in range(max_frames)]
+
+         frames_b64: list[tuple[str, str]] = []
+
+         for idx in frame_indices:
+             cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
+             ret, frame = cap.read()
+             if not ret:
+                 continue
+
+             timestamp_ms = cap.get(cv2.CAP_PROP_POS_MSEC)
+             timestamp = self._format_timestamp(timestamp_ms)
+
+             # Convert to PIL Image
+             rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             pil_img = Image.fromarray(rgb_frame)
+
+             # Resize if needed
+             if max(pil_img.size) > max_size:
+                 pil_img = self._resize_image(pil_img, max_size)
+
+             # Encode as JPEG
+             import io
+             buffer = io.BytesIO()
+             pil_img.save(buffer, format="JPEG", quality=quality)
+             frame_bytes = buffer.getvalue()
+
+             # Base64 encode
+             frame_b64 = base64.b64encode(frame_bytes).decode("utf-8")
+             frames_b64.append((frame_b64, timestamp))
+
+         cap.release()
+         return frames_b64
+
+     def _format_timestamp(self, timestamp_ms: float) -> str:
+         total_seconds = max(0.0, timestamp_ms / 1000.0)
+         hours, remainder = divmod(total_seconds, 3600)
+         minutes, seconds = divmod(remainder, 60)
+         return f"{int(hours):02d}:{int(minutes):02d}:{seconds:05.2f}"
+
+     def _resize_image(self, img: Image.Image, max_size: int) -> Image.Image:
+         """Resize image maintaining aspect ratio."""
+         width, height = img.size
+
+         if width <= max_size and height <= max_size:
+             return img
+
+         if width > height:
+             new_width = max_size
+             new_height = int(height * (max_size / width))
+         else:
+             new_height = max_size
+             new_width = int(width * (max_size / height))
+
+         return img.resize((new_width, new_height), Image.Resampling.LANCZOS)
+
+     async def shutdown(self, timeout: float | None = None) -> None:
+         """Cleanup resources - close HTTP session."""
+         _ = timeout
+         if self._shutdown_called:
+             return
+
+         self._shutdown_called = True
+         logger.info("Shutting down OpenAIVLM...")
+
+         if self._session:
+             await self._session.close()
+
+         logger.info("OpenAIVLM shutdown complete")
+
+
+ # Plugin registration
+ from homesec.plugins.analyzers import VLMPlugin, vlm_plugin
+
+
+ @vlm_plugin(name="openai")
+ def openai_vlm_plugin() -> VLMPlugin:
+     """OpenAI VLM plugin factory.
+
+     Returns:
+         VLMPlugin for OpenAI vision-language model
+     """
+     from homesec.models.vlm import OpenAILLMConfig, VLMConfig
+     from homesec.interfaces import VLMAnalyzer
+
+     def factory(cfg: VLMConfig) -> VLMAnalyzer:
+         return OpenAIVLM(cfg)
+
+     return VLMPlugin(
+         name="openai",
+         config_model=OpenAILLMConfig,
+         factory=factory,
+     )
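
For orientation, a minimal usage sketch of the analyzer above follows. It is not part of the package contents in this diff; the constructor fields for VLMConfig, OpenAILLMConfig, VLMPreprocessConfig, and FilterResult are assumptions inferred from the docstrings and type hints shown above.

# Hedged usage sketch - constructor fields are inferred from this diff's
# docstrings/type hints, not confirmed package API.
import asyncio
from pathlib import Path

from homesec.models.filter import FilterResult
from homesec.models.vlm import OpenAILLMConfig, VLMConfig, VLMPreprocessConfig
from homesec.plugins.analyzers.openai import OpenAIVLM


async def main() -> None:
    # Requires the OPENAI_API_KEY environment variable to be set.
    config = VLMConfig(
        llm=OpenAILLMConfig(api_key_env="OPENAI_API_KEY", model="gpt-4o"),
        preprocessing=VLMPreprocessConfig(max_frames=10, max_size=1024, quality=85),
    )
    analyzer = OpenAIVLM(config)
    try:
        result = await analyzer.analyze(
            Path("clip.mp4"),
            FilterResult(detected_classes=["person"]),  # hypothetical kwargs
            config,
        )
        print(result.risk_level, result.activity_type, result.summary)
    finally:
        await analyzer.shutdown()


asyncio.run(main())

Because _ensure_session() lazily creates a single aiohttp session and _extract_frames_async() offloads OpenCV decoding to a worker thread, one analyzer instance can serve many clips; shutdown() closes the session, after which analyze() raises.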
homesec/plugins/filters/__init__.py
@@ -0,0 +1,124 @@
+ """Filter plugins and registry."""
+
+ from __future__ import annotations
+
+ import logging
+ from dataclasses import dataclass
+ from typing import Callable, TYPE_CHECKING, TypeVar
+
+ from pydantic import BaseModel
+
+ from homesec.interfaces import ObjectFilter
+
+ if TYPE_CHECKING:
+     from homesec.models.filter import FilterConfig
+
+ logger = logging.getLogger(__name__)
+
+ # Type alias for clarity
+ FilterFactory = Callable[["FilterConfig"], ObjectFilter]
+
+
+ @dataclass(frozen=True)
+ class FilterPlugin:
+     """Metadata for a filter plugin."""
+
+     name: str
+     config_model: type[BaseModel]
+     factory: FilterFactory
+
+
+ FILTER_REGISTRY: dict[str, FilterPlugin] = {}
+
+
+ def register_filter(plugin: FilterPlugin) -> None:
35
+ """Register a filter plugin with collision detection.
36
+
37
+ Args:
38
+ plugin: Filter plugin to register
39
+
40
+ Raises:
41
+ ValueError: If a plugin with the same name is already registered
42
+ """
43
+ if plugin.name in FILTER_REGISTRY:
44
+ raise ValueError(
45
+ f"Filter plugin '{plugin.name}' is already registered. "
46
+ f"Plugin names must be unique across all filter plugins."
47
+ )
48
+ FILTER_REGISTRY[plugin.name] = plugin
49
+
50
+
51
+ T = TypeVar("T", bound=Callable[[], FilterPlugin])
52
+
53
+
54
+ def filter_plugin(name: str) -> Callable[[T], T]:
55
+ """Decorator to register a filter plugin.
56
+
57
+ Usage:
58
+ @filter_plugin(name="my_filter")
59
+ def my_filter_plugin() -> FilterPlugin:
60
+ return FilterPlugin(...)
61
+
62
+ Args:
63
+ name: Plugin name (for validation only - must match plugin.name)
64
+
65
+ Returns:
66
+ Decorator function that registers the plugin
67
+ """
68
+
69
+ def decorator(factory_fn: T) -> T:
70
+ plugin = factory_fn()
71
+ register_filter(plugin)
72
+ return factory_fn
73
+
74
+ return decorator
75
+
76
+
77
+ def load_filter_plugin(config: FilterConfig) -> ObjectFilter:
78
+ """Load filter plugin by name from config.
79
+
80
+ Validates the config dict against the plugin's config_model and creates
81
+ a FilterConfig with the validated settings object.
82
+
83
+ Args:
84
+ config: Filter configuration with plugin name and raw config dict
85
+
86
+ Returns:
87
+ Instantiated filter plugin
88
+
89
+ Raises:
90
+ ValueError: If plugin name is unknown or config validation fails
91
+ """
92
+ plugin_name = config.plugin.lower()
93
+
94
+ if plugin_name not in FILTER_REGISTRY:
95
+ available = ", ".join(sorted(FILTER_REGISTRY.keys()))
96
+ raise ValueError(
97
+ f"Unknown filter plugin: '{plugin_name}'. Available: {available}"
98
+ )
99
+
100
+ plugin = FILTER_REGISTRY[plugin_name]
101
+
102
+ # Validate config.config dict against plugin's config_model
103
+ validated_settings = plugin.config_model.model_validate(config.config)
104
+
105
+ # Create new FilterConfig with validated settings object
106
+ from homesec.models.filter import FilterConfig as FilterConfigModel
107
+
108
+ validated_config = FilterConfigModel(
109
+ plugin=config.plugin,
110
+ max_workers=config.max_workers,
111
+ config=validated_settings,
112
+ )
113
+
114
+ return plugin.factory(validated_config)
115
+
116
+
117
+ __all__ = [
118
+ "FilterPlugin",
119
+ "FilterFactory",
120
+ "FILTER_REGISTRY",
121
+ "register_filter",
122
+ "filter_plugin",
123
+ "load_filter_plugin",
124
+ ]
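
To show how this registry is meant to be used end to end, here is a hedged sketch of a third-party filter plugin. The "threshold" plugin, ThresholdSettings, and ThresholdFilter are hypothetical; ObjectFilter's required methods are not shown in this diff and are omitted, and the FilterConfig fields (plugin, max_workers, config) are taken from load_filter_plugin above.

# Hedged sketch - the 'threshold' plugin is hypothetical; a real filter
# would also implement ObjectFilter's abstract methods, which this diff
# does not show.
from pydantic import BaseModel

from homesec.interfaces import ObjectFilter
from homesec.models.filter import FilterConfig
from homesec.plugins.filters import FilterPlugin, filter_plugin, load_filter_plugin


class ThresholdSettings(BaseModel):
    """Validated settings for the hypothetical 'threshold' filter."""
    confidence: float = 0.5


class ThresholdFilter(ObjectFilter):
    def __init__(self, config: FilterConfig) -> None:
        # config.config arrives as a validated ThresholdSettings instance,
        # because load_filter_plugin re-wraps the raw dict before calling
        # the factory.
        self.settings: ThresholdSettings = config.config


@filter_plugin(name="threshold")
def threshold_plugin() -> FilterPlugin:
    return FilterPlugin(
        name="threshold",
        config_model=ThresholdSettings,
        factory=ThresholdFilter,  # the class itself satisfies FilterFactory
    )


# The raw dict is validated against ThresholdSettings before the factory runs.
flt = load_filter_plugin(
    FilterConfig(plugin="threshold", max_workers=1, config={"confidence": 0.7})
)

The indirection is the point of the design: load_filter_plugin validates the raw config dict against the plugin's config_model first, so a factory always receives typed settings rather than an unchecked dict.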