brynq-sdk-sage-germany 1.0.0 (py3-none-any.whl)

This diff shows the content of a publicly available package version released to a supported registry. It is provided for informational purposes only and reflects the package as it appears in that public registry.
Files changed (31)
  1. brynq_sdk_sage_germany/__init__.py +278 -0
  2. brynq_sdk_sage_germany/absences.py +175 -0
  3. brynq_sdk_sage_germany/allowances.py +100 -0
  4. brynq_sdk_sage_germany/contracts.py +145 -0
  5. brynq_sdk_sage_germany/cost_centers.py +89 -0
  6. brynq_sdk_sage_germany/employees.py +140 -0
  7. brynq_sdk_sage_germany/helpers.py +391 -0
  8. brynq_sdk_sage_germany/organization.py +90 -0
  9. brynq_sdk_sage_germany/payroll.py +167 -0
  10. brynq_sdk_sage_germany/payslips.py +106 -0
  11. brynq_sdk_sage_germany/salaries.py +95 -0
  12. brynq_sdk_sage_germany/schemas/__init__.py +44 -0
  13. brynq_sdk_sage_germany/schemas/absences.py +311 -0
  14. brynq_sdk_sage_germany/schemas/allowances.py +147 -0
  15. brynq_sdk_sage_germany/schemas/cost_centers.py +46 -0
  16. brynq_sdk_sage_germany/schemas/employees.py +487 -0
  17. brynq_sdk_sage_germany/schemas/organization.py +172 -0
  18. brynq_sdk_sage_germany/schemas/organization_assignment.py +61 -0
  19. brynq_sdk_sage_germany/schemas/payroll.py +287 -0
  20. brynq_sdk_sage_germany/schemas/payslips.py +34 -0
  21. brynq_sdk_sage_germany/schemas/salaries.py +101 -0
  22. brynq_sdk_sage_germany/schemas/start_end_dates.py +194 -0
  23. brynq_sdk_sage_germany/schemas/vacation_account.py +117 -0
  24. brynq_sdk_sage_germany/schemas/work_hours.py +94 -0
  25. brynq_sdk_sage_germany/start_end_dates.py +123 -0
  26. brynq_sdk_sage_germany/vacation_account.py +70 -0
  27. brynq_sdk_sage_germany/work_hours.py +97 -0
  28. brynq_sdk_sage_germany-1.0.0.dist-info/METADATA +21 -0
  29. brynq_sdk_sage_germany-1.0.0.dist-info/RECORD +31 -0
  30. brynq_sdk_sage_germany-1.0.0.dist-info/WHEEL +5 -0
  31. brynq_sdk_sage_germany-1.0.0.dist-info/top_level.txt +1 -0
brynq_sdk_sage_germany/cost_centers.py
@@ -0,0 +1,89 @@
+ """
+ Cost center endpoint implementations for Sage Germany.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
+ import pandas as pd
+ from brynq_sdk_functions import Functions
+
+ from .schemas.cost_centers import CostCentersGet, CostCenterCreate
+
+
+ class CostCenters:
+     """
+     Handles cost-center-related operations scoped to companies.
+     """
+
+     def __init__(self, sage) -> None:
+         self.sage = sage
+         self.base_url = "/company/Kostenstellen"
+
+     def _collect_company_ids(self) -> List[int]:
+         """
+         Retrieve distinct company ids using the employee search endpoint.
+         """
+         records = self.sage._employee_search()
+         company_ids = {record["MdNr"] for record in records if record.get("MdNr") is not None}
+         return list(company_ids)
+
+     def get(self, company_ids: Optional[List[int]] = None) -> Tuple[pd.DataFrame, pd.DataFrame]:
+         """
+         Retrieve cost center entries for the provided companies.
+         """
+         try:
+             companies = company_ids or self._collect_company_ids()
+             if not companies:
+                 return pd.DataFrame(), pd.DataFrame()
+
+             cost_center_records: List[Dict[str, Any]] = []
+             for company_id in companies:
+                 response = self.sage.get(
+                     path=self.base_url,
+                     params={"MdNr": company_id},
+                 )
+                 response.raise_for_status()
+                 payload = response.json()
+                 if isinstance(payload, list):
+                     cost_center_records.extend(payload)
+
+             if not cost_center_records:
+                 return pd.DataFrame(), pd.DataFrame()
+
+             df = pd.json_normalize(cost_center_records, sep="__")
+
+             valid_data, invalid_data = Functions.validate_data(
+                 df=df,
+                 schema=CostCentersGet,
+             )
+             return valid_data, invalid_data
+         except Exception as exc:
+             raise RuntimeError("Failed to retrieve cost center data.") from exc
+
+     def create(self, data: Union[Dict[str, Any], List[Dict[str, Any]]]) -> Dict[str, Any]:
+         """
+         Create or update cost centers using bulk PUT.
+         """
+         try:
+             if isinstance(data, dict):
+                 items = [data]
+             elif isinstance(data, list):
+                 items = data
+             else:
+                 raise ValueError("Cost center create payload must be a dictionary or list of dictionaries.")
+
+             payload = [
+                 CostCenterCreate(**item).model_dump(by_alias=True, exclude_none=True, mode="json")
+                 for item in items
+             ]
+
+             response = self.sage.put(
+                 path=self.base_url,
+                 body=payload,
+             )
+             response.raise_for_status()
+             return response.json()
+         except Exception as exc:
+             raise RuntimeError("Failed to create or update Sage Germany cost centers.") from exc
brynq_sdk_sage_germany/employees.py
@@ -0,0 +1,140 @@
+ """
+ Employees endpoint implementations for Sage Germany.
+ """
+
+ from typing import Any, Dict, Optional, Tuple
+
+ import pandas as pd
+ from pydantic import ValidationError
+ from brynq_sdk_functions import Functions
+
+ from .schemas.employees import EmployeeUpdateRequest, EmployeeCreateForm, EmployeesGet
+ from .absences import Absences
+ from .allowances import Allowances
+ from .salaries import Salary
+ from .start_end_dates import StartEndDates
+ from .work_hours import WorkHours
+ from .vacation_account import VacationAccount
+ from .contracts import Contracts
+ from .helpers import sage_flat_to_nested_with_prefix
+ from .organization import Organization
+
+
+ class Employees:
+     """
+     Handles employee-related operations.
+     """
+
+     def __init__(self, sage) -> None:
+         self.sage = sage
+         self.absences = Absences(sage)
+         self.allowances = Allowances(sage)
+         self.salary = Salary(sage)
+         self.start_end_dates = StartEndDates(sage)
+         self.work_hours = WorkHours(sage)
+         self.vacation_account = VacationAccount(sage)
+         self.contracts = Contracts(sage)
+         self.organization = Organization(sage)
+
+     def _fetch_employee_record(self, mdnr: int, annr: int) -> Dict[str, Any]:
+         """
+         Fetch a single employee detail payload from the API.
+         """
+         response = self.sage.get(
+             path="/employeenew/person/personal",
+             params={"MdNr": mdnr, "AnNr": annr},
+         )
+         response.raise_for_status()
+         return response.json()
+
+     def get(
+         self,
+         employee_id: Optional[int] = None,
+         company_id: Optional[int] = None,
+     ) -> Tuple[pd.DataFrame, pd.DataFrame]:
+         """
+         Retrieve employee details, flatten them, and validate using Pandera schemas.
+         """
+         try:
+             if employee_id is not None or company_id is not None:
+                 if employee_id is None or company_id is None:
+                     raise ValueError(
+                         "Both company_id and employee_id are required to fetch a single employee."
+                     )
+                 try:
+                     data = self._fetch_employee_record(mdnr=company_id, annr=employee_id)
+                     if not data:
+                         return pd.DataFrame(), pd.DataFrame()
+                 except Exception as exc:  # pragma: no cover - upstream errors
+                     raise RuntimeError("Failed to fetch employee record.") from exc
+                 records = [data]
+             else:
+                 employees = self.sage._employee_search()
+                 if not employees:
+                     return pd.DataFrame(), pd.DataFrame()
+
+                 records = [
+                     self._fetch_employee_record(mdnr=item["MdNr"], annr=item["AnNr"])
+                     for item in employees
+                 ]
+                 records = [record for record in records if record]
+
+             if not records:
+                 return pd.DataFrame(), pd.DataFrame()
+
+             df = pd.json_normalize(records, sep="__")
+
+             valid_data, invalid_data = Functions.validate_data(
+                 df=df,
+                 schema=EmployeesGet,
+             )
+             return valid_data, invalid_data
+         except Exception as exc:  # pragma: no cover - upstream errors
+             raise RuntimeError("Failed to retrieve employee data.") from exc
+
+     def create(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Create a new employee using the EmployeeNew Neuanlage endpoint.
+         """
+
+         try:
+             valid_data = EmployeeCreateForm(**data)
+             payload = valid_data.model_dump(by_alias=True, exclude_none=True, mode="json")
+
+             response = self.sage.post(
+                 path="/EmployeeNew/Neuanlage",
+                 data=payload,
+                 headers={"Content-Type": "application/x-www-form-urlencoded"},
+             )
+             response.raise_for_status()
+             return response.json()
+         except Exception as exc:
+             raise RuntimeError("Failed to create Sage Germany employee.") from exc
+
+     def update(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Update an existing employee personal record in Sage Germany.
+
+         Args:
+             data: Flat dictionary that matches `EmployeeUpdateRequest` field names
+                 using prefixes such as `person_`, `address_`, `identity_`, etc.
+
+         Returns:
+             Dict[str, Any]: Raw response JSON returned by the Sage API.
+         """
+         try:
+             nested_payload = sage_flat_to_nested_with_prefix(data, EmployeeUpdateRequest)
+             validated = EmployeeUpdateRequest(**nested_payload)
+             request_body = validated.model_dump(by_alias=True, exclude_none=True, mode="json")
+         except ValidationError as exc:
+             raise ValueError(f"Invalid employee update payload: {exc}") from exc
+
+         try:
+             response = self.sage.post(
+                 path="/employeenew/person/personal",
+                 body=request_body,
+             )
+             response.raise_for_status()
+             return response.json()
+         except Exception as exc:
+             raise RuntimeError("Failed to update Sage Germany employee.") from exc
brynq_sdk_sage_germany/helpers.py
@@ -0,0 +1,391 @@
+ """
+ Helper utilities specific to the Sage Germany SDK.
+ """
+
+ from __future__ import annotations
+
+ import re
+ from typing import Any, Dict, List, Optional, Type, get_args, get_origin
+
+ from pydantic import BaseModel
+
+
+ def _extract_basemodel(annotation: Any) -> Optional[Type[BaseModel]]:
+     """
+     Return the BaseModel class from an annotation if present.
+     """
+     if isinstance(annotation, type) and issubclass(annotation, BaseModel):
+         return annotation
+
+     origin = get_origin(annotation)
+     if origin:
+         for arg in get_args(annotation):
+             if isinstance(arg, type) and issubclass(arg, BaseModel):
+                 return arg
+     return None
+
+
+ def _get_prefix(field_info) -> Optional[str]:
+     """
+     Read json_schema_extra prefix metadata if available.
+     """
+     if field_info.json_schema_extra and isinstance(field_info.json_schema_extra, dict):
+         return field_info.json_schema_extra.get("prefix")
+     return None
+
+
+ def _collect_indexed_prefixes(flat_dict: Dict[str, Any], prefix: str) -> List[int]:
+     """
+     Extract list indices for list-based payloads (e.g., addresses_0_city).
+     """
+     pattern = re.compile(rf"^{re.escape(prefix)}(\d+)_")
+     indices: List[int] = []
+     for key in flat_dict:
+         match = pattern.match(key)
+         if match:
+             indices.append(int(match.group(1)))
+     return sorted(set(indices))
+
+
+ def sage_flat_to_nested_with_prefix(
+     flat_dict: Dict[str, Any],
+     model: Type[BaseModel],
+ ) -> Dict[str, Any]:
+     """
+     Convert a flat Sage Germany payload into the nested structure defined by the schema.
+
+     Differences from the generic implementation:
+     - Nested prefixes are treated as absolute so Sage-specific prefixes such as
+       `person_title_` are not concatenated twice.
+     - Falls back to parent prefixes only when a nested field does not define its own prefix.
+     """
+
+     def process_model(model_cls: Type[BaseModel], parent_prefix: str = "") -> Dict[str, Any]:
+         nested: Dict[str, Any] = {}
+
+         for field_name, field_info in model_cls.model_fields.items():
+             alias = field_info.alias or field_name
+             field_type = field_info.annotation
+             prefix_hint = _get_prefix(field_info)
+             effective_prefix = prefix_hint if prefix_hint is not None else parent_prefix
+
+             origin = get_origin(field_type)
+             if origin is list:
+                 list_model = _extract_basemodel(field_type)
+                 if list_model and effective_prefix:
+                     list_items = process_list(list_model, effective_prefix)
+                     if list_items:
+                         nested[alias] = list_items
+                     continue
+
+             nested_model = _extract_basemodel(field_type)
+             if nested_model:
+                 child_payload = process_model(nested_model, effective_prefix)
+                 if child_payload:
+                     nested[alias] = child_payload
+                 continue
+
+             key_candidates = []
+             if effective_prefix:
+                 key_candidates.append(f"{effective_prefix}{field_name}")
+             key_candidates.append(field_name)
+
+             for key in key_candidates:
+                 if key in flat_dict and flat_dict[key] is not None:
+                     nested[alias] = flat_dict[key]
+                     break
+
+         return nested
+
+     def process_list(model_cls: Type[BaseModel], prefix: str) -> Optional[List[Dict[str, Any]]]:
+         indices = _collect_indexed_prefixes(flat_dict, prefix)
+         if not indices:
+             return None
+
+         items: List[Dict[str, Any]] = []
+         for index in indices:
+             index_prefix = f"{prefix}{index}_"
+             payload = process_model(model_cls, index_prefix)
+             if payload:
+                 items.append(payload)
+         return items or None
+
+     return process_model(model)
+
+
+ def organization_flat_to_nested(flat: Dict[str, Any]) -> Dict[str, Any]:
+     """
+     Organization-specific flat-to-nested converter.
+     Expects prefixes like cost_center_0_id, org_assign_0_structure_id, fixed_0_cost_center_id, etc.
+     """
+
+     def pop_prefix(prefix: str) -> Dict[str, Any]:
+         return {k[len(prefix):]: v for k, v in flat.items() if k.startswith(prefix) and v is not None}
+
+     nested: Dict[str, Any] = {}
+
+     # Key
+     key_fields = pop_prefix("key_")
+     if key_fields:
+         nested["Key"] = {
+             "Date": key_fields.get("date"),
+             "MdNr": key_fields.get("company_id"),
+             "AnNr": key_fields.get("employee_number"),
+             "CombinedKey": key_fields.get("combined_key"),
+         }
+
+     # Flags
+     if "is_fixed_cost_split" in flat:
+         nested["IstFesteKostenAufteilung"] = flat.get("is_fixed_cost_split")
+
+     # General data
+     general = pop_prefix("general_")
+     if general:
+         def id_text(prefix: str) -> Optional[Dict[str, Any]]:
+             if general.get(f"{prefix}id") is None and general.get(f"{prefix}text") is None:
+                 return None
+             return {"Id": general.get(f"{prefix}id"), "Text": general.get(f"{prefix}text")}
+
+         nested["AllgemeineDaten"] = {
+             "VerwendungBetriebsstaetten": general.get("use_operating_sites"),
+             "Abrechnungskreis": id_text("payroll_group_"),
+             "Abrechnungslauf": id_text("payroll_run_"),
+             "Bundesland": id_text("state_"),
+             "Betriebsstaette": id_text("site_"),
+             "DebitorenNummer": general.get("debtor_number"),
+             "KalkulatorischeKostenstelle": id_text("calc_cost_center_"),
+             "Position": id_text("position_"),
+         }
+
+     # Search terms
+     search = pop_prefix("search_terms_")
+     if search:
+         nested["Suchbegriffe"] = {
+             "Such1": search.get("search_1"),
+             "Such2": search.get("search_2"),
+             "Such3": search.get("search_3"),
+             "Such4": search.get("search_4"),
+         }
+
+     def build_split(prefix: str) -> Optional[List[Dict[str, Any]]]:
+         items: Dict[int, Dict[str, Any]] = {}
+         for k, v in flat.items():
+             if not k.startswith(prefix):
+                 continue
+             remainder = k[len(prefix):]
+             parts = remainder.split("_", 1)
+             if len(parts) != 2:
+                 continue
+             try:
+                 idx = int(parts[0])
+             except ValueError:
+                 continue
+             field = parts[1]
+             items.setdefault(idx, {})[field] = v
+         if not items:
+             return None
+         out: List[Dict[str, Any]] = []
+         for idx in sorted(items):
+             entry = items[idx]
+             out.append(
+                 {
+                     "Id": entry.get("id"),
+                     "Number": entry.get("number"),
+                     "Text": entry.get("text"),
+                     "Anteil": entry.get("portion"),
+                 }
+             )
+         return out or None
+
+     cc = build_split("cost_center_")
+     if cc:
+         nested["Kostenstellen"] = {"Aufteilung": cc}
+
+     cu = build_split("cost_unit_")
+     if cu:
+         nested["Kostentraeger"] = {"Aufteilung": cu}
+
+     d1 = build_split("dimension_one_")
+     if d1:
+         nested["Dimension1"] = {"Aufteilung": d1}
+     d2 = build_split("dimension_two_")
+     if d2:
+         nested["Dimension2"] = {"Aufteilung": d2}
+     d3 = build_split("dimension_three_")
+     if d3:
+         nested["Dimension3"] = {"Aufteilung": d3}
+
+     # Org assignments
+     org_items: Dict[int, Dict[str, Any]] = {}
+     for k, v in flat.items():
+         if not k.startswith("org_assign_"):
+             continue
+         remainder = k[len("org_assign_"):]
+         parts = remainder.split("_", 1)
+         if len(parts) != 2:
+             continue
+         try:
+             idx = int(parts[0])
+         except ValueError:
+             continue
+         field = parts[1]
+         org_items.setdefault(idx, {})[field] = v
+
+     if org_items:
+         nested["OrgaEinordnungen"] = []
+         for idx in sorted(org_items):
+             entry = org_items[idx]
+             nested["OrgaEinordnungen"].append(
+                 {
+                     "Id": entry.get("id"),
+                     "Struktur": {"Id": entry.get("structure_id"), "Text": entry.get("structure_text")},
+                     "Einheit": {"Id": entry.get("unit_id"), "Text": entry.get("unit_text")},
+                     "ValidFrom": entry.get("valid_from"),
+                     "ValidTo": entry.get("valid_to"),
+                 }
+             )
+
+     # Fixed distribution
+     fixed_items: Dict[int, Dict[str, Any]] = {}
+     for k, v in flat.items():
+         if not k.startswith("fixed_"):
+             continue
+         remainder = k[len("fixed_"):]
+         parts = remainder.split("_", 1)
+         if len(parts) != 2:
+             continue
+         try:
+             idx = int(parts[0])
+         except ValueError:
+             continue
+         field = parts[1]
+         fixed_items.setdefault(idx, {})[field] = v
+
+     if fixed_items:
+         nested["FesteAufteilung"] = []
+         for idx in sorted(fixed_items):
+             entry = fixed_items[idx]
+             nested["FesteAufteilung"].append(
+                 {
+                     "Id": entry.get("id"),
+                     "Kostenstelle": {"Id": entry.get("cost_center_id"), "Text": entry.get("cost_center_text")},
+                     "Kostentraeger": {"Id": entry.get("cost_unit_id"), "Text": entry.get("cost_unit_text")},
+                     "Dimension1": {"Id": entry.get("dimension_one_id"), "Text": entry.get("dimension_one_text")},
+                     "Dimension2": {"Id": entry.get("dimension_two_id"), "Text": entry.get("dimension_two_text")},
+                     "Dimension3": {"Id": entry.get("dimension_three_id"), "Text": entry.get("dimension_three_text")},
+                     "Anteil": entry.get("portion"),
+                 }
+             )
+
+     # Clean empties
+     def prune(obj: Any) -> Any:
+         if isinstance(obj, dict):
+             cleaned = {k: prune(v) for k, v in obj.items()}
+             return {k: v for k, v in cleaned.items() if v not in (None, {}, [])}
+         if isinstance(obj, list):
+             cleaned_list = [prune(i) for i in obj]
+             return [i for i in cleaned_list if i not in (None, {}, [])]
+         return obj
+
+     return prune(nested)
+
+
+ def start_end_flat_to_nested(
+     flat_dict: Dict[str, Any],
+     model: Type[BaseModel],
+ ) -> Dict[str, Any]:
+     """
+     Local flat-to-nested converter supporting Optional[List[BaseModel]] for start/end dates.
+     Keeps the shared helper unchanged to avoid side effects in other endpoints.
+     """
+
+     def _extract_basemodel(annotation: Any) -> Optional[Type[BaseModel]]:
+         if isinstance(annotation, type) and issubclass(annotation, BaseModel):
+             return annotation
+         origin = get_origin(annotation)
+         if origin:
+             for arg in get_args(annotation):
+                 if isinstance(arg, type) and issubclass(arg, BaseModel):
+                     return arg
+         return None
+
+     def _extract_list_model(annotation: Any) -> Optional[Type[BaseModel]]:
+         origin = get_origin(annotation)
+         if origin is list:
+             return _extract_basemodel(get_args(annotation)[0])
+         if origin:
+             for arg in get_args(annotation):
+                 arg_origin = get_origin(arg)
+                 if arg_origin is list:
+                     return _extract_basemodel(get_args(arg)[0])
+         return None
+
+     def _get_prefix(field_info) -> Optional[str]:
+         if field_info.json_schema_extra and isinstance(field_info.json_schema_extra, dict):
+             return field_info.json_schema_extra.get("prefix")
+         return None
+
+     def _collect_indexed_prefixes(prefix: str) -> List[int]:
+         pattern = re.compile(rf"^{re.escape(prefix)}(\d+)_")
+         indices: List[int] = []
+         for key in flat_dict:
+             match = pattern.match(key)
+             if match:
+                 indices.append(int(match.group(1)))
+         return sorted(set(indices))
+
+     def process_model(model_cls: Type[BaseModel], parent_prefix: str = "") -> Dict[str, Any]:
+         nested: Dict[str, Any] = {}
+         for field_name, field_info in model_cls.model_fields.items():
+             alias = field_info.alias or field_name
+             field_type = field_info.annotation
+             prefix_hint = _get_prefix(field_info)
+             effective_prefix = parent_prefix
+             if prefix_hint:
+                 if parent_prefix and prefix_hint.startswith("entry_exit_"):
+                     effective_prefix = prefix_hint.replace("entry_exit_", parent_prefix, 1)
+                 else:
+                     effective_prefix = prefix_hint
+
+             list_model = _extract_list_model(field_type)
+             if list_model and effective_prefix:
+                 list_items = process_list(list_model, effective_prefix)
+                 if list_items:
+                     nested[alias] = list_items
+                 continue
+
+             nested_model = _extract_basemodel(field_type)
+             if nested_model:
+                 child_payload = process_model(nested_model, effective_prefix)
+                 if child_payload:
+                     nested[alias] = child_payload
+                 continue
+
+             key_candidates = []
+             if effective_prefix:
+                 key_candidates.append(f"{effective_prefix}{field_name}")
+             key_candidates.append(field_name)
+
+             for key in key_candidates:
+                 if key in flat_dict and flat_dict[key] is not None:
+                     nested[alias] = flat_dict[key]
+                     break
+
+         return nested
+
+     def process_list(model_cls: Type[BaseModel], prefix: str) -> Optional[List[Dict[str, Any]]]:
+         indices = _collect_indexed_prefixes(prefix)
+         if not indices:
+             return None
+         items: List[Dict[str, Any]] = []
+         for index in indices:
+             index_prefix = f"{prefix}{index}_"
+             payload = process_model(model_cls, index_prefix)
+             if payload:
+                 items.append(payload)
+         return items or None
+
+     return process_model(model)
+
+
+ __all__ = ["sage_flat_to_nested_with_prefix", "organization_flat_to_nested", "start_end_flat_to_nested"]