python-infrakit-dev 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. infrakit/__init__.py +0 -0
  2. infrakit/cli/__init__.py +1 -0
  3. infrakit/cli/commands/__init__.py +1 -0
  4. infrakit/cli/commands/deps.py +530 -0
  5. infrakit/cli/commands/init.py +129 -0
  6. infrakit/cli/commands/llm.py +295 -0
  7. infrakit/cli/commands/logger.py +160 -0
  8. infrakit/cli/commands/module.py +342 -0
  9. infrakit/cli/commands/time.py +81 -0
  10. infrakit/cli/main.py +65 -0
  11. infrakit/core/__init__.py +0 -0
  12. infrakit/core/config/__init__.py +0 -0
  13. infrakit/core/config/converter.py +480 -0
  14. infrakit/core/config/exporter.py +304 -0
  15. infrakit/core/config/loader.py +713 -0
  16. infrakit/core/config/validator.py +389 -0
  17. infrakit/core/logger/__init__.py +21 -0
  18. infrakit/core/logger/formatters.py +143 -0
  19. infrakit/core/logger/handlers.py +322 -0
  20. infrakit/core/logger/retention.py +176 -0
  21. infrakit/core/logger/setup.py +314 -0
  22. infrakit/deps/__init__.py +239 -0
  23. infrakit/deps/clean.py +141 -0
  24. infrakit/deps/depfile.py +405 -0
  25. infrakit/deps/health.py +357 -0
  26. infrakit/deps/optimizer.py +642 -0
  27. infrakit/deps/scanner.py +550 -0
  28. infrakit/llm/__init__.py +35 -0
  29. infrakit/llm/batch.py +165 -0
  30. infrakit/llm/client.py +575 -0
  31. infrakit/llm/key_manager.py +728 -0
  32. infrakit/llm/llm_readme.md +306 -0
  33. infrakit/llm/models.py +148 -0
  34. infrakit/llm/providers/__init__.py +5 -0
  35. infrakit/llm/providers/base.py +112 -0
  36. infrakit/llm/providers/gemini.py +164 -0
  37. infrakit/llm/providers/openai.py +168 -0
  38. infrakit/llm/rate_limiter.py +54 -0
  39. infrakit/scaffolder/__init__.py +31 -0
  40. infrakit/scaffolder/ai.py +508 -0
  41. infrakit/scaffolder/backend.py +555 -0
  42. infrakit/scaffolder/cli_tool.py +386 -0
  43. infrakit/scaffolder/generator.py +338 -0
  44. infrakit/scaffolder/pipeline.py +562 -0
  45. infrakit/scaffolder/registry.py +121 -0
  46. infrakit/time/__init__.py +60 -0
  47. infrakit/time/profiler.py +511 -0
  48. python_infrakit_dev-0.1.0.dist-info/METADATA +124 -0
  49. python_infrakit_dev-0.1.0.dist-info/RECORD +51 -0
  50. python_infrakit_dev-0.1.0.dist-info/WHEEL +4 -0
  51. python_infrakit_dev-0.1.0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,389 @@
1
+ """
2
+ infrakit.core.config.validator
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+ Validate a loaded config dict against a Pydantic model or a lightweight
5
+ field-spec schema, with clear, structured error reporting.
6
+
7
+ Usage:
8
+ # --- Pydantic model ---
9
+ from pydantic import BaseModel
10
+ from infrakit.core.config.validator import validate, ValidationResult
11
+
12
+ class AppConfig(BaseModel):
13
+ host: str
14
+ port: int
15
+ debug: bool = False
16
+
17
+ result = validate({"host": "localhost", "port": 8080}, AppConfig)
18
+ if result.ok:
19
+ cfg = result.data # AppConfig instance, fully typed
20
+ else:
21
+ print(result.errors) # list[FieldError]
22
+
23
+ # --- Dict schema (no Pydantic model needed) ---
24
+ from infrakit.core.config.validator import Schema, field
25
+
26
+ schema = Schema({
27
+ "host": field(str, required=True),
28
+ "port": field(int, required=True),
29
+ "debug": field(bool, required=False, default=False),
30
+ })
31
+ result = schema.validate({"host": "localhost", "port": 8080})
32
+ """
33
+
34
+ from __future__ import annotations
35
+
36
+ import traceback
37
+ from dataclasses import dataclass, field as dc_field
38
+ from typing import Any, Generic, TypeVar
39
+
40
+ from pydantic import BaseModel, ValidationError
41
+
42
+ # ---------------------------------------------------------------------------
43
+ # Public types
44
+ # ---------------------------------------------------------------------------
45
+
46
+ ConfigDict = dict[str, Any]
47
+ T = TypeVar("T", bound=BaseModel)
48
+
49
+
50
+ # ---------------------------------------------------------------------------
51
+ # Error types
52
+ # ---------------------------------------------------------------------------
53
+
54
@dataclass(frozen=True)
class FieldError:
    """One validation failure for a single config field."""
    field: str          # dot-separated path, e.g. "database.port"
    message: str        # human-readable explanation of the failure
    value: Any = None   # offending value; None when the field was absent

    def __str__(self) -> str:
        where = f"[{self.field}]" if self.field else "[root]"
        if self.value is None:
            # Missing-field errors carry no value to display.
            return f"{where} {self.message}"
        return f"{where} {self.message} (got {self.value!r})"
65
+
66
+
67
+ # ---------------------------------------------------------------------------
68
+ # ValidationResult
69
+ # ---------------------------------------------------------------------------
70
+
71
@dataclass
class ValidationResult(Generic[T]):
    """Outcome of a validate() call.

    Attributes
    ----------
    ok:
        True when validation produced no errors.
    data:
        The validated model instance (Pydantic) or the coerced dict
        (Schema); ``None`` when *ok* is False.
    errors:
        All :class:`FieldError` failures; empty when *ok* is True.
    """
    ok: bool
    data: T | ConfigDict | None
    errors: list[FieldError] = dc_field(default_factory=list)

    def __bool__(self) -> bool:
        # Lets callers write `if result:` as shorthand for `if result.ok:`.
        return self.ok

    def raise_on_error(self) -> None:
        """Raise :exc:`ConfigValidationError` if validation failed."""
        if self.ok:
            return
        raise ConfigValidationError(self.errors)

    def summary(self) -> str:
        """Return a human-readable multi-line report of every error."""
        if self.ok:
            return "Validation passed."
        header = f"Validation failed with {len(self.errors)} error(s):"
        return "\n".join([header, *(f" {err}" for err in self.errors)])
105
+
106
+
107
+ # ---------------------------------------------------------------------------
108
+ # Exceptions
109
+ # ---------------------------------------------------------------------------
110
+
111
class ConfigValidationError(Exception):
    """Raised by :meth:`ValidationResult.raise_on_error` when validation fails."""

    def __init__(self, errors: list[FieldError]) -> None:
        # Keep the structured errors available to handlers alongside
        # the pre-formatted message.
        self.errors = errors
        super().__init__(self._format(errors))

    @staticmethod
    def _format(errors: list[FieldError]) -> str:
        header = f"Config validation failed ({len(errors)} error(s)):"
        return "\n".join([header, *(f" {err}" for err in errors)])
124
+
125
+
126
+ # ---------------------------------------------------------------------------
127
+ # Pydantic-based validation
128
+ # ---------------------------------------------------------------------------
129
+
130
def validate(data: ConfigDict, model: type[T]) -> ValidationResult[T]:
    """Validate *data* against a Pydantic *model*.

    Parameters
    ----------
    data:
        A plain config dict, as returned by :func:`~infrakit.core.config.loader.load`.
    model:
        A :class:`pydantic.BaseModel` subclass describing the expected shape.

    Returns
    -------
    ValidationResult[T]
        On success ``result.ok`` is True and ``result.data`` is the model
        instance. On failure ``result.errors`` holds one :class:`FieldError`
        per invalid field.

    Examples
    --------
    >>> class DB(BaseModel):
    ...     host: str
    ...     port: int
    >>> result = validate({"host": "localhost", "port": 5432}, DB)
    >>> result.ok
    True
    >>> result.data.port
    5432
    """
    try:
        parsed = model.model_validate(data)
    except ValidationError as exc:
        return ValidationResult(
            ok=False, data=None, errors=_parse_pydantic_errors(exc)
        )
    except Exception as exc:
        # Deliberate catch-all: a misbehaving model (e.g. broken __init__)
        # should surface as a structured error, not an unhandled crash.
        failure = FieldError(field="", message=f"Unexpected error: {exc}")
        return ValidationResult(ok=False, data=None, errors=[failure])
    return ValidationResult(ok=True, data=parsed, errors=[])
168
+
169
+
170
def _parse_pydantic_errors(exc: ValidationError) -> list[FieldError]:
    """Flatten a Pydantic ValidationError into a list of FieldError records."""
    out: list[FieldError] = []
    for item in exc.errors():
        # "loc" is a tuple such as ("database", "port") or ("host",);
        # join it into a dot path, or "" for root-level errors.
        loc = item.get("loc", ())
        path = ".".join(map(str, loc)) if loc else ""
        out.append(
            FieldError(
                field=path,
                message=item.get("msg", "Invalid value"),
                value=item.get("input", None),
            )
        )
    return out
181
+
182
+
183
+ # ---------------------------------------------------------------------------
184
+ # Lightweight dict schema
185
+ # ---------------------------------------------------------------------------
186
+
187
@dataclass
class FieldSpec:
    """Describes one config field for a :class:`Schema`.

    Attributes
    ----------
    type_:
        Expected Python type; ``None`` accepts any type.
    required:
        When True, a missing key is reported as a validation error.
    default:
        Substituted when the key is absent and *required* is False.
    choices:
        Optional whitelist — the value must be one of these when given.
    description:
        Free-text description (used in error messages and export).
    """
    type_: type | None = None
    required: bool = True
    default: Any = None
    choices: list[Any] | None = None
    description: str = ""
209
+
210
+
211
def field(
    type_: type | None = None,
    *,
    required: bool = True,
    default: Any = None,
    choices: list[Any] | None = None,
    description: str = "",
) -> FieldSpec:
    """Shorthand constructor for a :class:`FieldSpec`.

    Examples
    --------
    >>> schema = Schema({
    ...     "host": field(str, required=True),
    ...     "port": field(int, required=True),
    ...     "debug": field(bool, required=False, default=False),
    ...     "env": field(str, choices=["dev", "prod", "test"]),
    ... })
    """
    # Pure pass-through; keeps call sites terse and keyword-only.
    spec = FieldSpec(
        type_=type_,
        required=required,
        default=default,
        choices=choices,
        description=description,
    )
    return spec
237
+
238
+
239
class Schema:
    """Lightweight dict-schema validation — no Pydantic model required.

    Handy for quick checks, or when the expected config shape is only
    known at runtime.

    Parameters
    ----------
    fields:
        Mapping of field name -> :class:`FieldSpec` (see the :func:`field`
        helper).
    allow_extra:
        When False (the default), keys not declared in *fields* are
        reported as errors; when True, extra keys are silently ignored.

    Examples
    --------
    >>> schema = Schema({"port": field(int), "host": field(str)})
    >>> result = schema.validate({"port": 8080, "host": "localhost"})
    >>> result.ok
    True
    """

    def __init__(
        self,
        fields: dict[str, FieldSpec],
        *,
        allow_extra: bool = False,
    ) -> None:
        self._fields = fields
        self._allow_extra = allow_extra

    def validate(self, data: ConfigDict) -> ValidationResult:
        """Validate *data* against this schema.

        Returns
        -------
        ValidationResult
            On success ``result.data`` is a new dict with defaults filled
            in and values coerced to their declared types where possible.
        """
        errors: list[FieldError] = []
        cleaned: ConfigDict = {}

        # Walk every declared field: missing / type / choices checks.
        for name, spec in self._fields.items():
            if name not in data:
                if spec.required:
                    errors.append(
                        FieldError(field=name, message="Required field is missing.")
                    )
                else:
                    cleaned[name] = spec.default
                continue

            value = data[name]

            # Coerce to the declared type when one was given.
            if spec.type_ is not None:
                value, problem = _coerce(name, value, spec.type_)
                if problem is not None:
                    errors.append(problem)
                    continue

            # Enforce the whitelist, if any.
            if spec.choices is not None and value not in spec.choices:
                errors.append(
                    FieldError(
                        field=name,
                        message=f"Must be one of {spec.choices}.",
                        value=value,
                    )
                )
                continue

            cleaned[name] = value

        # Undeclared keys are errors unless extras are explicitly allowed.
        if not self._allow_extra:
            for key in (k for k in data if k not in self._fields):
                errors.append(
                    FieldError(
                        field=key,
                        message="Unknown field (not declared in schema).",
                        value=data[key],
                    )
                )

        if errors:
            return ValidationResult(ok=False, data=None, errors=errors)
        return ValidationResult(ok=True, data=cleaned, errors=[])
327
+
328
+
329
+ # ---------------------------------------------------------------------------
330
+ # Type coercion helper
331
+ # ---------------------------------------------------------------------------
332
+
333
+ # Types we attempt to cast automatically (safe, lossless conversions only)
334
+ _SAFE_COERCIONS: dict[type, tuple[type, ...]] = {
335
+ int: (str, float), # "8080" -> 8080, 8080.0 -> 8080
336
+ float: (str, int), # "3.14" -> 3.14, 3 -> 3.0
337
+ bool: (str,), # "true" -> True (via cast_value)
338
+ str: (int, float, bool), # 42 -> "42"
339
+ }
340
+
341
+ _BOOL_TRUE = {"true", "yes", "on", "1"}
342
+ _BOOL_FALSE = {"false", "no", "off", "0"}
343
+
344
+
345
+ def _coerce(
346
+ name: str,
347
+ value: Any,
348
+ expected: type,
349
+ ) -> tuple[Any, FieldError | None]:
350
+ """Attempt to coerce *value* to *expected* type.
351
+
352
+ Returns (coerced_value, None) on success,
353
+ (value, FieldError) on failure.
354
+ """
355
+ if isinstance(value, expected):
356
+ # bool is a subclass of int — guard against that
357
+ if expected is int and isinstance(value, bool):
358
+ return value, FieldError(
359
+ field=name,
360
+ message=f"Expected int, got bool.",
361
+ value=value,
362
+ )
363
+ return value, None
364
+
365
+ allowed_from = _SAFE_COERCIONS.get(expected, ())
366
+ if not isinstance(value, allowed_from):
367
+ return value, FieldError(
368
+ field=name,
369
+ message=f"Expected {expected.__name__}, got {type(value).__name__}.",
370
+ value=value,
371
+ )
372
+
373
+ try:
374
+ if expected is bool:
375
+ if isinstance(value, str):
376
+ lower = value.strip().lower()
377
+ if lower in _BOOL_TRUE:
378
+ return True, None
379
+ if lower in _BOOL_FALSE:
380
+ return False, None
381
+ raise ValueError(f"Cannot interpret {value!r} as bool")
382
+ coerced = expected(value)
383
+ return coerced, None
384
+ except (ValueError, TypeError) as exc:
385
+ return value, FieldError(
386
+ field=name,
387
+ message=f"Cannot coerce to {expected.__name__}: {exc}",
388
+ value=value,
389
+ )
@@ -0,0 +1,21 @@
1
+ """
2
+ infrakit.core.logger
3
+ ~~~~~~~~~~~~~~~~~~~~~
4
+ Public surface — import only from here.
5
+
6
+ from infrakit.core.logger import setup, get_logger
7
+
8
+ setup(
9
+ level = "INFO",
10
+ strategy = "date_level",
11
+ stream = "stdout",
12
+ session = True,
13
+ retention = 30,
14
+ )
15
+ log = get_logger(__name__)
16
+ log.info("Ready")
17
+ """
18
+
19
+ from infrakit.core.logger.setup import get_logger, reset, setup
20
+
21
+ __all__ = ["setup", "get_logger", "reset"]
@@ -0,0 +1,143 @@
1
+ """
2
+ infrakit.core.logger.formatters
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+ Two formatters — HumanFormatter for dev, JsonFormatter for prod/aggregators.
5
+
6
+ HumanFormatter output:
7
+ 2025-03-22 14:32:01 | INFO | infrakit.config.loader | Config loaded
8
+
9
+ JsonFormatter output:
10
+ {"timestamp": "2025-03-22T14:32:01Z", "level": "INFO",
11
+ "logger": "infrakit.config.loader", "message": "Config loaded"}
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ import logging
18
+ import traceback
19
+ from datetime import datetime, timezone
20
+
21
+
22
+ # ---------------------------------------------------------------------------
23
+ # Human formatter
24
+ # ---------------------------------------------------------------------------
25
+
26
class HumanFormatter(logging.Formatter):
    """Coloured, pipe-delimited single-line format for terminal output.

    Columns are padded so they align vertically across log lines:
        TIMESTAMP           | LEVEL    | LOGGER (truncated)       | MESSAGE
        2025-03-22 14:32:01 | INFO     | infrakit.config.loader   | ...
    """

    # Level names are padded to this width so columns stay aligned.
    _LEVEL_WIDTH = 8

    # Logger names longer than this are truncated from the LEFT so the
    # most specific (rightmost) part of the dotted path stays visible.
    _LOGGER_WIDTH = 24

    # ANSI colour codes — intended for TTY stream handlers only.
    _COLOURS = {
        "DEBUG": "\033[36m",     # cyan
        "INFO": "\033[32m",      # green
        "WARNING": "\033[33m",   # yellow
        "ERROR": "\033[31m",     # red
        "CRITICAL": "\033[35m",  # magenta
    }
    _RESET = "\033[0m"

    def __init__(self, *, use_colour: bool = False) -> None:
        super().__init__()
        self.use_colour = use_colour

    def format(self, record: logging.LogRecord) -> str:
        # Timestamps are rendered in UTC regardless of local timezone.
        stamp = datetime.fromtimestamp(record.created, tz=timezone.utc)
        ts = stamp.strftime("%Y-%m-%d %H:%M:%S")

        width = self._LOGGER_WIDTH
        name = record.name
        if len(name) > width:
            # Keep the rightmost width-1 chars, prefixed with an ellipsis.
            name = "\u2026" + name[-(width - 1):]
        logger_col = name.ljust(width)

        level_col = record.levelname.ljust(self._LEVEL_WIDTH)

        text = record.getMessage()
        if record.exc_info:
            text = text + "\n" + self.formatException(record.exc_info)

        if self.use_colour:
            tint = self._COLOURS.get(record.levelname, "")
            # ANSI codes are zero-width on screen, so alignment is kept.
            level_col = f"{tint}{level_col}{self._RESET}"

        return f"{ts} | {level_col} | {logger_col} | {text}"
70
+
71
+
72
+ # ---------------------------------------------------------------------------
73
+ # JSON formatter
74
+ # ---------------------------------------------------------------------------
75
+
76
class JsonFormatter(logging.Formatter):
    """Newline-delimited JSON — one object per log record.

    Always-present fields:
        timestamp, level, logger, message

    Optional fields (only present when relevant):
        exc_type, exc_message, exc_traceback — when exc_info is set
        any extra={} keys on the record — added at the TOP level of the
        payload (not nested under an "extra" key)
    """

    # Attributes present on every stdlib LogRecord — excluded when sweeping
    # the record's __dict__ for user-supplied extra={} keys.
    _STDLIB_ATTRS = frozenset({
        "args", "created", "exc_info", "exc_text", "filename",
        "funcName", "levelname", "levelno", "lineno", "message",
        "module", "msecs", "msg", "name", "pathname", "process",
        "processName", "relativeCreated", "stack_info", "thread",
        "threadName", "taskName",
    })

    def format(self, record: logging.LogRecord) -> str:
        """Serialise *record* as a single JSON line (UTC timestamp)."""
        # Fixed: a discarded record.getMessage() call was removed here — its
        # comment claimed it "populates record.message", but getMessage()
        # only returns the interpolated string; it assigns nothing.
        payload: dict = {
            "timestamp": datetime.fromtimestamp(
                record.created, tz=timezone.utc
            ).strftime("%Y-%m-%dT%H:%M:%SZ"),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
        }

        # Exception info (guard against exc_info=(None, None, None)).
        if record.exc_info and record.exc_info[0] is not None:
            exc_type, exc_value, exc_tb = record.exc_info
            payload["exc_type"] = exc_type.__name__
            payload["exc_message"] = str(exc_value)
            payload["exc_traceback"] = "".join(
                traceback.format_tb(exc_tb)
            ).strip()

        # Extra fields attached via log.info("msg", extra={"key": "val"}).
        for key, value in record.__dict__.items():
            if key in self._STDLIB_ATTRS or key.startswith("_"):
                continue
            try:
                json.dumps(value)  # serialisable as-is?
                payload[key] = value
            except (TypeError, ValueError):
                # Fall back to repr() so one odd value can't break the line.
                payload[key] = repr(value)

        return json.dumps(payload, ensure_ascii=False)
127
+
128
+
129
+ # ---------------------------------------------------------------------------
130
+ # Helpers
131
+ # ---------------------------------------------------------------------------
132
+
133
def _truncate_left(s: str, width: int) -> str:
    """Keep the rightmost *width* chars of *s*, prefixing '…' if truncated.

    Used for logger names so the most specific part (e.g. 'loader') is always
    visible even when the full dotted path is long.

    The result is at most *width* characters: strings at or under the limit
    are returned unchanged; longer strings become '…' plus the last
    ``width - 1`` characters.  (Fixed example — the previous one wrongly
    showed the LEFT side being kept:)

        _truncate_left("infrakit.core.config.loader", 24)
        -> "…akit.core.config.loader"
    """
    if len(s) <= width:
        return s
    return "\u2026" + s[-(width - 1):]