syntropylog 0.9.2 → 0.9.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/README.md +2 -1
- package/package.json +37 -29
- package/docs/ANALISIS_TIMEOUT_SERIALIZATION.md +0 -127
- package/docs/IMPROVEMENT_OPPORTUNITIES.md +0 -84
- package/docs/PR_DESCRIPTION_0.9.2.md +0 -22
- package/docs/REVISION_MOCKS_SOLID_FP.md +0 -50
- package/docs/SINGLETON_E_INSTANCE_REGISTRY.md +0 -187
- package/docs/TRANSPORTS_CONDICIONALES_POR_AMBIENTE.md +0 -178
- package/docs/configuration.md +0 -99
- package/docs/enterprise.md +0 -44
- package/docs/middleware.md +0 -62
- package/docs/persistence.md +0 -81
- package/docs/philosophy.md +0 -15
- package/docs/serialization.md +0 -67
- package/docs/testing.md +0 -28
package/CHANGELOG.md
CHANGED
|
@@ -5,6 +5,22 @@ All notable changes to this project will be documented in this file.
|
|
|
5
5
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
|
6
6
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
|
7
7
|
|
|
8
|
+
## [0.9.4] - 2026-03-07
|
|
9
|
+
|
|
10
|
+
### 🔧 Maintenance
|
|
11
|
+
- **Security**: Excluded `docs/` folder from npm package to resolve false positive security alerts in Socket.dev regarding example URLs.
|
|
12
|
+
|
|
13
|
+
## [0.9.3] - 2026-03-07
|
|
14
|
+
|
|
15
|
+
### 📚 Documentation
|
|
16
|
+
- **README intro**: Added to the "What is SyntropyLog?" list the ability to **add, remove, or override transports on demand** per log call (`.override()`, `.add()`, `.remove()`), without creating new logger instances.
|
|
17
|
+
- **package.json**: Added `funding` field for open-source visibility.
|
|
18
|
+
|
|
19
|
+
### 🔧 Maintenance
|
|
20
|
+
- No breaking changes. Patch release for documentation and second publication.
|
|
21
|
+
|
|
22
|
+
---
|
|
23
|
+
|
|
8
24
|
## [0.9.2] - 2026-03-04
|
|
9
25
|
|
|
10
26
|
### ⚠️ Breaking changes
|
package/README.md
CHANGED
|
@@ -17,7 +17,7 @@
|
|
|
17
17
|
<a href="https://github.com/Syntropysoft/SyntropyLog/blob/main/LICENSE"><img src="https://img.shields.io/npm/l/syntropylog.svg" alt="License"></a>
|
|
18
18
|
<a href="https://github.com/Syntropysoft/SyntropyLog/actions/workflows/ci.yaml"><img src="https://github.com/Syntropysoft/SyntropyLog/actions/workflows/ci.yaml/badge.svg" alt="CI Status"></a>
|
|
19
19
|
<a href="#"><img src="https://img.shields.io/badge/coverage-84.64%25-brightgreen" alt="Test Coverage"></a>
|
|
20
|
-
<a href="#"><img src="https://img.shields.io/badge/status-v0.9.
|
|
20
|
+
<a href="#"><img src="https://img.shields.io/badge/status-v0.9.3-brightgreen.svg" alt="Version 0.9.3"></a>
|
|
21
21
|
<a href="https://socket.dev/npm/package/syntropylog"><img src="https://socket.dev/api/badge/npm/package/syntropylog" alt="Socket Badge"></a>
|
|
22
22
|
</p>
|
|
23
23
|
|
|
@@ -32,6 +32,7 @@ The core idea is simple: **you declare what your logs should carry, and Syntropy
|
|
|
32
32
|
That means:
|
|
33
33
|
- A **declarative Logging Matrix** that controls exactly which context fields appear at each log level β lean on `info`, full context on `error`.
|
|
34
34
|
- A **fluent logger API** (`withRetention`, `withSource`, `withTransactionId`) that lets you create specialized loggers carrying arbitrary organization-defined metadata.
|
|
35
|
+
- **Add, remove, or override transports on demand** β per log call you can send only to specific transports (`.override()`), add extra destinations (`.add()`), or drop one (`.remove()`), without creating new logger instances.
|
|
35
36
|
- A **MaskingEngine** that redacts sensitive fields before they reach any transport β built-in strategies and fully custom rules.
|
|
36
37
|
- An **Intelligent Serialization Pipeline** that automatically detects and neutralizes circular references, limits object depth, and enforces execution timeouts β making logs immune to application crashes.
|
|
37
38
|
- A **UniversalAdapter** that routes logs to any backend (PostgreSQL, MongoDB, Elasticsearch, S3) via a single `executor` function β no coupling, no lock-in.
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "syntropylog",
|
|
3
|
-
"version": "0.9.
|
|
3
|
+
"version": "0.9.4",
|
|
4
4
|
"engines": {
|
|
5
5
|
"node": ">=20.0.0"
|
|
6
6
|
},
|
|
@@ -112,7 +112,6 @@
|
|
|
112
112
|
"files": [
|
|
113
113
|
"dist",
|
|
114
114
|
"assets",
|
|
115
|
-
"docs",
|
|
116
115
|
"LICENSE",
|
|
117
116
|
"README.md",
|
|
118
117
|
"CHANGELOG.md",
|
|
@@ -123,10 +122,44 @@
|
|
|
123
122
|
"examples/*",
|
|
124
123
|
"modules/@syntropylog/types"
|
|
125
124
|
],
|
|
125
|
+
"scripts": {
|
|
126
|
+
"example:transport-pool": "npx tsx examples/TransportPoolExample.ts",
|
|
127
|
+
"setup": "node scripts/setup-env.js",
|
|
128
|
+
"build": "npm run clean && npm run build:types && rollup -c",
|
|
129
|
+
"build:types": "tsc -p src/tsconfig.json",
|
|
130
|
+
"dev": "rollup -c -w",
|
|
131
|
+
"clean": "rm -rf dist",
|
|
132
|
+
"lint": "eslint . --fix",
|
|
133
|
+
"format": "prettier --write \"src/**/*.ts\"",
|
|
134
|
+
"test": "vitest --run --reporter=verbose --config vitest.config.mjs",
|
|
135
|
+
"test:coverage": "vitest run --coverage --config vitest.config.mjs",
|
|
136
|
+
"test:integration": "vitest run -c vitest.integration.config.mjs",
|
|
137
|
+
"test:all": "npm test && npm run test:integration",
|
|
138
|
+
"check:deps": "depcheck",
|
|
139
|
+
"prepublishOnly": "npm run build",
|
|
140
|
+
"prepare": "husky",
|
|
141
|
+
"changeset": "changeset",
|
|
142
|
+
"version-packages": "changeset version",
|
|
143
|
+
"release": "npm run build && changeset publish",
|
|
144
|
+
"version:patch": "npm version patch",
|
|
145
|
+
"version:minor": "npm version minor",
|
|
146
|
+
"version:major": "npm version major",
|
|
147
|
+
"version:alpha": "npm version prerelease --preid=alpha",
|
|
148
|
+
"version:beta": "npm version prerelease --preid=beta",
|
|
149
|
+
"version:rc": "npm version prerelease --preid=rc",
|
|
150
|
+
"publish:alpha": "./scripts/release-alpha.sh patch",
|
|
151
|
+
"publish:alpha:minor": "./scripts/release-alpha.sh minor",
|
|
152
|
+
"publish:alpha:major": "./scripts/release-alpha.sh major",
|
|
153
|
+
"publish:stable": "npm run test:all && npm run build && npm publish",
|
|
154
|
+
"release:alpha": "./scripts/release-alpha.sh"
|
|
155
|
+
},
|
|
126
156
|
"repository": {
|
|
127
157
|
"type": "git",
|
|
128
158
|
"url": "git+https://github.com/Syntropysoft/SyntropyLog.git"
|
|
129
159
|
},
|
|
160
|
+
"funding": {
|
|
161
|
+
"url": "https://github.com/Syntropysoft/SyntropyLog"
|
|
162
|
+
},
|
|
130
163
|
"publishConfig": {
|
|
131
164
|
"access": "public"
|
|
132
165
|
},
|
|
@@ -162,7 +195,8 @@
|
|
|
162
195
|
"tslib": "^2.8.1",
|
|
163
196
|
"typescript": "^5.7.3",
|
|
164
197
|
"typescript-eslint": "8.56.1",
|
|
165
|
-
"vitest": "^4.0.18"
|
|
198
|
+
"vitest": "^4.0.18",
|
|
199
|
+
"@changesets/cli": "^2.27.1"
|
|
166
200
|
},
|
|
167
201
|
"peerDependencies": {
|
|
168
202
|
"chalk": "^5.4.1",
|
|
@@ -178,31 +212,5 @@
|
|
|
178
212
|
},
|
|
179
213
|
"lint-staged": {
|
|
180
214
|
"src/**/*.ts": "eslint --fix --max-warnings=0"
|
|
181
|
-
},
|
|
182
|
-
"scripts": {
|
|
183
|
-
"example:transport-pool": "npx tsx examples/TransportPoolExample.ts",
|
|
184
|
-
"setup": "node scripts/setup-env.js",
|
|
185
|
-
"build": "npm run clean && npm run build:types && rollup -c",
|
|
186
|
-
"build:types": "tsc -p src/tsconfig.json",
|
|
187
|
-
"dev": "rollup -c -w",
|
|
188
|
-
"clean": "rm -rf dist",
|
|
189
|
-
"lint": "eslint . --fix",
|
|
190
|
-
"format": "prettier --write \"src/**/*.ts\"",
|
|
191
|
-
"test": "vitest --run --reporter=verbose --config vitest.config.mjs",
|
|
192
|
-
"test:coverage": "vitest run --coverage --config vitest.config.mjs",
|
|
193
|
-
"test:integration": "vitest run -c vitest.integration.config.mjs",
|
|
194
|
-
"test:all": "npm test && npm run test:integration",
|
|
195
|
-
"check:deps": "depcheck",
|
|
196
|
-
"version:patch": "npm version patch",
|
|
197
|
-
"version:minor": "npm version minor",
|
|
198
|
-
"version:major": "npm version major",
|
|
199
|
-
"version:alpha": "npm version prerelease --preid=alpha",
|
|
200
|
-
"version:beta": "npm version prerelease --preid=beta",
|
|
201
|
-
"version:rc": "npm version prerelease --preid=rc",
|
|
202
|
-
"publish:alpha": "./scripts/release-alpha.sh patch",
|
|
203
|
-
"publish:alpha:minor": "./scripts/release-alpha.sh minor",
|
|
204
|
-
"publish:alpha:major": "./scripts/release-alpha.sh major",
|
|
205
|
-
"publish:stable": "npm run test:all && npm run build && npm publish",
|
|
206
|
-
"release:alpha": "./scripts/release-alpha.sh"
|
|
207
215
|
}
|
|
208
216
|
}
|
|
@@ -1,127 +0,0 @@
|
|
|
1
|
-
# Análisis: Timeouts en el pipeline de serialización
|
|
2
|
-
|
|
3
|
-
Documento de análisis para revisar cómo se usan los timeouts (config vs. estrategias) y si el código de `selectTimeoutStrategy` / estrategias por tipo de dato sigue teniendo sentido.
|
|
4
|
-
|
|
5
|
-
---
|
|
6
|
-
|
|
7
|
-
## 1. Dónde está el timeout que **sí** limita la ejecución
|
|
8
|
-
|
|
9
|
-
El único lugar que **corta** la ejecución por tiempo es el loop del pipeline en `SerializationPipeline.process()`:
|
|
10
|
-
|
|
11
|
-
```ts
|
|
12
|
-
// SerializationPipeline.ts, ~lΓnea 55-79
|
|
13
|
-
const globalTimeout = context?.serializationContext?.timeoutMs || 50;
|
|
14
|
-
|
|
15
|
-
for (const step of this.steps) {
|
|
16
|
-
const stepExecution = step.execute(currentData, context);
|
|
17
|
-
const timeoutPromise = new Promise<SerializableData>((_, reject) => {
|
|
18
|
-
setTimeout(() => reject(new Error(`Timeout in step '${step.name}' (> ${globalTimeout}ms)`)), globalTimeout);
|
|
19
|
-
});
|
|
20
|
-
currentData = await Promise.race([stepExecution, timeoutPromise]);
|
|
21
|
-
}
|
|
22
|
-
```
|
|
23
|
-
|
|
24
|
-
- **Origen del valor:** `context.serializationContext.timeoutMs` (si no viene, se usa **50 ms**).
|
|
25
|
-
- **Efecto:** Cada step (serialization, hygiene, sanitization, timeout) tiene como mΓ‘ximo `globalTimeout` ms; si se pasa, se rechaza la promesa y el pipeline devuelve `success: false` con el error.
|
|
26
|
-
- **ConclusiΓ³n:** Cualquier serializador (o step) corre dentro de este lΓmite; el que βmandaβ es este `globalTimeout`.
|
|
27
|
-
|
|
28
|
-
---
|
|
29
|
-
|
|
30
|
-
## 2. De dΓ³nde sale `serializationContext` y si lleva `timeoutMs`
|
|
31
|
-
|
|
32
|
-
Flujo actual:
|
|
33
|
-
|
|
34
|
-
1. **Config de la app:** `logger.serializerTimeoutMs` (en `config.schema.ts`, default **50**).
|
|
35
|
-
2. **LifecycleManager** crea `SerializationManager` con:
|
|
36
|
-
- `timeoutMs: this.config.logger?.serializerTimeoutMs`
|
|
37
|
-
β queda en `SerializationManager.config.timeoutMs`.
|
|
38
|
-
3. **LoggerFactory** tambiΓ©n crea un `SerializationManager` con:
|
|
39
|
-
- `timeoutMs: config.logger?.serializerTimeoutMs`
|
|
40
|
-
β mismo valor en `SerializationManager.config.timeoutMs`.
|
|
41
|
-
4. Cuando el **Logger** hace un log, llama a:
|
|
42
|
-
```ts
|
|
43
|
-
this.dependencies.serializationManager.serialize(logEntry, {
|
|
44
|
-
depth: 0,
|
|
45
|
-
maxDepth: 10,
|
|
46
|
-
sensitiveFields: [],
|
|
47
|
-
sanitize: true,
|
|
48
|
-
});
|
|
49
|
-
```
|
|
50
|
-
Ese segundo argumento es el `context` (SerializationContextConfig). **No incluye `timeoutMs`**.
|
|
51
|
-
5. **SerializationManager.serialize()** arma el contexto del pipeline asΓ:
|
|
52
|
-
```ts
|
|
53
|
-
const pipelineContext: SerializationPipelineContext = {
|
|
54
|
-
serializationContext: context, // el que pasΓ³ el Logger (sin timeoutMs)
|
|
55
|
-
sanitizeSensitiveData: this.config.sanitizeSensitiveData,
|
|
56
|
-
...
|
|
57
|
-
};
|
|
58
|
-
```
|
|
59
|
-
No inyecta `this.config.timeoutMs` en `serializationContext`.
|
|
60
|
-
|
|
61
|
-
**Consecuencia:** El pipeline siempre recibe `serializationContext.timeoutMs === undefined` y usa el fallback **50 ms**. El valor de `logger.serializerTimeoutMs` de la config **no llega** al pipeline en el flujo actual del Logger.
|
|
62
|
-
|
|
63
|
-
Para que βcualquier serializador respete el timeout de la configuraciΓ³nβ, habrΓa que pasar ese valor al pipeline, por ejemplo:
|
|
64
|
-
- que el Logger pase `timeoutMs` en el context (leyΓ©ndolo de algΓΊn sitio que tenga la config), o
|
|
65
|
-
- que `SerializationManager.serialize()` complete `serializationContext` con `timeoutMs: this.config.timeoutMs` cuando el caller no lo envΓe.
|
|
66
|
-
|
|
67
|
-
---
|
|
68
|
-
|
|
69
|
-
## 3. QuΓ© hace `operationTimeout` y `selectTimeoutStrategy` (no limitan ejecuciΓ³n)
|
|
70
|
-
|
|
71
|
-
DespuΓ©s del loop de steps, el pipeline hace:
|
|
72
|
-
|
|
73
|
-
```ts
|
|
74
|
-
const timeoutStrategy = this.selectTimeoutStrategy(currentData);
|
|
75
|
-
const operationTimeout = timeoutStrategy.calculateTimeout(currentData);
|
|
76
|
-
this.metrics.operationTimeout = operationTimeout;
|
|
77
|
-
this.metrics.timeoutStrategy = timeoutStrategy.getStrategyName();
|
|
78
|
-
// y se mete en result.metadata
|
|
79
|
-
```
|
|
80
|
-
|
|
81
|
-
- **`selectTimeoutStrategy(data)`:** Siempre usa **DefaultTimeoutStrategy** (5000 ms). Los adaptadores por tipo (Prisma, TypeORM, MySQL, etc.) se eliminaron; solo queda la estrategia `default`.
|
|
82
|
-
- **`operationTimeout`:** Es un nΓΊmero que se escribe en **mΓ©tricas y metadata** (p. ej. βrecomendaciΓ³nβ de timeout para esa operaciΓ³n). **No se usa** para hacer `Promise.race` ni para cortar ningΓΊn step.
|
|
83
|
-
- **`timeoutStrategy`:** Nombre de la estrategia usada, tambiΓ©n solo metadata (mΓ©tricas, `timeoutStrategyDistribution`). En el cΓ³digo actual siempre es `default`.
|
|
84
|
-
|
|
85
|
-
Es decir: todo esto es **informativo**. El lΓmite real de ejecuciΓ³n sigue siendo solo `globalTimeout` (derivado de `serializationContext.timeoutMs` o 50 ms).
|
|
86
|
-
|
|
87
|
-
---
|
|
88
|
-
|
|
89
|
-
## 4. Estrategia de timeout en el pipeline
|
|
90
|
-
|
|
91
|
-
El pipeline usa **una ΓΊnica estrategia de timeout**: **DefaultTimeoutStrategy** (nombre `default`, 5000 ms). No existe ya selecciΓ³n por `data.type`; los adaptadores por tipo (Prisma, TypeORM, MySQL, PostgreSQL, SQL Server, Oracle) se eliminaron. Todo el flujo (Logger, log entry, metadata, etc.) usa siempre la estrategia `default`.
|
|
92
|
-
|
|
93
|
-
---
|
|
94
|
-
|
|
95
|
-
## 5. TimeoutStep (step βtimeoutβ)
|
|
96
|
-
|
|
97
|
-
- Recibe el `Map` de estrategias del pipeline (`this.pipeline['timeoutStrategies']`).
|
|
98
|
-
- En su `execute()` hace su propio `selectTimeoutStrategy(data)` y aΓ±ade al dato:
|
|
99
|
-
- `operationTimeout`, `timeoutStrategy`, `timeoutDuration`, `timeoutApplied`, etc.
|
|
100
|
-
- Tampoco aplica ningΓΊn lΓmite de tiempo a la ejecuciΓ³n; solo **anota metadata** en el objeto que circula por el pipeline. El lΓmite real sigue siendo el `Promise.race` con `globalTimeout` en el pipeline.
|
|
101
|
-
|
|
102
|
-
---
|
|
103
|
-
|
|
104
|
-
## 6. Resumen y conclusiones
|
|
105
|
-
|
|
106
|
-
| Concepto | DΓ³nde estΓ‘ | Efecto real |
|
|
107
|
-
|----------|------------|-------------|
|
|
108
|
-
| **Timeout que limita** | `SerializationPipeline.process()` β `globalTimeout = context.serializationContext?.timeoutMs \|\| 50` | MΓ‘ximo tiempo por step; cualquier serializador estΓ‘ acotado por este valor. |
|
|
109
|
-
| **Config del usuario** | `logger.serializerTimeoutMs` (default 50) | Se guarda en `SerializationManager.config.timeoutMs` pero **no se inyecta** en el context que recibe el pipeline cuando el Logger llama a `serialize()`. |
|
|
110
|
-
| **operationTimeout / timeoutStrategy** | Calculados con `selectTimeoutStrategy()` β siempre `default` | Solo **metadata y mΓ©tricas**. No controlan ni cortan la ejecuciΓ³n. |
|
|
111
|
-
| **Estrategia de timeout** | Solo **DefaultTimeoutStrategy** (`default`, 5000 ms) | Los adaptadores por tipo (Prisma, TypeORM, MySQL, etc.) se eliminaron. |
|
|
112
|
-
|
|
113
|
-
Conclusiones para decidir quΓ© hacer:
|
|
114
|
-
|
|
115
|
-
1. **Que el serializador respete el timeout de configuraciΓ³n:** Hoy no ocurre del todo, porque `serializerTimeoutMs` no llega al pipeline. HabrΓa que conectar `config.timeoutMs` (o equivalente) con `serializationContext.timeoutMs` en las llamadas a `serialize()` (p. ej. en SerializationManager o en el Logger).
|
|
116
|
-
2. **selectTimeoutStrategy:** Simplificado a βsiempre defaultβ. No hay estrategias por tipo; solo **DefaultTimeoutStrategy**.
|
|
117
|
-
3. **Estado del cΓ³digo:** Eliminadas las estrategias por adaptador (Prisma, TypeORM, MySQL, PostgreSQL, SQL Server, Oracle); solo queda la estrategia `default`.
|
|
118
|
-
|
|
119
|
-
---
|
|
120
|
-
|
|
121
|
-
## 7. Referencias rΓ‘pidas de cΓ³digo
|
|
122
|
-
|
|
123
|
-
- Timeout que limita: `src/serialization/SerializationPipeline.ts` (~lΓneas 55β79).
|
|
124
|
-
- Selector de estrategia: `src/serialization/SerializationPipeline.ts` ~`selectTimeoutStrategy`, `initializeDefaultStrategies`, `DefaultTimeoutStrategy` (ΓΊnica estrategia).
|
|
125
|
-
- Context del pipeline: `src/serialization/SerializationManager.ts` ~`serialize()` (construcciΓ³n de `pipelineContext`).
|
|
126
|
-
- Llamada desde el Logger: `src/logger/Logger.ts` ~`_log()` β `serializationManager.serialize(logEntry, { depth, maxDepth, sensitiveFields, sanitize })`.
|
|
127
|
-
- Config: `config.schema.ts` `serializerTimeoutMs`; `LifecycleManager` / `LoggerFactory` donde se crea `SerializationManager` con `timeoutMs`.
|
|
@@ -1,84 +0,0 @@
|
|
|
1
|
-
# Oportunidades de mejora — SyntropyLog
|
|
2
|
-
|
|
3
|
-
Documento generado a partir de una revisiΓ³n del cΓ³digo para usar como backlog o como base para issues en GitHub.
|
|
4
|
-
|
|
5
|
-
---
|
|
6
|
-
|
|
7
|
-
## 1. Consistencia de idioma (prioridad alta) ✅
Hecho (v0.9.2)
|
|
8
|
-
|
|
9
|
-
Implementado en v0.9.2:
|
|
10
|
-
|
|
11
|
-
El proyecto estΓ‘ documentado en inglΓ©s (README, JSDoc, mensajes de usuario), pero hay comentarios y mensajes de error en espaΓ±ol en el pipeline de serializaciΓ³n.
|
|
12
|
-
|
|
13
|
-
| UbicaciΓ³n | Actual | Sugerencia |
|
|
14
|
-
|-----------|--------|------------|
|
|
15
|
-
| `src/serialization/SerializationPipeline.ts` | `Timeout en etapa '${step.name}' (> ${globalTimeout}ms)` | `Timeout in step '${step.name}' (> ${globalTimeout}ms)` |
|
|
16
|
-
| `src/serialization/SerializationPipeline.ts` | Comentarios "Ejecutar pasos", "Carrera de mates", "Seleccionar estrategia..." | Traducir a inglΓ©s |
|
|
17
|
-
| `src/serialization/pipeline/TimeoutStep.ts` | `'Error en timeout'` | `'Timeout error'` |
|
|
18
|
-
| `src/serialization/pipeline/TimeoutStep.ts` | Comentarios "Seleccionar estrategia...", "Calcular timeout..." | Traducir a inglΓ©s |
|
|
19
|
-
|
|
20
|
-
**Beneficio:** Consistencia para contribuidores y usuarios que esperan mensajes en inglΓ©s; mejor experiencia en entornos internacionales.
|
|
21
|
-
|
|
22
|
-
---
|
|
23
|
-
|
|
24
|
-
## 2. ~~TODO pendiente: `BeaconRedis.multi()`~~ ✅
Hecho (v0.9.2)
|
|
25
|
-
|
|
26
|
-
Implementado en v0.9.2:
|
|
27
|
-
|
|
28
|
-
- `RedisCommandExecutor.multi()` devuelve la transacciΓ³n nativa del cliente.
|
|
29
|
-
- `BeaconRedis.multi()` devuelve una `BeaconRedisTransaction` que envuelve la transacciΓ³n nativa; todos los comandos encolables estΓ‘n delegados y `exec()` / `discard()` pasan por `_executeCommand` (logging, timing, errores).
|
|
30
|
-
- `executeScript` dentro de una transacciΓ³n no estΓ‘ soportado y lanza un error claro.
|
|
31
|
-
- Tests aΓ±adidos: transacciΓ³n retornada por `multi()`, instrumentaciΓ³n de `exec()` y `discard()`.
|
|
32
|
-
|
|
33
|
-
---
|
|
34
|
-
|
|
35
|
-
## 3. Tipado y uso de `as any`
|
|
36
|
-
|
|
37
|
-
En `SerializationPipeline.ts`, al construir el resultado se usa `(currentData as any)` para `serializer`, `serializationComplexity`, etc. (aprox. lΓneas 98, 106-107, 112-113).
|
|
38
|
-
|
|
39
|
-
**Sugerencia:** Definir un tipo (por ejemplo `SerializableDataWithMeta`) que extienda `SerializableData` con campos opcionales como `serializer`, `serializationComplexity`, y usarlo en lugar de `as any`. AsΓ se mantiene type-safety y se documenta la forma del dato en ese punto del pipeline.
|
|
40
|
-
|
|
41
|
-
---
|
|
42
|
-
|
|
43
|
-
## 4. Tests y cobertura
|
|
44
|
-
|
|
45
|
-
- Hay buena cobertura de tests unitarios (README indica ~84.64%).
|
|
46
|
-
- `BeaconRedis.multi()` estΓ‘ cubierto (se espera que lance).
|
|
47
|
-
- Los tests de integraciΓ³n viven en `test_integration/`.
|
|
48
|
-
|
|
49
|
-
**Posibles mejoras:**
|
|
50
|
-
|
|
51
|
-
- AΓ±adir tests que fuercen timeout en el pipeline (pasos lentos) para validar mensajes de error y que no se pierdan datos crΓticos.
|
|
52
|
-
- Revisar si hay ramas de `LifecycleManager` (por ejemplo init con/sin HTTP, con/sin brokers) sin cubrir.
|
|
53
|
-
|
|
54
|
-
---
|
|
55
|
-
|
|
56
|
-
## 5. DocumentaciΓ³n y DX
|
|
57
|
-
|
|
58
|
-
- README y docs en `docs/` estΓ‘n muy completos.
|
|
59
|
-
- **Sugerencia:** En la secciΓ³n de Redis del README (o en una guΓa de Redis), mencionar que `multi()` no estΓ‘ implementado en el cliente real y que para transacciones en tests se use `BeaconRedisMock`.
|
|
60
|
-
|
|
61
|
-
---
|
|
62
|
-
|
|
63
|
-
## 6. Seguridad y mantenimiento (ya abordados recientemente)
|
|
64
|
-
|
|
65
|
-
Por el CHANGELOG (0.9.1, 0.8.13):
|
|
66
|
-
|
|
67
|
-
- CΓ³digo ofuscado eliminado en el mΓ³dulo Redis.
|
|
68
|
-
- `executeScript` refactorizado para evitar falsos positivos de "eval" en scanners.
|
|
69
|
-
|
|
70
|
-
**Sugerencia:** Mantener esta lΓnea en futuros cambios (evitar patrones que puedan disparar alertas de seguridad sin necesidad).
|
|
71
|
-
|
|
72
|
-
---
|
|
73
|
-
|
|
74
|
-
## 7. Resumen de acciones sugeridas
|
|
75
|
-
|
|
76
|
-
| # | AcciΓ³n | Esfuerzo | Impacto |
|
|
77
|
-
|---|--------|----------|---------|
|
|
78
|
-
| 1 | Unificar mensajes y comentarios a inglΓ©s en serialization pipeline y TimeoutStep | Bajo | Consistencia, profesionalismo |
|
|
79
|
-
| 2 | Decidir e implementar o documentar `BeaconRedis.multi()` | Medio/Alto | Completitud de API Redis |
|
|
80
|
-
| 3 | Reemplazar `as any` por tipos explΓcitos en `SerializationPipeline` | Bajo | Type-safety, mantenibilidad |
|
|
81
|
-
| 4 | Tests de timeout en pipeline y revisiΓ³n de cobertura de LifecycleManager | Medio | Confiabilidad |
|
|
82
|
-
| 5 | Documentar limitaciΓ³n de `multi()` en README o guΓa Redis | Bajo | DX |
|
|
83
|
-
|
|
84
|
-
Si querΓ©s, el siguiente paso puede ser implementar el punto 1 (mensajes y comentarios en inglΓ©s) o el 3 (tipado en el pipeline); ambos son cambios acotados y de bajo riesgo.
|
|
@@ -1,22 +0,0 @@
|
|
|
1
|
-
# PR description (copy into your Pull Request)
|
|
2
|
-
|
|
3
|
-
---
|
|
4
|
-
|
|
5
|
-
## Breaking changes
|
|
6
|
-
|
|
7
|
-
- **Config**: `syntropyLog.init()` no longer accepts `http` or `brokers`; the core only manages Redis.
|
|
8
|
-
- **API**: `SyntropyLog.getHttp()` and `SyntropyLog.getBroker()` have been removed. Callers must obtain HTTP/broker clients from their own app or from `syntropylog/http` / `syntropylog/brokers` directly.
|
|
9
|
-
|
|
10
|
-
## Migration
|
|
11
|
-
|
|
12
|
-
- **If you used `getHttp()` or `getBroker()`**: Migrate to whatever your app uses to expose those clients (e.g. dependency injection, or instantiating from `syntropylog/http` / `syntropylog/brokers`).
|
|
13
|
-
- **If you passed `http` or `brokers` in `init()`**: Remove those options; the core no longer uses them.
|
|
14
|
-
|
|
15
|
-
## Context for reviewers (High Risk)
|
|
16
|
-
|
|
17
|
-
This PR is marked **High Risk** because it removes public config/API surface and changes logger transport selection and Redis transaction behavior. To mitigate:
|
|
18
|
-
|
|
19
|
-
- **Tests**: Unit and integration tests cover the new flows; pre-commit runs lint, tests, and build.
|
|
20
|
-
- **Breaking changes and migration** are documented in `CHANGELOG.md` (section [0.9.2], including the Migration subsection) so existing integrations can plan the upgrade.
|
|
21
|
-
|
|
22
|
-
---
|
|
@@ -1,50 +0,0 @@
|
|
|
1
|
-
# Revisión: mocks → funciones puras, SOLID, guardas
|
|
2
|
-
|
|
3
|
-
RevisiΓ³n en bloques de **BeaconRedisMock** aplicando guardas, helpers puros y menos duplicaciΓ³n.
|
|
4
|
-
|
|
5
|
-
---
|
|
6
|
-
|
|
7
|
-
## 1. BeaconRedisMock.ts
|
|
8
|
-
|
|
9
|
-
### Hecho
|
|
10
|
-
- **Guardas**: `_getValidEntry`: orden claro (no entry β null, expired β delete + null, wrong type β throw). `expire`: early return `if (!entry) return false`. `incrBy` / `hIncrBy`: `Number.isNaN` y throw temprano.
|
|
11
|
-
- **Puro**: `_serialize` ya era puro; aΓ±adido `_toKeyArray(keys)` estΓ‘tico puro para normalizar `string | string[]` β `string[]`.
|
|
12
|
-
- **Menos mutaciΓ³n directa en loops**: `del` y `exists` usan `_toKeyArray` + `filter().length`. `hDel` usa `filter` para contar borrados. `hSet`: rama string con early return; uso de `Object.hasOwn` donde aplica.
|
|
13
|
-
- **ILogger**: `updateConfig` corrige orden de argumentos a `(meta, message)`.
|
|
14
|
-
|
|
15
|
-
### Opcional (siguiente bloque)
|
|
16
|
-
- Extraer lΓ³gica de βclave expiradaβ a un helper que reciba `StoreEntry` y devuelva boolean (puro).
|
|
17
|
-
- `scan`: extraer el filtro por MATCH a una funciΓ³n pura `(key, pattern) => boolean`.
|
|
18
|
-
|
|
19
|
-
---
|
|
20
|
-
|
|
21
|
-
## Principios aplicados
|
|
22
|
-
|
|
23
|
-
| Principio | AplicaciΓ³n |
|
|
24
|
-
|-----------------|----------------------------------------------------------------------------|
|
|
25
|
-
| Guard clauses | Early return / throw al inicio de mΓ©todo; un solo nivel de anidaciΓ³n. |
|
|
26
|
-
| Pure functions | `_serialize`, `_toKeyArray`; helpers sin efectos. |
|
|
27
|
-
| SOLID (SRP) | Responsabilidades concentradas por componente. |
|
|
28
|
-
| DRY | Helpers reutilizables y un solo loop donde aplica. |
|
|
29
|
-
| Menos mutaciΓ³n | Uso de `filter`/`length` y returns tempranos en lugar de contadores en loops. |
|
|
30
|
-
|
|
31
|
-
Los mocks siguen siendo wrappers de la interfaz original: la lΓ³gica extra es mΓnima y predecible (guardas + delegaciΓ³n).
|
|
32
|
-
|
|
33
|
-
---
|
|
34
|
-
|
|
35
|
-
## 2. TimeoutStep.ts
|
|
36
|
-
|
|
37
|
-
### Hecho
|
|
38
|
-
- **Pure**: `buildSuccessPayload` y `buildErrorPayload` son funciones puras (data + duraciΓ³n + estrategia/error β objeto).
|
|
39
|
-
- **Guardas**: `execute` intenta estrategia + cΓ‘lculo; en catch solo construye payload de error. Constante `DEFAULT_TIMEOUT_MS`.
|
|
40
|
-
- **Estrategia ΓΊnica**: Solo se usa la estrategia `default`; `selectTimeoutStrategy` devuelve `timeoutStrategies.get('default') ?? null` (sin lookup por tipo de data).
|
|
41
|
-
|
|
42
|
-
---
|
|
43
|
-
|
|
44
|
-
## 3. SerializationPipeline.ts
|
|
45
|
-
|
|
46
|
-
### Hecho
|
|
47
|
-
- **selectTimeoutStrategy**: Obtiene solo `timeoutStrategies.get('default')`; guarda que lanza si no hay estrategia default.
|
|
48
|
-
- **Pure**: `buildSuccessResult` y `buildErrorResult` estΓ‘ticos que construyen `SerializationResult` sin efectos.
|
|
49
|
-
- **Constantes**: `DEFAULT_SERIALIZER`, `UNKNOWN_STRATEGY`; `globalTimeout` con `??` 50.
|
|
50
|
-
- **Estrategia ΓΊnica**: Solo **DefaultTimeoutStrategy** (5000 ms). Eliminadas las estrategias por adaptador (Prisma, TypeORM, MySQL, PostgreSQL, SQL Server, Oracle).
|
|
@@ -1,187 +0,0 @@
|
|
|
1
|
-
# Singletons y registro de instancias — Diseño
|
|
2
|
-
|
|
3
|
-
Documento de diseΓ±o para el **manejador de instancias** genΓ©rico del framework. Refleja lo acordado: estructura, polΓtica estricta, teardown en orden inverso y API solo para tests.
|
|
4
|
-
|
|
5
|
-
---
|
|
6
|
-
|
|
7
|
-
## 1. Qué hay hoy
|
|
8
|
-
|
|
9
|
-
### 1.1 Singleton global del framework
|
|
10
|
-
|
|
11
|
-
- **SyntropyLog:** una sola instancia por proceso (`getInstance()`). Punto de entrada.
|
|
12
|
-
- **Export** `syntropyLog`: llama a `getInstance()` al importar el mΓ³dulo.
|
|
13
|
-
|
|
14
|
-
### 1.2 Managers βpor tipoβ (registro por nombre)
|
|
15
|
-
|
|
16
|
-
Tres managers con varias instancias por nombre:
|
|
17
|
-
|
|
18
|
-
| Manager | Config | API en SyntropyLog | QuΓ© guarda |
|
|
19
|
-
|-----------------|---------------|--------------------|-------------------------------|
|
|
20
|
-
| **RedisManager** | `config.redis` | `getRedis(name)` | Clientes Redis instrumentados (BeaconRedis) |
|
|
21
|
-
| **HttpManager** | `config.http` | `getHttp(name)` | Clientes HTTP instrumentados |
|
|
22
|
-
| **BrokerManager** | `config.brokers`| `getBroker(name)` | Clientes de broker instrumentados |
|
|
23
|
-
|
|
24
|
-
Cada manager se crea en `LifecycleManager.init()` si viene su bloque en la config. En `shutdown()` se llama a `redisManager?.shutdown()`, `httpManager?.shutdown()`, `brokerManager?.shutdown()`.
|
|
25
|
-
|
|
26
|
-
---
|
|
27
|
-
|
|
28
|
-
## 2. Objetivo acordado
|
|
29
|
-
|
|
30
|
-
- **Mantener:** Solo el **cliente Redis instrumentado** (RedisManager + BeaconRedis). Γnico βtipoβ con manager propio y config dedicada.
|
|
31
|
-
- **Quitar:** HTTP/axios y brokers del core: HttpManager, BrokerManager, config `http`/`brokers`, `getHttp`/`getBroker`.
|
|
32
|
-
- **AΓ±adir:** Un **registro genΓ©rico de instancias** (nombre β entrada con valor y dispose) para que el desarrollador guarde lo que quiera: axios, Kafka, clases propias, diccionarios, etc.
|
|
33
|
-
|
|
34
|
-
---
|
|
35
|
-
|
|
36
|
-
## 3. Estructura del registro
|
|
37
|
-
|
|
38
|
-
### 3.1 Entrada (por llave)
|
|
39
|
-
|
|
40
|
-
Cada entrada del registro tiene:
|
|
41
|
-
|
|
42
|
-
| Campo | Tipo | DescripciΓ³n |
|
|
43
|
-
|----------|----------|-------------|
|
|
44
|
-
| **nombre** | `string` | Llave para registrar y para `get(nombre)`. |
|
|
45
|
-
| **valor** | cualquiera | La instancia que se devuelve con `get(nombre)`. |
|
|
46
|
-
| **dispose** | `() => void \| Promise<void>` (opcional) | FunciΓ³n de limpieza; se llama en el teardown del framework. |
|
|
47
|
-
|
|
48
|
-
**Forma de la API (extensible):** se puede usar un objeto de opciones para poder aΓ±adir campos despuΓ©s sin romper la API:
|
|
49
|
-
|
|
50
|
-
```ts
|
|
51
|
-
syntropyLog.register('kafka-producer', {
|
|
52
|
-
value: producer,
|
|
53
|
-
dispose: async () => await producer.disconnect(),
|
|
54
|
-
});
|
|
55
|
-
```
|
|
56
|
-
|
|
57
|
-
O firma corta cuando no hay dispose:
|
|
58
|
-
|
|
59
|
-
```ts
|
|
60
|
-
syntropyLog.register('api', axiosInstance);
|
|
61
|
-
// equivalente a { value: axiosInstance }
|
|
62
|
-
```
|
|
63
|
-
|
|
64
|
-
### 3.2 PolΓtica estricta (producciΓ³n)
|
|
65
|
-
|
|
66
|
-
- **Si la llave ya existe β error.** No se permite sobrescribir. Comportamiento de diccionario estricto.
|
|
67
|
-
- **Arranque:** Si algo falla (init, registro duplicado, etc.), la librerΓa **informa el error y no levanta la API**. Fail fast.
|
|
68
|
-
- **Teardown:** Se ejecuta en **orden inverso** al de registro (el ΓΊltimo registrado se dispone primero). El registro debe preservar orden (p. ej. array o estructura ordenada).
|
|
69
|
-
|
|
70
|
-
### 3.3 Dispose y errores
|
|
71
|
-
|
|
72
|
-
- Si un valor tiene `dispose`, se llama en el teardown (en orden inverso al registro).
|
|
73
|
-
- Si no tiene `dispose`, no se hace nada con ese valor.
|
|
74
|
-
- Si un `dispose` falla (throw o reject), se recomienda usar algo tipo `Promise.allSettled`: ejecutar todos los dispose y luego reportar fallos (log o evento), para no dejar otros recursos sin cerrar.
|
|
75
|
-
|
|
76
|
-
---
|
|
77
|
-
|
|
78
|
-
## 4. API pΓΊblica (producciΓ³n)
|
|
79
|
-
|
|
80
|
-
- **`register(name: string, value: unknown): void`**
|
|
81
|
-
**`register(name: string, entry: { value: unknown; dispose?: () => void \| Promise<void> }): void`**
|
|
82
|
-
Registra una instancia. Si la llave ya existe, **lanza error**. Orden de registro se usa para el teardown inverso.
|
|
83
|
-
|
|
84
|
-
- **`get<T>(name: string): T`**
|
|
85
|
-
Devuelve la instancia registrada; lanza si no existe.
|
|
86
|
-
|
|
87
|
-
- **`has(name: string): boolean`**
|
|
88
|
-
Indica si hay una entrada con ese nombre.
|
|
89
|
-
|
|
90
|
-
Redis sigue fuera del registro: se usa `getRedis(name)` como hasta ahora.
|
|
91
|
-
|
|
92
|
-
---
|
|
93
|
-
|
|
94
|
-
## 5. Ejemplos de uso
|
|
95
|
-
|
|
96
|
-
### Kafka (cliente con cierre)
|
|
97
|
-
|
|
98
|
-
```ts
|
|
99
|
-
const kafka = new Kafka({ clientId: 'my-app', brokers: ['localhost:9092'] });
|
|
100
|
-
const producer = kafka.producer();
|
|
101
|
-
await producer.connect();
|
|
102
|
-
|
|
103
|
-
syntropyLog.register('kafka-producer', {
|
|
104
|
-
value: producer,
|
|
105
|
-
dispose: async () => await producer.disconnect(),
|
|
106
|
-
});
|
|
107
|
-
|
|
108
|
-
const eventProducer = syntropyLog.get<KafkaProducer>('kafka-producer');
|
|
109
|
-
await eventProducer.send({ topic: 'events', messages: [...] });
|
|
110
|
-
```
|
|
111
|
-
|
|
112
|
-
### Axios (sin cierre)
|
|
113
|
-
|
|
114
|
-
```ts
|
|
115
|
-
const api = axios.create({ baseURL: 'https://api.ejemplo.com' });
|
|
116
|
-
api.interceptors.request.use((req) => {
|
|
117
|
-
req.headers['x-correlation-id'] = syntropyLog.getContextManager().getCorrelationId();
|
|
118
|
-
return req;
|
|
119
|
-
});
|
|
120
|
-
|
|
121
|
-
syntropyLog.register('api', api);
|
|
122
|
-
|
|
123
|
-
const apiClient = syntropyLog.get<AxiosInstance>('api');
|
|
124
|
-
const { data } = await apiClient.get('/users');
|
|
125
|
-
```
|
|
126
|
-
|
|
127
|
-
### Diccionario comΓΊn (objeto plano)
|
|
128
|
-
|
|
129
|
-
```ts
|
|
130
|
-
const featureFlags = { betaCheckout: true, newDashboard: false };
|
|
131
|
-
syntropyLog.register('feature-flags', featureFlags);
|
|
132
|
-
|
|
133
|
-
const flags = syntropyLog.get<typeof featureFlags>('feature-flags');
|
|
134
|
-
if (flags.betaCheckout) { ... }
|
|
135
|
-
```
|
|
136
|
-
|
|
137
|
-
---
|
|
138
|
-
|
|
139
|
-
## 6. Tests: reemplazar y dejar a cero
|
|
140
|
-
|
|
141
|
-
Objetivo: poder correr los tests **como la aplicaciΓ³n** (mismo init, mismos registros) y luego **reemplazar** entradas por mocks, y **dejar la instancia a cero** para el siguiente test. Todo esto **solo en test**, sin mezclar con producciΓ³n.
|
|
142
|
-
|
|
143
|
-
### 6.1 API solo para test (propuesta)
|
|
144
|
-
|
|
145
|
-
- **Reemplazar una entrada:** p. ej. `_replaceForTesting(name, value, dispose?)`. Permite que el test, despuΓ©s del init, sustituya una entrada (p. ej. `'api'`) por un mock. Solo disponible o con efecto en entorno de test.
|
|
146
|
-
|
|
147
|
-
- **Reemplazar toda la lista:** p. ej. `_replaceRegistryForTesting(entries)` o equivalente (vaciar y registrar una lista de entradas). El test puede tirar todo lo registrado en init y poner solo sus mocks.
|
|
148
|
-
|
|
149
|
-
- **Dejar a cero para el siguiente test:** p. ej. `_clearRegistryForTesting()` que vacΓe el registro. Si `_resetForTesting()` ya recrea el `LifecycleManager`, ese reset puede incluir un registro nuevo y vacΓo; asΓ βa ceroβ = mismo reset que hoy para estado limpio.
|
|
150
|
-
|
|
151
|
-
Nombres con prefijo `_` y sufijo `ForTesting` dejan claro que es API solo para tests.
|
|
152
|
-
|
|
153
|
-
### 6.2 Mocks sin columna extra
|
|
154
|
-
|
|
155
|
-
No se aΓ±ade ninguna columna βes mockβ en la entrada. En tests se usa el **mismo registro**: el test registra **el mock** en lugar del objeto real (o reemplaza con la API de test). Mismo nombre, mismo `get('api')`; lo que cambia es quΓ© instancia se registrΓ³.
|
|
156
|
-
|
|
157
|
-
---
|
|
158
|
-
|
|
159
|
-
## 7. Resumen de impacto (al implementar)
|
|
160
|
-
|
|
161
|
-
| Γrea | AcciΓ³n |
|
|
162
|
-
|-------------------|--------|
|
|
163
|
-
| **LifecycleManager** | Quitar HttpManager y BrokerManager. AΓ±adir registro genΓ©rico (ordenado). En shutdown: ejecutar dispose del registro en orden inverso, luego `redisManager?.shutdown()`. |
|
|
164
|
-
| **SyntropyLog** | Quitar `getHttp` y `getBroker`. AΓ±adir `register` y `get` (y `has`). Mantener `getRedis(name)`. Exponer API de test: reemplazo y clear. |
|
|
165
|
-
| **Config (schema)** | Quitar (o deprecar) `http` y `brokers`; mantener `redis`. |
|
|
166
|
-
| **Tests / mocks** | Dejar de usar config HTTP/broker y getHttp/getBroker. Usar registro; en test usar reemplazo/clear segΓΊn necesidad. Ajustar o eliminar tests de HttpManager/BrokerManager. |
|
|
167
|
-
| **DocumentaciΓ³n** | Explicar que solo Redis es βtipo integradoβ; el resto va al registro genΓ©rico. Ejemplos (Kafka, Axios, diccionario) y uso en tests. |
|
|
168
|
-
|
|
169
|
-
---
|
|
170
|
-
|
|
171
|
-
## 8. Cerrado vs pendiente
|
|
172
|
-
|
|
173
|
-
**Cerrado:**
|
|
174
|
-
|
|
175
|
-
- Estructura de entrada: nombre, valor, dispose (opcional); objeto de opciones para extensibilidad.
|
|
176
|
-
- PolΓtica: llave existente β error; fail fast en arranque; teardown en orden inverso.
|
|
177
|
-
- Ejemplos: Kafka, Axios, diccionario.
|
|
178
|
-
- Tests: reemplazar por mocks y dejar a cero; API solo para test; sin columna βmockβ.
|
|
179
|
-
|
|
180
|
-
**Pendiente (detalle de implementaciΓ³n):**
|
|
181
|
-
|
|
182
|
-
- Nombre exacto del registro: `InstanceRegistry`, `InstanceManager`, otro.
|
|
183
|
-
- Nombre de mΓ©todos: `get` / `register` vs `getInstance` / `registerInstance`.
|
|
184
|
-
- Registro solo en estado READY o tambiΓ©n antes de `init()`.
|
|
185
|
-
- Mantener o no los mΓ³dulos `src/http` y `src/brokers` como utilidades opcionales.
|
|
186
|
-
|
|
187
|
-
Cuando se implemente, este doc sirve como referencia del diseΓ±o acordado.
|
|
@@ -1,178 +0,0 @@
|
|
|
1
|
-
# Transports condicionales por ambiente
|
|
2
|
-
|
|
3
|
-
Objetivo: poder habilitar o deshabilitar transports segΓΊn el ambiente (NODE_ENV, APP_ENV, etc.) y repartir salida (consola, archivo, Azure, etc.) de forma condicional sin duplicar config.
|
|
4
|
-
|
|
5
|
-
---
|
|
6
|
-
|
|
7
|
-
## Forma recomendada: transportList + env
|
|
8
|
-
|
|
9
|
-
Lista de transports **separada** del criterio por ambiente: definΓs un pool (nombre β transport) y, por ambiente, quΓ© nombres del pool se usan como default. Muy flexible y declarativo.
|
|
10
|
-
|
|
11
|
-
- **`transportList`:** `Record<string, Transport>` β pool de todos los transports (nombre β instancia).
|
|
12
|
-
- **`env`:** `Record<string, string[]>` β por cada ambiente, lista de **nombres** del pool que se usan como default.
|
|
13
|
-
|
|
14
|
-
El ambiente actual se lee de `process.env[logger.envKey ?? 'NODE_ENV']`. Si el ambiente no estΓ‘ en `env`, el default queda vacΓo (conviene definir una entrada por cada env que usΓ©s).
|
|
15
|
-
|
|
16
|
-
**Ejemplo (autocontenido: todo a consola con simulados):**
|
|
17
|
-
|
|
18
|
-
En el ejemplo, `db`, `azure` y `archivo` se simulan con `AdapterTransport` + `UniversalAdapter` mandando a consola con una etiqueta. En producciΓ³n reemplazΓ‘s por el transport real (Azure, DB, archivo, etc.).
|
|
19
|
-
|
|
20
|
-
```ts
|
|
21
|
-
import { syntropyLog, ColorfulConsoleTransport, AdapterTransport, UniversalAdapter } from 'syntropylog';
|
|
22
|
-
|
|
23
|
-
// Registro tipo mock: cada βdestinoβ va a consola con etiqueta (solo para el ejemplo).
|
|
24
|
-
const mockToConsole = (label: string) =>
|
|
25
|
-
new AdapterTransport({
|
|
26
|
-
name: label,
|
|
27
|
-
adapter: new UniversalAdapter({
|
|
28
|
-
executor: (data) => console.log(`[${label}]`, JSON.stringify(data)),
|
|
29
|
-
}),
|
|
30
|
-
});
|
|
31
|
-
|
|
32
|
-
await syntropyLog.init({
|
|
33
|
-
logger: {
|
|
34
|
-
envKey: 'NODE_ENV',
|
|
35
|
-
transportList: {
|
|
36
|
-
consola: new ColorfulConsoleTransport({ name: 'consola' }),
|
|
37
|
-
db: mockToConsole('db'),
|
|
38
|
-
azure: mockToConsole('azure'),
|
|
39
|
-
archivo: mockToConsole('archivo'),
|
|
40
|
-
},
|
|
41
|
-
env: {
|
|
42
|
-
development: ['consola'],
|
|
43
|
-
staging: ['consola', 'archivo', 'azure'],
|
|
44
|
-
production: ['consola', 'db', 'azure'],
|
|
45
|
-
},
|
|
46
|
-
},
|
|
47
|
-
redis: { instances: [] },
|
|
48
|
-
});
|
|
49
|
-
|
|
50
|
-
const log = syntropyLog.getLogger('app');
|
|
51
|
-
log.info('default segΓΊn env');
|
|
52
|
-
log.override('consola').info('solo consola');
|
|
53
|
-
```
|
|
54
|
-
|
|
55
|
-
Override/add/remove por llamada usan los mismos nombres del pool (`logger.override('consola').info('...')`). En producciΓ³n, reemplazΓ‘s `mockToConsole('db')` por tu transport real (p. ej. uno que use UniversalAdapter con un executor que persista en DB).
|
|
56
|
-
|
|
57
|
-
**Prioridad:** Si estΓ‘n `transportList` y `env`, se usa esta forma. Si no, se usa la forma clΓ‘sica `transports` (compatibilidad).
|
|
58
|
-
|
|
59
|
-
---
|
|
60
|
-
|
|
61
|
-
## Forma clΓ‘sica (compatibilidad): transports
|
|
62
|
-
|
|
63
|
-
### Descriptor en config (declarativo, por env)
|
|
64
|
-
|
|
65
|
-
En `logger.transports` (forma antigua) cada elemento puede ser:
|
|
66
|
-
|
|
67
|
-
- **Un `Transport`** β siempre activo.
|
|
68
|
-
- **Un descriptor** `{ transport: Transport, env?: string | string[] }` β el transport solo se incluye cuando el **ambiente actual** estΓ‘ en la lista. Si no se pone `env`, se considera activo en todos los ambientes.
|
|
69
|
-
|
|
70
|
-
El "ambiente actual" se lee de una variable de entorno. Por defecto `NODE_ENV`. Se puede cambiar con `logger.envKey` (ej. `'APP_ENV'`).
|
|
71
|
-
|
|
72
|
-
**Ejemplo:**
|
|
73
|
-
|
|
74
|
-
```ts
|
|
75
|
-
await syntropyLog.init({
|
|
76
|
-
logger: {
|
|
77
|
-
envKey: 'NODE_ENV', // opcional; por defecto es 'NODE_ENV'
|
|
78
|
-
transports: [
|
|
79
|
-
// Siempre: consola colorida en desarrollo
|
|
80
|
-
new ColorfulConsoleTransport(),
|
|
81
|
-
// Solo en production: Azure
|
|
82
|
-
{ transport: new AzureLogTransport({ ... }), env: 'production' },
|
|
83
|
-
// En production y staging: archivo
|
|
84
|
-
{ transport: new FileTransport({ path: 'app.log' }), env: ['production', 'staging'] },
|
|
85
|
-
],
|
|
86
|
-
},
|
|
87
|
-
// ...
|
|
88
|
-
});
|
|
89
|
-
```
|
|
90
|
-
|
|
91
|
-
Comportamiento:
|
|
92
|
-
|
|
93
|
-
- **development:** solo ColorfulConsoleTransport.
|
|
94
|
-
- **staging:** ColorfulConsoleTransport + FileTransport.
|
|
95
|
-
- **production:** ColorfulConsoleTransport + AzureLogTransport + FileTransport.
|
|
96
|
-
|
|
97
|
-
Si no indicΓ‘s `env`, el transport se usa en todos los ambientes. AsΓ podΓ©s βpartirβ quΓ© va a consola, a archivo o a Azure segΓΊn el env, sin APIs adicionales.
|
|
98
|
-
|
|
99
|
-
---
|
|
100
|
-
|
|
101
|
-
### 2. ConditionalTransport (flexible, por funciΓ³n)
|
|
102
|
-
|
|
103
|
-
Para condiciones que no sean solo βenv en listaβ (por ejemplo βsi existe cierta variableβ, βsi es martesβ, etc.), un transport **wrapper** que delega solo cuando una funciΓ³n devuelve true:
|
|
104
|
-
|
|
105
|
-
```ts
|
|
106
|
-
import { ConditionalTransport } from 'syntropylog';
|
|
107
|
-
|
|
108
|
-
new ConditionalTransport({
|
|
109
|
-
transport: new AzureLogTransport({ ... }),
|
|
110
|
-
enableWhen: () => process.env.NODE_ENV === 'production' && process.env.ENABLE_AZURE === '1',
|
|
111
|
-
})
|
|
112
|
-
```
|
|
113
|
-
|
|
114
|
-
En `log(entry)`: si `enableWhen()` es true, se llama al transport envuelto; si no, no se hace nada. No requiere cambios en el schema; es composiciΓ³n.
|
|
115
|
-
|
|
116
|
-
---
|
|
117
|
-
|
|
118
|
-
## 3. Override por llamada: .override(), .add(), .remove()
|
|
119
|
-
|
|
120
|
-
Objetivo: tener un **pool de N transports** configurados y nombrados (por ambiente, etc.) y, **solo para un caso puntual**, decidir a dΓ³nde va ese log sin cambiar la config global. API declarativa y fluida.
|
|
121
|
-
|
|
122
|
-
### 3.1 Pool de transports con nombre
|
|
123
|
-
|
|
124
|
-
Todos los transports configurados deben tener **nombre** (ej. `name: 'consola'`, `name: 'db'`, `name: 'azure'`, `name: 'archivo'`). Esa lista N es el universo de destinos; en cada ambiente suelen estar activos solo algunos (ej. 2β4). Cualquier override/add/remove usa **solo nombres de ese pool**; no se inventan transports nuevos en la llamada.
|
|
125
|
-
|
|
126
|
-
### 3.2 Tres formas (fluent, aplican al siguiente log)
|
|
127
|
-
|
|
128
|
-
| MΓ©todo | Significado | Ejemplo |
|
|
129
|
-
|--------|-------------|---------|
|
|
130
|
-
| **`.override("a", "b")`** | Para este log, **solo** estos transports. Lista exacta. | `logger.override("consola").info("solo consola")` |
|
|
131
|
-
| **`.add("x")`** | Para este log, default **mΓ‘s** estos (de la lista configurada). Encadenable. | `logger.add("azure").info("transacciΓ³n especial")` |
|
|
132
|
-
| **`.remove("x").remove("z")`** | Para este log, default **menos** estos. Encadenable. | `logger.remove("db").remove("azure").info("debug, no guardar")` |
|
|
133
|
-
|
|
134
|
-
- **Override** reemplaza la lista efectiva por exactamente la que indicΓ‘s. Si usΓ‘s override, no se mezcla con default.
|
|
135
|
-
- **Add** y **remove** se aplican sobre el default (o sobre lo que quede tras add/remove). Se pueden encadenar: `logger.remove("db").add("archivo").info("...")` β default β db + archivo.
|
|
136
|
-
- Aplica al **siguiente log**; la llamada siguiente sin override/add/remove vuelve al default.
|
|
137
|
-
|
|
138
|
-
### 3.3 Ejemplos
|
|
139
|
-
|
|
140
|
-
```ts
|
|
141
|
-
// Solo a consola (override)
|
|
142
|
-
logger.override("consola").info("buscando un error, no mandar a DB");
|
|
143
|
-
|
|
144
|
-
// Solo consola y archivo
|
|
145
|
-
logger.override("consola", "archivo").info("backup manual");
|
|
146
|
-
|
|
147
|
-
// Default + Azure para esta lΓnea
|
|
148
|
-
logger.add("azure").info("transacciΓ³n que tambiΓ©n va a Azure");
|
|
149
|
-
|
|
150
|
-
// Default menos DB y Azure (ej. debug)
|
|
151
|
-
logger.remove("db").remove("azure").info("solo ver en consola");
|
|
152
|
-
|
|
153
|
-
// Default β db + archivo
|
|
154
|
-
logger.remove("db").add("archivo").info("auditorΓa a archivo sin DB");
|
|
155
|
-
```
|
|
156
|
-
|
|
157
|
-
### 3.4 Resumen de la idea
|
|
158
|
-
|
|
159
|
-
- ConfigurΓ‘s una lista N de transports (con nombre y por ambiente).
|
|
160
|
-
- Para el 99,9 % de los casos usΓ‘s el default que da esa config.
|
|
161
|
-
- Para un caso puntual: **override** (βsolo estosβ), **add** (βdefault + estosβ) o **remove** (βdefault β estosβ), siempre sobre la lista que ya configuraste. Declarativo y fluido.
|
|
162
|
-
|
|
163
|
-
---
|
|
164
|
-
|
|
165
|
-
## Resumen
|
|
166
|
-
|
|
167
|
-
| Necesidad | SoluciΓ³n |
|
|
168
|
-
|-----------|----------|
|
|
169
|
-
| **Pool + env (recomendado)** | `logger.transportList` (nombre β Transport) + `logger.env` (env β nombres). Ej: `env: { development: ['consola'], production: ['consola','db'] }`. |
|
|
170
|
-
| Habilitar transport solo en ciertos env (forma clΓ‘sica) | Descriptor `{ transport, env: 'production' }` o `env: ['production', 'staging']` en `logger.transports`. |
|
|
171
|
-
| Variable de ambiente distinta de NODE_ENV | `logger.envKey: 'APP_ENV'` (o la que uses). |
|
|
172
|
-
| CondiciΓ³n arbitraria (funciΓ³n) | `ConditionalTransport({ transport, enableWhen: () => boolean })`. |
|
|
173
|
-
| Algunos a consola, otros a archivo/Azure, segΓΊn env | Mismo array de transports: algunos sin `env` (siempre), otros con `env` para filtrar por ambiente. |
|
|
174
|
-
| **Por llamada: solo estos destinos** | `logger.override("consola", "archivo").info("...")` β solo esos transports (del pool configurado). |
|
|
175
|
-
| **Por llamada: default + algunos** | `logger.add("azure").info("...")` β fluido, encadenable. |
|
|
176
|
-
| **Por llamada: default β algunos** | `logger.remove("db").remove("azure").info("...")` β fluido, encadenable. |
|
|
177
|
-
|
|
178
|
-
ImplementaciΓ³n: en `LoggerFactory`, al construir la lista de transports, resolver el ambiente con `envKey`/NODE_ENV y filtrar los descriptores por `env`; el resto del cΓ³digo sigue recibiendo `Transport[]`. Transports con `name` para el pool nombrado. Logger expone `.override()`, `.add()`, `.remove()` que aplican al siguiente log y resuelven por nombre contra ese pool. Opcionalmente exportar `ConditionalTransport` para quien prefiera la API por funciΓ³n.
|
package/docs/configuration.md
DELETED
|
@@ -1,99 +0,0 @@
|
|
|
1
|
-
# Master Configuration Guide
|
|
2
|
-
|
|
3
|
-
This guide provides a complete reference for every configuration option in SyntropyLog.
|
|
4
|
-
|
|
5
|
-
## π Top-Level Configuration
|
|
6
|
-
|
|
7
|
-
The configuration object passed to `syntropyLog.init()` follows this structure:
|
|
8
|
-
|
|
9
|
-
```typescript
|
|
10
|
-
await syntropyLog.init({
|
|
11
|
-
logger: { /* Logger specific settings */ },
|
|
12
|
-
loggingMatrix: { /* Context visibility control */ },
|
|
13
|
-
redis: { /* Managed Redis instances */ },
|
|
14
|
-
masking: { /* Data privacy & security rules */ },
|
|
15
|
-
context: { /* Correlation ID settings */ },
|
|
16
|
-
shutdownTimeout: 5000 // ms
|
|
17
|
-
});
|
|
18
|
-
```
|
|
19
|
-
|
|
20
|
-
---
|
|
21
|
-
|
|
22
|
-
## π² 1. Logger Configuration (`logger`)
|
|
23
|
-
|
|
24
|
-
Controls the core logging engine behavior.
|
|
25
|
-
|
|
26
|
-
| Property | Type | Description |
|
|
27
|
-
| :--- | :--- | :--- |
|
|
28
|
-
| `serviceName` | `string` | The identifier for your application in traces and logs. |
|
|
29
|
-
| `level` | `LogLevel` | Minimum severity level (`trace`, `debug`, `info`, `warn`, `error`, `fatal`, `silent`). |
|
|
30
|
-
| `transports` | `Transport[]` or `Record` | Array of transport instances (e.g., `ConsoleTransport`) or a mapping of logger names to transport arrays. |
|
|
31
|
-
| `serializerTimeoutMs` | `number` | Max time (ms) allowed for the security pipeline to process metadata. Default: `50ms`. |
|
|
32
|
-
| `prettyPrint` | `object` | `{ enabled: boolean }`. Formats logs for readability in development. |
|
|
33
|
-
|
|
34
|
-
---
|
|
35
|
-
|
|
36
|
-
## π 2. Logging Matrix (`loggingMatrix`)
|
|
37
|
-
|
|
38
|
-
The **Logging Matrix** is a unique feature that controls which context properties (e.g., `userId`, `correlationId`) are included in the final log output based on the log level.
|
|
39
|
-
|
|
40
|
-
```typescript
|
|
41
|
-
loggingMatrix: {
|
|
42
|
-
default: ['correlationId', 'serviceName'], // Included in all levels
|
|
43
|
-
error: ['*'], // Include ALL context fields for errors
|
|
44
|
-
trace: ['correlationId', 'serviceName', 'requestId'],
|
|
45
|
-
audit: ['*'] // Audit logs typically include all available context
|
|
46
|
-
}
|
|
47
|
-
```
|
|
48
|
-
|
|
49
|
-
---
|
|
50
|
-
|
|
51
|
-
## πΎ 3. Managed Resources (`redis`)
|
|
52
|
-
|
|
53
|
-
Centralize Redis connections with automatic observability.
|
|
54
|
-
|
|
55
|
-
### **Redis (`redis`)**
|
|
56
|
-
- `instances`: Array of Redis configurations.
|
|
57
|
-
- `default`: Name of the default instance.
|
|
58
|
-
|
|
59
|
-
**Per-instance Logging settings:**
|
|
60
|
-
- `onSuccess`: Level for successful commands (Default: `debug`).
|
|
61
|
-
- `onError`: Level for failed commands (Default: `error`).
|
|
62
|
-
- `logCommandValues`: Boolean. Log the command arguments (e.g., keys/values).
|
|
63
|
-
- `logReturnValue`: Boolean. Log what Redis returned.
|
|
64
|
-
|
|
65
|
-
---
|
|
66
|
-
|
|
67
|
-
## π‘οΈ 4. Masking & Security (`masking`)
|
|
68
|
-
|
|
69
|
-
Define rules to automatically redact sensitive information.
|
|
70
|
-
|
|
71
|
-
```typescript
|
|
72
|
-
masking: {
|
|
73
|
-
rules: [
|
|
74
|
-
{ pattern: 'password', strategy: MaskingStrategy.STAR },
|
|
75
|
-
{ pattern: /card_number/, strategy: MaskingStrategy.REDACT }
|
|
76
|
-
],
|
|
77
|
-
maskChar: '*',
|
|
78
|
-
preserveLength: true,
|
|
79
|
-
enableDefaultRules: true // Pre-mask common fields like 'token', 'apiKey'
|
|
80
|
-
}
|
|
81
|
-
```
|
|
82
|
-
|
|
83
|
-
---
|
|
84
|
-
|
|
85
|
-
## π 5. Context Propagation (`context`)
|
|
86
|
-
|
|
87
|
-
Defines how the library identifies and tracks requests through headers.
|
|
88
|
-
|
|
89
|
-
- `correlationIdHeader`: The header name used for tracing (e.g., `X-Correlation-ID`).
|
|
90
|
-
- `transactionIdHeader`: The header name for external trace IDs (e.g., `X-Trace-ID`).
|
|
91
|
-
|
|
92
|
-
---
|
|
93
|
-
|
|
94
|
-
## π Related Guides
|
|
95
|
-
- [π’ Enterprise Implementation](./enterprise.md)
|
|
96
|
-
- [π¦ Persistence & Universal Adapters](./persistence.md)
|
|
97
|
-
- [𧬠Serialization & Custom Formatting](./serialization.md)
|
|
98
|
-
- [βοΈ Middleware & Framework Integration](./middleware.md)
|
|
99
|
-
- [π§ͺ Testing Strategy](./testing.md)
|
package/docs/enterprise.md
DELETED
|
@@ -1,44 +0,0 @@
|
|
|
1
|
-
# Enterprise Implementation Guide
|
|
2
|
-
|
|
3
|
-
SyntropyLog is designed for enterprise environments and can be easily integrated into your internal infrastructure.
|
|
4
|
-
|
|
5
|
-
## Why SyntropyLog for Enterprise?
|
|
6
|
-
|
|
7
|
-
1. **Security by Default**
|
|
8
|
-
- Built-in data masking for sensitive information.
|
|
9
|
-
- Compliance-ready logging with retention rules.
|
|
10
|
-
- No external telemetry or tracking.
|
|
11
|
-
- 100% open source and auditable.
|
|
12
|
-
|
|
13
|
-
2. **Scalable Architecture**
|
|
14
|
-
- Singleton pattern prevents resource leaks.
|
|
15
|
-
- Automatic connection pooling.
|
|
16
|
-
- Kubernetes-ready with proper lifecycle management.
|
|
17
|
-
- Horizontal scaling support.
|
|
18
|
-
|
|
19
|
-
3. **Performance Excellence**
|
|
20
|
-
- Zero measurable performance overhead.
|
|
21
|
-
- Minimal bundle size impact.
|
|
22
|
-
- Optimized for high-throughput applications.
|
|
23
|
-
|
|
24
|
-
## Internal Implementation Strategy
|
|
25
|
-
|
|
26
|
-
### Phase 1: Pilot Project (2-4 weeks)
|
|
27
|
-
Start with a single microservice to validate the integration and observability benefits.
|
|
28
|
-
|
|
29
|
-
### Phase 2: Service Mesh Integration (4-8 weeks)
|
|
30
|
-
Standardize configuration across multiple services to enable cross-service tracing.
|
|
31
|
-
|
|
32
|
-
### Phase 3: Full Enterprise Rollout (8-12 weeks)
|
|
33
|
-
Integrate with all internal resources (Redis clusters, Kafka brokers, internal APIs) and compliance monitoring systems.
|
|
34
|
-
|
|
35
|
-
## Enterprise Patterns
|
|
36
|
-
|
|
37
|
-
### Environment-Based Configuration
|
|
38
|
-
Centralize your configuration logic to switch between local development and production environments effortlessly.
|
|
39
|
-
|
|
40
|
-
### Centralized Logging Infrastructure
|
|
41
|
-
Use JSON transports for seamless ingestion into ELK (Elasticsearch, Logstash, Kibana) or Splunk stacks.
|
|
42
|
-
|
|
43
|
-
### Security & Compliance
|
|
44
|
-
Leverage the `audit()` level for critical actions that must bypass standard filters and be persisted for regulatory compliance.
|
package/docs/middleware.md
DELETED
|
@@ -1,62 +0,0 @@
|
|
|
1
|
-
# Middleware & Framework Integration
|
|
2
|
-
|
|
3
|
-
SyntropyLog is framework-agnostic but provides patterns to integrate with the most popular Node.js web frameworks.
|
|
4
|
-
|
|
5
|
-
## π How Middleware Works in SyntropyLog
|
|
6
|
-
|
|
7
|
-
The goal of the middleware is to:
|
|
8
|
-
1. **Initialize the Context**: Start an asynchronous storage scope.
|
|
9
|
-
2. **Assign Correlation IDs**: Extract from headers or generate a new one.
|
|
10
|
-
3. **Bind Loggers**: Provide a context-aware logger to the request.
|
|
11
|
-
|
|
12
|
-
---
|
|
13
|
-
|
|
14
|
-
## π Native Integration Patterns
|
|
15
|
-
|
|
16
|
-
### **Express.js**
|
|
17
|
-
```typescript
|
|
18
|
-
import { syntropyLog } from 'syntropylog';
|
|
19
|
-
|
|
20
|
-
const syntropyMiddleware = (req, res, next) => {
|
|
21
|
-
const contextManager = syntropyLog.getContextManager();
|
|
22
|
-
|
|
23
|
-
contextManager.run(async () => {
|
|
24
|
-
// Automatically detect incoming header or generate new ID
|
|
25
|
-
const correlationId = contextManager.getCorrelationId();
|
|
26
|
-
contextManager.set(contextManager.getCorrelationIdHeaderName(), correlationId);
|
|
27
|
-
|
|
28
|
-
// Attach logger to request for convenience
|
|
29
|
-
req.logger = syntropyLog.getLogger('http');
|
|
30
|
-
|
|
31
|
-
next();
|
|
32
|
-
});
|
|
33
|
-
};
|
|
34
|
-
|
|
35
|
-
app.use(syntropyMiddleware);
|
|
36
|
-
```
|
|
37
|
-
|
|
38
|
-
### **NestJS**
|
|
39
|
-
Use a Global Interceptor or Middleware to wrap the execution context.
|
|
40
|
-
```typescript
|
|
41
|
-
@Injectable()
|
|
42
|
-
export class ObservabilityMiddleware implements NestMiddleware {
|
|
43
|
-
use(req: Request, res: Response, next: NextFunction) {
|
|
44
|
-
syntropyLog.getContextManager().run(() => {
|
|
45
|
-
// Context setup logic
|
|
46
|
-
next();
|
|
47
|
-
});
|
|
48
|
-
}
|
|
49
|
-
}
|
|
50
|
-
```
|
|
51
|
-
|
|
52
|
-
---
|
|
53
|
-
|
|
54
|
-
## π οΈ "The Middleware that needs to be done"
|
|
55
|
-
|
|
56
|
-
We are currently working on a unified `@syntropylog/middleware` package that will provide:
|
|
57
|
-
- **Auto-masking** for body/headers out of the box.
|
|
58
|
-
- **Performance tracking** (auto log request duration).
|
|
59
|
-
- **Graceful Error Handling** middleware.
|
|
60
|
-
|
|
61
|
-
Until the official package is released, we recommend using the **Express + Redis + Axios** example as a reference for a production-ready implementation:
|
|
62
|
-
π [See Example 12: Express + Redis + Axios](./examples/12-express-redis-axios/)
|
package/docs/persistence.md
DELETED
|
@@ -1,81 +0,0 @@
|
|
|
1
|
-
# Universal Persistence (Storage Agnostic)
|
|
2
|
-
|
|
3
|
-
Starting from v0.8.x, SyntropyLog includes a powerful way to persist logs to any destination without external dependencies. By using `UniversalAdapter` and `UniversalLogFormatter`, you can map your logs to any schema using JSON and provide an execution function.
|
|
4
|
-
|
|
5
|
-
## π― The Concept
|
|
6
|
-
|
|
7
|
-
Instead of writing a complex class for every database, you provide:
|
|
8
|
-
1. **A Formatter**: Maps the log object to your desired schema.
|
|
9
|
-
2. **An Executor**: A simple function that takes the formatted data and saves it.
|
|
10
|
-
|
|
11
|
-
---
|
|
12
|
-
|
|
13
|
-
## π Examples
|
|
14
|
-
|
|
15
|
-
### **1. Capture Logs in Memory (for Debugging)**
|
|
16
|
-
```typescript
|
|
17
|
-
import { UniversalAdapter, syntropyLog } from 'syntropylog';
|
|
18
|
-
|
|
19
|
-
const adapter = new UniversalAdapter({
|
|
20
|
-
executor: (data) => console.log('Captured by Adapter:', data)
|
|
21
|
-
});
|
|
22
|
-
|
|
23
|
-
// Use it in your configuration
|
|
24
|
-
await syntropyLog.init({
|
|
25
|
-
logger: {
|
|
26
|
-
transports: [ new AdapterTransport({ adapter }) ]
|
|
27
|
-
}
|
|
28
|
-
});
|
|
29
|
-
```
|
|
30
|
-
|
|
31
|
-
### **2. Persisting to MongoDB (Object-based)**
|
|
32
|
-
```typescript
|
|
33
|
-
import { UniversalAdapter, UniversalLogFormatter, syntropyLog } from 'syntropylog';
|
|
34
|
-
|
|
35
|
-
const mongoAdapter = new UniversalAdapter({
|
|
36
|
-
executor: (doc) => db.collection('logs').insertOne(doc)
|
|
37
|
-
});
|
|
38
|
-
|
|
39
|
-
const mongoFormatter = new UniversalLogFormatter({
|
|
40
|
-
mapping: {
|
|
41
|
-
user: 'metadata.userId',
|
|
42
|
-
event: 'message',
|
|
43
|
-
level: 'level',
|
|
44
|
-
payload: 'bindings' // Full object path
|
|
45
|
-
}
|
|
46
|
-
});
|
|
47
|
-
|
|
48
|
-
await syntropyLog.init({
|
|
49
|
-
logger: {
|
|
50
|
-
transports: {
|
|
51
|
-
audit: [new AdapterTransport({
|
|
52
|
-
adapter: mongoAdapter,
|
|
53
|
-
formatter: mongoFormatter
|
|
54
|
-
})]
|
|
55
|
-
}
|
|
56
|
-
}
|
|
57
|
-
});
|
|
58
|
-
```
|
|
59
|
-
|
|
60
|
-
### **3. Generic SQL (Postgres/MySQL)**
|
|
61
|
-
```typescript
|
|
62
|
-
const sqlAdapter = new UniversalAdapter({
|
|
63
|
-
// The executor receives the result of the formatter
|
|
64
|
-
executor: ({ sql, values }) => pool.query(sql, values)
|
|
65
|
-
});
|
|
66
|
-
|
|
67
|
-
const sqlFormatter = new UniversalLogFormatter({
|
|
68
|
-
mapping: {
|
|
69
|
-
column_user: 'bindings.userId',
|
|
70
|
-
column_msg: 'message'
|
|
71
|
-
}
|
|
72
|
-
});
|
|
73
|
-
```
|
|
74
|
-
|
|
75
|
-
---
|
|
76
|
-
|
|
77
|
-
## π‘οΈ Why use Universal Adapters?
|
|
78
|
-
|
|
79
|
-
- **Storage Agnostic**: Move from SQL to NoSQL without changing your application code.
|
|
80
|
-
- **Zero Dependencies**: You don't need `syntropylog-mongodb-adapter`βjust use your existing database client.
|
|
81
|
-
- **Pure JSON Mapping**: Define your schema in configuration, not in code.
|
package/docs/philosophy.md
DELETED
|
@@ -1,15 +0,0 @@
|
|
|
1
|
-
# Core Philosophy: Silent Observer
|
|
2
|
-
|
|
3
|
-
SyntropyLog follows the **Silent Observer** principle: we report what happened and nothing more. We never interfere with the primary execution flow of your application.
|
|
4
|
-
|
|
5
|
-
## Non-Blocking Execution
|
|
6
|
-
Your application should continue running normally, even if the logging pipeline or a transport fails. SyntropyLog catches and reports its own internal errors to the console but prevents them from crashing your main process.
|
|
7
|
-
|
|
8
|
-
## Error Handling Strategy
|
|
9
|
-
|
|
10
|
-
1. **Configuration Errors**: Fatal. The application fails to start if the environment is incorrectly configured.
|
|
11
|
-
2. **Pipeline/Serializer Errors**: Non-fatal. Reported to internal diagnostics, application continues.
|
|
12
|
-
3. **Transport Errors**: Non-fatal. Logged to the fallback console, application continues.
|
|
13
|
-
|
|
14
|
-
## Performance Benchmark
|
|
15
|
-
SyntropyLog is designed to provide tracing and management with identical performance to **Pino**, the industry standard for high-performance logging in Node.js.
|
package/docs/serialization.md
DELETED
|
@@ -1,67 +0,0 @@
|
|
|
1
|
-
# Serialization & Resiliency
|
|
2
|
-
|
|
3
|
-
SyntropyLog 0.9.1 introduces an **Intelligent Serialization Pipeline**. This system ensures that your application remains stable even when logging complex, circular, or deeply nested data structures.
|
|
4
|
-
|
|
5
|
-
## π‘οΈ The Security Pipeline
|
|
6
|
-
|
|
7
|
-
Every piece of metadata passed to a log call flows through a multi-step pipeline before reaching any transport. This process is automatic and requires zero configuration.
|
|
8
|
-
|
|
9
|
-
### 1. **Hygiene Step** (Circular References & Depth)
|
|
10
|
-
Uses `flatted` to detect and neutralize circular references. It also enforces a maximum object depth to prevent stack overflow or excessive memory consumption.
|
|
11
|
-
- **Goal**: Prevent recursion-based crashes.
|
|
12
|
-
- **Output**: A "clean" object safe for standard JSON operations.
|
|
13
|
-
|
|
14
|
-
### 2. **Serialization Step** (Internal Resiliency)
|
|
15
|
-
Translates complex objects (Errors, BigInts, etc.) into structured JSON values using internal, optimized serializers.
|
|
16
|
-
|
|
17
|
-
### 3. **Sanitization Step** (PII & Injection)
|
|
18
|
-
Integrates with the **MaskingEngine** to redact sensitive fields and strips control characters to prevent log injection attacks.
|
|
19
|
-
|
|
20
|
-
### 4. **Timeout Step** (Event Loop Protection)
|
|
21
|
-
Every step in the pipeline is wrapped in a mandatory execution timeout (default: **50ms**). If serialization takes too long, it is aborted via `Promise.race`, and a safe subset of the data is logged instead.
|
|
22
|
-
- **Goal**: Prevent "Death by Log" or Event Loop starvation in high-load scenarios.
|
|
23
|
-
- **Configurable**: Adjust this value using the `logger.serializerTimeoutMs` property in your configuration (min: 1ms, recommended: 20ms - 200ms).
|
|
24
|
-
|
|
25
|
-
---
|
|
26
|
-
|
|
27
|
-
## βοΈ Comparison with Traditional Loggers
|
|
28
|
-
|
|
29
|
-
In most logging implementations, serialization is a synchronous, blocking task. SyntropyLog differentiates itself by treating serialization as a **resilient asynchronous pipeline**:
|
|
30
|
-
|
|
31
|
-
| Feature | Traditional Loggers | SyntropyLog v0.9.1 |
|
|
32
|
-
| :--- | :--- | :--- |
|
|
33
|
-
| **Circular Objects** | Often crash or throw `TypeError` | Auto-detected and neutralized via `HygieneStep` |
|
|
34
|
-
| **Massive Objects** | Block the Event Loop until finished | Aborted after timeout (50ms) to protect latency |
|
|
35
|
-
| **Safety** | May throw exceptions on bad data | Guaranteed never to throw ("Silent Observer") |
|
|
36
|
-
| **Auditability** | Dropped logs leave no trace | Failure metadata is included in the log output |
|
|
37
|
-
|
|
38
|
-
## π§© Extensibility (Universal Contracts)
|
|
39
|
-
|
|
40
|
-
While SyntropyLog no longer allows "loose" serializer functions in the global config (for security reasons), advanced users can still extend the system by implementing the `ISerializer` contract and registering it with the `SerializationManager`.
|
|
41
|
-
|
|
42
|
-
```typescript
|
|
43
|
-
import { ISerializer, SerializationComplexity } from 'syntropylog';
|
|
44
|
-
|
|
45
|
-
const MyCustomSerializer: ISerializer = {
|
|
46
|
-
name: 'my-serializer',
|
|
47
|
-
priority: 10,
|
|
48
|
-
canSerialize: (data) => data instanceof MyCustomClass,
|
|
49
|
-
serialize: async (data) => ({
|
|
50
|
-
success: true,
|
|
51
|
-
data: data.toCustomString(),
|
|
52
|
-
}),
|
|
53
|
-
getComplexity: () => SerializationComplexity.SIMPLE
|
|
54
|
-
};
|
|
55
|
-
|
|
56
|
-
// Register via the facade
|
|
57
|
-
syntropyLog.getSerializer().register(MyCustomSerializer);
|
|
58
|
-
```
|
|
59
|
-
|
|
60
|
-
---
|
|
61
|
-
|
|
62
|
-
## ποΈ The Silent Observer Principle
|
|
63
|
-
|
|
64
|
-
The entire pipeline follows the **Silent Observer** philosophy:
|
|
65
|
-
- **No Exceptions**: Logging should never throw. If a step fails, the pipeline recovers gracefully.
|
|
66
|
-
- **Non-Blocking**: Timeouts ensure that serialization never starves the event loop.
|
|
67
|
-
- **Information over Perfection**: If data is too complex to serialize safely, SyntropyLog will log as much as possible rather than dropping the message entirely.
|
package/docs/testing.md
DELETED
|
@@ -1,28 +0,0 @@
|
|
|
1
|
-
# Testing Guide
|
|
2
|
-
|
|
3
|
-
SyntropyLog is designed to make testing easier by removing the need for complex connection management and boilerplate in your unit tests.
|
|
4
|
-
|
|
5
|
-
## Zero Boilerplate Testing
|
|
6
|
-
|
|
7
|
-
Use the built-in testing helper to inject mocks into your services without needing to initialize or shutdown actual connections.
|
|
8
|
-
|
|
9
|
-
```typescript
|
|
10
|
-
import { createTestHelper } from 'syntropylog/testing';
|
|
11
|
-
const testHelper = createTestHelper();
|
|
12
|
-
|
|
13
|
-
describe('MyService', () => {
|
|
14
|
-
beforeEach(() => {
|
|
15
|
-
testHelper.beforeEach(); // Resets all mocks automatically
|
|
16
|
-
});
|
|
17
|
-
|
|
18
|
-
it('works', () => {
|
|
19
|
-
const service = new MyService(testHelper.mockSyntropyLog);
|
|
20
|
-
// ...
|
|
21
|
-
});
|
|
22
|
-
});
|
|
23
|
-
```
|
|
24
|
-
|
|
25
|
-
## Benefits
|
|
26
|
-
- **No Connection Boilerplate**: No need for `init()` or `shutdown()` in your test files.
|
|
27
|
-
- **Lightning Fast**: All operations run in-memory using highly optimized mocks.
|
|
28
|
-
- **Assertion Ready**: Easily verify that your services are logging the correct information and using Redis/HTTP resources as expected.
|