badmfck-api-server 3.7.5 → 3.7.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cocroach.help.md +191 -0
- package/connections.help.md +0 -0
- package/dist/apiServer/APIService.js +1 -1
- package/dist/apiServer/DBService.d.ts +3 -0
- package/dist/apiServer/DBService.js +4 -0
- package/dist/apiServer/db/PostgresAdapter.d.ts +31 -0
- package/dist/apiServer/db/PostgresAdapter.js +66 -0
- package/dist/apiServer/helper/Validator.d.ts +1 -1
- package/dist/apiServer/helper/Validator.js +27 -17
- package/dist/index.d.ts +2 -1
- package/dist/index.js +3 -1
- package/package.json +3 -1
package/cocroach.help.md
ADDED
@@ -0,0 +1,191 @@
|
|
1
|
+
Отличный выбор. CockroachDB как раз создан для «банковской» нагрузки: строгое **SERIALIZABLE** по умолчанию, multi‑region, онлайн‑DDL. Ниже — как я бы собрал ядро процессинга (ledger) именно под CockroachDB, с нюансами, где он отличается от PostgreSQL.
|
2
|
+
|
3
|
+
# Ключевые принципы в CockroachDB
|
4
|
+
|
5
|
+
* **Изоляция**: всегда `SERIALIZABLE`. Готовь **retry‑loop** на `40001 (retry txn)` — это норма для конфликтов.
|
6
|
+
* **Локальность**: multi‑region схемы — `REGIONAL BY ROW` для транзакционных таблиц; `GLOBAL` для справочников.
|
7
|
+
* **Анти‑hotspot**: избегай монотонных ключей (sequence), используй `UUID v4/v7` и/или **hash‑sharded** индексы.
|
8
|
+
* **Блокировки**: нет advisory locks как в PG — используй `SELECT … FOR UPDATE` на «строке‑замке» (см. ниже).
|
9
|
+
* **Триггеры/расширения**: ограничены → реактивность делай через **CHANGEFEED** (CDC) в Kafka/обработчик.
|
10
|
+
|
11
|
+
---
|
12
|
+
|
13
|
+
# 1) Схема ядра (ledger)
|
14
|
+
|
15
|
+
```sql
|
16
|
+
-- Региональная настройка базы (примерно)
|
17
|
+
-- CREATE DATABASE core PRIMARY REGION eu-central SECONDARY REGION eu-north ...;
|
18
|
+
|
19
|
+
-- Справочник счетов (регион по владельцу)
|
20
|
+
CREATE TABLE ledger_accounts (
|
21
|
+
account_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
22
|
+
client_id UUID NOT NULL,
|
23
|
+
currency STRING NOT NULL,
|
24
|
+
status STRING NOT NULL,
|
25
|
+
crdb_region crdb_internal_region NOT NULL, -- для REGIONAL BY ROW
|
26
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
27
|
+
) LOCALITY REGIONAL BY ROW;
|
28
|
+
|
29
|
+
-- Неизменяемый журнал проводок
|
30
|
+
CREATE TABLE ledger_journal (
|
31
|
+
entry_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
32
|
+
batch_id UUID NOT NULL,
|
33
|
+
debit_account UUID NOT NULL,
|
34
|
+
credit_account UUID NOT NULL,
|
35
|
+
amount DECIMAL(20,6) NOT NULL,
|
36
|
+
currency STRING NOT NULL,
|
37
|
+
event_time TIMESTAMPTZ NOT NULL DEFAULT now(),
|
38
|
+
idempotency_key BYTES, -- уникальность на уровне батча/идемпотентности
|
39
|
+
meta JSONB
|
40
|
+
) LOCALITY REGIONAL BY ROW;
|
41
|
+
|
42
|
+
-- Ускоряем поиск по аккаунтам и времени + шардим индекс, чтобы не было hotspot’ов
|
43
|
+
CREATE INDEX lj_by_debit ON ledger_journal (debit_account, event_time DESC) USING HASH WITH BUCKET_COUNT = 32;
|
44
|
+
CREATE INDEX lj_by_credit ON ledger_journal (credit_account, event_time DESC) USING HASH WITH BUCKET_COUNT = 32;
|
45
|
+
CREATE UNIQUE INDEX lj_by_idem ON ledger_journal (idempotency_key) WHERE idempotency_key IS NOT NULL;
|
46
|
+
|
47
|
+
-- Текущие балансы (одна строка на аккаунт+валюту)
|
48
|
+
CREATE TABLE ledger_balances (
|
49
|
+
account_id UUID NOT NULL,
|
50
|
+
currency STRING NOT NULL,
|
51
|
+
current_balance DECIMAL(20,6) NOT NULL DEFAULT 0,
|
52
|
+
pending_balance DECIMAL(20,6) NOT NULL DEFAULT 0,
|
53
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
54
|
+
PRIMARY KEY (account_id, currency)
|
55
|
+
) LOCALITY REGIONAL BY ROW;
|
56
|
+
|
57
|
+
-- Бизнес-объект платежа
|
58
|
+
CREATE TABLE payments (
|
59
|
+
payment_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
60
|
+
merchant_id UUID NOT NULL,
|
61
|
+
status STRING NOT NULL, -- pending/authorized/captured/refunded/failed
|
62
|
+
amount DECIMAL(20,6) NOT NULL,
|
63
|
+
currency STRING NOT NULL,
|
64
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
65
|
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
66
|
+
meta JSONB
|
67
|
+
) LOCALITY REGIONAL BY ROW;
|
68
|
+
|
69
|
+
CREATE INDEX p_by_merchant ON payments (merchant_id, created_at DESC) USING HASH WITH BUCKET_COUNT = 32;
|
70
|
+
|
71
|
+
-- Outbox для событий (CDC → Kafka)
|
72
|
+
CREATE TABLE outbox (
|
73
|
+
event_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
74
|
+
aggregate_type STRING NOT NULL,
|
75
|
+
aggregate_id UUID NOT NULL,
|
76
|
+
payload JSONB NOT NULL,
|
77
|
+
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
78
|
+
published_at TIMESTAMPTZ
|
79
|
+
) LOCALITY REGIONAL BY ROW;
|
80
|
+
```
|
81
|
+
|
82
|
+
**Почему так:** `REGIONAL BY ROW` держит запись ближе к пользователю/мерчанту, снижая межрегиональные WAN‑RTT. `HASH‑sharded` индексы избегают горячих ключей. Журнал — только `INSERT` (сторно отдельной проводкой).
|
83
|
+
|
84
|
+
---
|
85
|
+
|
86
|
+
# 2) Конкурентность и целостность (без overdraft’ов)
|
87
|
+
|
88
|
+
В Cockroach нет advisory‑lock’ов → делаем «строку‑замок» (или просто лочим строку баланса):
|
89
|
+
|
90
|
+
```sql
|
91
|
+
-- Псевдокод перевода средств в одной транзакции
|
92
|
+
BEGIN;
|
93
|
+
|
94
|
+
-- 1) Лочим обе строки баланса детерминированно, чтобы избежать дедлоков
|
95
|
+
-- (фиксированный порядок по UUID)
|
96
|
+
SELECT current_balance
|
97
|
+
FROM ledger_balances
|
98
|
+
WHERE account_id IN ($from, $to) AND currency = $cur
|
99
|
+
ORDER BY account_id FOR UPDATE;
|
100
|
+
|
101
|
+
-- 2) Проверяем лимиты, статусы счетов, валюту и т.п.
|
102
|
+
-- 3) Пишем двойную запись в журнал (иммутабельно)
|
103
|
+
INSERT INTO ledger_journal (batch_id, debit_account, credit_account, amount, currency, idempotency_key, meta)
|
104
|
+
VALUES
|
105
|
+
($batch, $from, $to, $amount, $cur, $idem, $meta),
|
106
|
+
  ($batch, $to, $from, -$amount, $cur, NULL, $meta); -- NULL: уникальный индекс lj_by_idem допускает только одну строку на idempotency_key
|
107
|
+
|
108
|
+
-- 4) Обновляем балансы, не допуская отрицательных
|
109
|
+
UPDATE ledger_balances
|
110
|
+
SET current_balance = current_balance - $amount, updated_at = now()
|
111
|
+
WHERE account_id = $from AND currency = $cur
|
112
|
+
AND current_balance >= $amount;
|
113
|
+
|
114
|
+
-- Если rowcount = 0 → недостаточно средств → ROLLBACK
|
115
|
+
|
116
|
+
UPDATE ledger_balances
|
117
|
+
SET current_balance = current_balance + $amount, updated_at = now()
|
118
|
+
WHERE account_id = $to AND currency = $cur;
|
119
|
+
|
120
|
+
-- 5) Пишем событие во внешний мир через outbox
|
121
|
+
INSERT INTO outbox (aggregate_type, aggregate_id, payload)
|
122
|
+
VALUES ('payment', $payment_id, jsonb_build_object(...));
|
123
|
+
|
124
|
+
COMMIT;
|
125
|
+
```
|
126
|
+
|
127
|
+
**Идемпотентность:** клиент всегда шлёт `Idempotency-Key`; на конфликте `lj_by_idem` возвращаем уже созданный результат.
|
128
|
+
|
129
|
+
**Важно для CRDB:** оберни это в **retry‑loop** — если получишь `40001`, повтори транзакцию (это нормальный рабочий режим при конкуренции).
|
130
|
+
|
131
|
+
---
|
132
|
+
|
133
|
+
# 3) Multi‑region дизайн
|
134
|
+
|
135
|
+
* Таблицы с интенсивной записью: **`REGIONAL BY ROW`**; колонку `crdb_region` проставляй по мерчанту/аккаунту.
|
136
|
+
* Справочники (валюты, MCC): **`GLOBAL`** — чтения быстрые во всех регионах.
|
137
|
+
* Критические чтения «здесь и сейчас» — по месту записи; отчётные/аналитические чтения могут использовать **Follower Reads / bounded staleness** для разгрузки.
|
138
|
+
* **Survival goal**: на уровне базы/таблиц выставляй устойчивость к падению региона.
|
139
|
+
|
140
|
+
---
|
141
|
+
|
142
|
+
# 4) Масштабирование и партиционирование
|
143
|
+
|
144
|
+
* CRDB сам шардингует на уровне KV‑диапазонов; ты задаёшь **partition by region** и **hash‑sharded** индексы.
|
145
|
+
* Для огромного журнала — **партиции по времени** (месяц) + вторичная локализация по региону.
|
146
|
+
* Избегай крупных транзакций и долгих `SELECT` с `FOR UPDATE` — держи транзакции короткими.
|
147
|
+
|
148
|
+
---
|
149
|
+
|
150
|
+
# 5) CDC, очереди, интеграции
|
151
|
+
|
152
|
+
* **CHANGEFEED → Kafka / cloud storage** из `outbox` и/или `ledger_journal`:
|
153
|
+
|
154
|
+
* ключ по `entry_id`/`event_id`,
|
155
|
+
* опции: маркеры «резолва» для exactly‑once в потребителе,
|
156
|
+
* downstream — вебхуки, AML, фрод, бухгалтерия.
|
157
|
+
* Идемпотентность консьюмеров — по `event_id`.
|
158
|
+
|
159
|
+
---
|
160
|
+
|
161
|
+
# 6) Надёжность и бэкапы
|
162
|
+
|
163
|
+
* **Scheduled Backups**: инкрементальные в объектное хранилище (S3/GCS), **PITR**.
|
164
|
+
* On‑line DDL безопасен, но проверяй планы (statement statistics, EXPLAIN ANALYZE).
|
165
|
+
* Наблюдаемость: встроенные графики + экспортер метрик в Prometheus/Grafana.
|
166
|
+
|
167
|
+
---
|
168
|
+
|
169
|
+
# 7) Что учесть vs PostgreSQL
|
170
|
+
|
171
|
+
* Нет/ограничены: **триггеры/расширения** уровня PG → логику выносим в приложение/CDC.
|
172
|
+
* **Advisory locks нет** → используй `SELECT … FOR UPDATE` на строках‑«замках».
|
173
|
+
* Избегай **монотонных** PK/sequence (горячие ключи) — UUID/временные UUID + hash‑sharded.
|
174
|
+
* Поддержка `UPSERT`, `INSERT ON CONFLICT` есть; но некоторые PG‑спец‑фичи могут отличаться.
|
175
|
+
* Готовь системно **retry‑loop** для всех записывающих транзакций.
|
176
|
+
|
177
|
+
---
|
178
|
+
|
179
|
+
# 8) Мини‑чеклист конфигурации
|
180
|
+
|
181
|
+
* Типы денег: `DECIMAL(20,6)` в БД; в приложении можно хранить в «центах» как `BigInt`.
|
182
|
+
* Время: `TIMESTAMPTZ`, всё в **UTC**.
|
183
|
+
* Балансы: строгий инвариант `current_balance >= 0` через `WHERE` + проверка rowcount.
|
184
|
+
* Индексы: на ключевых путях запросов, **HASH‑sharded** на горячих колонках.
|
185
|
+
* Таблицы: `REGIONAL BY ROW` для OLTP; `GLOBAL` для справочников.
|
186
|
+
* CDC: changefeed из `outbox`; консьюмер — идемпотентный.
|
187
|
+
* Транзакции: короткие, с retry‑loop на `40001`.
|
188
|
+
|
189
|
+
---
|
190
|
+
|
191
|
+
Если хочешь, под твои реальные **TPS и регионы** (например: EU‑Riga + DE‑Frankfurt) я соберу готовые DDL с `LOCALITY`, пример **CHANGEFEED** в Kafka, шаблон **retry‑loop** (Node.js/TS и Go/Java), и схему партиционирования журнала на 12–24 месяца хранения.
|
File without changes
|
@@ -95,7 +95,7 @@ async function Initializer(services) {
|
|
95
95
|
exports.Initializer = Initializer;
|
96
96
|
class APIService extends BaseService_1.BaseService {
|
97
97
|
static nextLogID = 0;
|
98
|
-
version = "3.7.
|
98
|
+
version = "3.7.7";
|
99
99
|
options;
|
100
100
|
monitor = null;
|
101
101
|
started = new Date();
|
@@ -64,6 +64,9 @@ export interface DBAdapterOptions {
|
|
64
64
|
password: string;
|
65
65
|
port: number;
|
66
66
|
database: string;
|
67
|
+
connectionParamers?: string;
|
68
|
+
idleTimeoutMillis?: number;
|
69
|
+
connectionTimeoutMillis?: number;
|
67
70
|
queueLimit?: number;
|
68
71
|
transactionFailReport?: (trx: any, message: string) => void;
|
69
72
|
transactionFailReportDir?: string;
|
@@ -28,6 +28,7 @@ const badmfck_signal_1 = __importStar(require("badmfck-signal"));
|
|
28
28
|
const BaseService_1 = require("./BaseService");
|
29
29
|
const MysqlAdapter_1 = require("./db/MysqlAdapter");
|
30
30
|
const LogService_1 = require("./LogService");
|
31
|
+
const PostgresAdapter_1 = require("./db/PostgresAdapter");
|
31
32
|
exports.REQ_DB = new badmfck_signal_1.Req(undefined, "REQ_DB");
|
32
33
|
exports.REQ_DBX = new badmfck_signal_1.Req(undefined, "REQ_DBX");
|
33
34
|
exports.REQ_DB_TBEGIN = new badmfck_signal_1.Req(undefined, "REQ_DB_TBEGIN");
|
@@ -77,6 +78,9 @@ class DBService extends BaseService_1.BaseService {
|
|
77
78
|
if (this.options.type === "mysql") {
|
78
79
|
await this.createMysqlDatabase();
|
79
80
|
}
|
81
|
+
if (this.options.type === "postgres") {
|
82
|
+
this.adapter = new PostgresAdapter_1.PostgresAdapter(this.options.connection);
|
83
|
+
}
|
80
84
|
exports.REQ_DBX.listener = async (req) => {
|
81
85
|
const executionStartTime = Date.now();
|
82
86
|
if (!req.dbid && DBService.allInstances.length === 1 && DBService.allInstances[0].adapter) {
|
@@ -0,0 +1,31 @@
|
|
1
|
+
import { Pool } from 'pg';
|
2
|
+
import { DBAdapterOptions, IDBQuery, IDBResult, IDBBulkQuery, IPoolStatus } from '../DBService';
|
3
|
+
import { ITransaction } from '../MysqlService';
|
4
|
+
import { IDBAdapter } from './IDBAdapter';
|
5
|
+
export declare class PostgresAdapter implements IDBAdapter {
|
6
|
+
options: DBAdapterOptions;
|
7
|
+
serviceStarted: boolean;
|
8
|
+
reconnectionTimeout: number;
|
9
|
+
reconnecting: boolean;
|
10
|
+
pool: Pool | null;
|
11
|
+
timeoutID: any;
|
12
|
+
queries: never[];
|
13
|
+
static nextTransactionID: number;
|
14
|
+
transactions: ITransaction[];
|
15
|
+
maxTransactionWaitTime: number;
|
16
|
+
lastSuccessQueryTime: number;
|
17
|
+
pingInterval: number;
|
18
|
+
poolConnections: number;
|
19
|
+
acquiredPoolConnections: number;
|
20
|
+
constructor(options: DBAdapterOptions);
|
21
|
+
tBegin?(): Promise<IDBResult>;
|
22
|
+
tCommit?(trxid: number): Promise<IDBResult>;
|
23
|
+
tRollback?(trxid: number): Promise<IDBResult>;
|
24
|
+
poolStatus(): IPoolStatus;
|
25
|
+
init(): Promise<void>;
|
26
|
+
healthcheck(): Promise<boolean>;
|
27
|
+
finish(): Promise<void>;
|
28
|
+
prepareQuery(request: IDBQuery): string;
|
29
|
+
query(request: IDBQuery): Promise<IDBResult>;
|
30
|
+
bulk(request: IDBBulkQuery): Promise<IDBResult[]>;
|
31
|
+
}
|
@@ -0,0 +1,66 @@
|
|
1
|
+
"use strict";
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
3
|
+
exports.PostgresAdapter = void 0;
|
4
|
+
const pg_1 = require("pg");
|
5
|
+
const LogService_1 = require("../LogService");
|
6
|
+
class PostgresAdapter {
|
7
|
+
options;
|
8
|
+
serviceStarted = false;
|
9
|
+
reconnectionTimeout = 1000 * 3;
|
10
|
+
reconnecting = false;
|
11
|
+
pool = null;
|
12
|
+
timeoutID;
|
13
|
+
queries = [];
|
14
|
+
static nextTransactionID = 1;
|
15
|
+
transactions = [];
|
16
|
+
maxTransactionWaitTime = 1000 * 60 * 2;
|
17
|
+
lastSuccessQueryTime = 0;
|
18
|
+
pingInterval = 1000 * 60 * 5;
|
19
|
+
poolConnections = 0;
|
20
|
+
acquiredPoolConnections = 0;
|
21
|
+
constructor(options) {
|
22
|
+
this.options = options;
|
23
|
+
}
|
24
|
+
tBegin() {
|
25
|
+
throw new Error('Method not implemented.');
|
26
|
+
}
|
27
|
+
tCommit(trxid) {
|
28
|
+
throw new Error('Method not implemented.');
|
29
|
+
}
|
30
|
+
tRollback(trxid) {
|
31
|
+
throw new Error('Method not implemented.');
|
32
|
+
}
|
33
|
+
poolStatus() {
|
34
|
+
throw new Error('Method not implemented.');
|
35
|
+
}
|
36
|
+
async init() {
|
37
|
+
this.pool = new pg_1.Pool({
|
38
|
+
connectionString: "postgresql://" + this.options.user + ":" + this.options.password + "@" + this.options.host + ":" + this.options.port + "/" + this.options.database + (this.options.connectionParamers ?? ""),
|
39
|
+
max: this.options.connectionLimit || 10,
|
40
|
+
idleTimeoutMillis: this.options.idleTimeoutMillis || 1000 * 60 * 5,
|
41
|
+
connectionTimeoutMillis: this.options.connectionTimeoutMillis || 1000 * 10,
|
42
|
+
keepAlive: true,
|
43
|
+
});
|
44
|
+
}
|
45
|
+
async healthcheck() {
|
46
|
+
if (!this.pool) {
|
47
|
+
(0, LogService_1.logError)("PostgreAdapter: Pool is not initialized.");
|
48
|
+
return false;
|
49
|
+
}
|
50
|
+
const { rows } = await this.pool.query('SELECT 1 AS ok');
|
51
|
+
return rows?.[0]?.ok === 1;
|
52
|
+
}
|
53
|
+
finish() {
|
54
|
+
throw new Error("Method not implemented.");
|
55
|
+
}
|
56
|
+
prepareQuery(request) {
|
57
|
+
throw new Error("Method not implemented.");
|
58
|
+
}
|
59
|
+
query(request) {
|
60
|
+
throw new Error("Method not implemented.");
|
61
|
+
}
|
62
|
+
bulk(request) {
|
63
|
+
throw new Error("Method not implemented.");
|
64
|
+
}
|
65
|
+
}
|
66
|
+
exports.PostgresAdapter = PostgresAdapter;
|
@@ -47,7 +47,7 @@ export type ValidationModel<T> = DeepMutable<{
|
|
47
47
|
export declare class Validator {
|
48
48
|
static validateObject(fields: string[], object: any): boolean;
|
49
49
|
static documentStructure(structure: any): Record<string, any> | undefined;
|
50
|
-
static validateStructure(structure: any, object: any, level?: number): Promise<string[] | undefined>;
|
50
|
+
static validateStructure(structure: any, object: any, level?: number, parentPath?: string): Promise<string[] | undefined>;
|
51
51
|
static filterStructure(structure: any, object: any): void;
|
52
52
|
static syncStructure(structure: any, object: any): any;
|
53
53
|
static convertToType<T>(value: any, type: "string" | "number" | "boolean" | "date"): T | null;
|
@@ -60,7 +60,7 @@ class Validator {
|
|
60
60
|
}
|
61
61
|
return params;
|
62
62
|
}
|
63
|
-
static async validateStructure(structure, object, level = 0) {
|
63
|
+
static async validateStructure(structure, object, level = 0, parentPath = "") {
|
64
64
|
if (!structure)
|
65
65
|
return;
|
66
66
|
if (!object)
|
@@ -127,10 +127,20 @@ class Validator {
|
|
127
127
|
if (typeof object[i] !== typeof structure[i])
|
128
128
|
errors.push("wrong datatype for field '" + i + "' expected " + typeof structure[i] + " got " + typeof object[i]);
|
129
129
|
if (typeof structure[i] === "object" || typeof structure[i] === "function") {
|
130
|
-
|
131
|
-
|
132
|
-
|
133
|
-
|
130
|
+
if (typeof object[i] !== "object") {
|
131
|
+
errors.push("wrong type for: " + parentPath + i);
|
132
|
+
}
|
133
|
+
else {
|
134
|
+
if (Array.isArray(structure[i]) && !Array.isArray(object[i])) {
|
135
|
+
errors.push("array expected for " + parentPath + i);
|
136
|
+
}
|
137
|
+
else {
|
138
|
+
const result = await this.validateStructure(structure[i], object[i], level++, parentPath + i + ".");
|
139
|
+
if (result) {
|
140
|
+
for (let j of result)
|
141
|
+
errors.push(j);
|
142
|
+
}
|
143
|
+
}
|
134
144
|
}
|
135
145
|
}
|
136
146
|
if (typeof structure[i] === "string" && structure[i].includes(",")) {
|
@@ -138,9 +148,9 @@ class Validator {
|
|
138
148
|
if (expected.length) {
|
139
149
|
const value = object[i].toString().toLowerCase().trim();
|
140
150
|
if (value.length === 0)
|
141
|
-
errors.push("empty value for field '" + i + "'");
|
151
|
+
errors.push("empty value for field '" + parentPath + i + "'");
|
142
152
|
else if (expected.indexOf(value) === -1)
|
143
|
-
errors.push("wrong value for field '" + i + "', expected " + expected.join(", ") + " got: " + value);
|
153
|
+
errors.push("wrong value for field '" + parentPath + i + "', expected " + expected.join(", ") + " got: " + value);
|
144
154
|
}
|
145
155
|
}
|
146
156
|
if (structure['$__' + i + "_values"]) {
|
@@ -155,7 +165,7 @@ class Validator {
|
|
155
165
|
break;
|
156
166
|
}
|
157
167
|
}
|
158
|
-
errors.push("
|
168
|
+
errors.push("wrong value for field '" + parentPath + i + "'" + (caseValue ? ", check case for: " + caseValue : ""));
|
159
169
|
}
|
160
170
|
}
|
161
171
|
}
|
@@ -163,12 +173,12 @@ class Validator {
|
|
163
173
|
const regex = structure["$__" + i + "_regex"];
|
164
174
|
const reg = new RegExp(regex);
|
165
175
|
if (reg.test(object[i]) === false)
|
166
|
-
errors.push("wrong value for field '" + i + "', false mask for: >" + object[i] + "<");
|
176
|
+
errors.push("wrong value for field '" + parentPath + i + "', false mask for: >" + object[i] + "<");
|
167
177
|
}
|
168
178
|
if (typeof structure[i] === "string" && (structure[i] === "@" || structure[i] === "email")) {
|
169
179
|
const value = object[i].toString().toLowerCase().trim();
|
170
180
|
if (value.replaceAll(/^[\w-\.]+@([\w-]+\.)+[\w-]{2,6}$/g, "").length !== 0)
|
171
|
-
errors.push("wrong value for field '" + i + "', expected email got: " + object[i]);
|
181
|
+
errors.push("wrong value for field '" + parentPath + i + "', expected email got: " + object[i]);
|
172
182
|
const domain = value.split("@")[1];
|
173
183
|
try {
|
174
184
|
await dns_1.default.promises.resolveMx(domain);
|
@@ -185,39 +195,39 @@ class Validator {
|
|
185
195
|
object[i] = parseInt(object[i]);
|
186
196
|
}
|
187
197
|
if (isNaN(object[i]))
|
188
|
-
errors.push("wrong value for field '" + i + "', expected number got " + object[i]);
|
198
|
+
errors.push("wrong value for field '" + parentPath + i + "', expected number got " + object[i]);
|
189
199
|
if (structure[i] != 0) {
|
190
200
|
if (structure[i] > 0) {
|
191
201
|
const max = structure[i];
|
192
202
|
if (object[i] > max)
|
193
|
-
errors.push("value for field '" + i + "' is too big, expected less than " + max + " got: " + object[i]);
|
203
|
+
errors.push("value for field '" + parentPath + i + "' is too big, expected less than " + max + " got: " + object[i]);
|
194
204
|
}
|
195
205
|
else {
|
196
206
|
const min = structure[i] * -1;
|
197
207
|
if (object[i] < min)
|
198
|
-
errors.push("value for field '" + i + "' is too small, expected more than " + min + " got: " + object[i]);
|
208
|
+
errors.push("value for field '" + parentPath + i + "' is too small, expected more than " + min + " got: " + object[i]);
|
199
209
|
}
|
200
210
|
}
|
201
211
|
}
|
202
212
|
if (typeof structure[i] === "number" && structure["$__" + i + "_min"]) {
|
203
213
|
const min = structure["$__" + i + "_min"];
|
204
214
|
if (object[i] < min)
|
205
|
-
errors.push("value for field '" + i + "' is too small, expected more than " + min + " got: " + object[i]);
|
215
|
+
errors.push("value for field '" + parentPath + i + "' is too small, expected more than " + min + " got: " + object[i]);
|
206
216
|
}
|
207
217
|
if (typeof structure[i] === "number" && structure["$__" + i + "_max"]) {
|
208
218
|
const max = structure["$__" + i + "_max"];
|
209
219
|
if (object[i] > max)
|
210
|
-
errors.push("value for field '" + i + "' is too big, expected less than " + max + " got: " + object[i]);
|
220
|
+
errors.push("value for field '" + parentPath + i + "' is too big, expected less than " + max + " got: " + object[i]);
|
211
221
|
}
|
212
222
|
if (typeof object[i] === "string" && structure["$__" + i + "_min"]) {
|
213
223
|
const min = structure["$__" + i + "_min"];
|
214
224
|
if (object[i].length < min)
|
215
|
-
errors.push("value length for field '" + i + "' is too small, expected more than " + min + " got: " + object[i].length);
|
225
|
+
errors.push("value length for field '" + parentPath + i + "' is too small, expected more than " + min + " got: " + object[i].length);
|
216
226
|
}
|
217
227
|
if (typeof object[i] === "string" && structure["$__" + i + "_max"]) {
|
218
228
|
const max = structure["$__" + i + "_max"];
|
219
229
|
if (object[i].length > max)
|
220
|
-
errors.push("value length for field '" + i + "' is too big, expected less than " + max + " got: " + object[i].length);
|
230
|
+
errors.push("value length for field '" + parentPath + i + "' is too big, expected less than " + max + " got: " + object[i].length);
|
221
231
|
}
|
222
232
|
foundKeys.push(i);
|
223
233
|
}
|
package/dist/index.d.ts
CHANGED
@@ -9,4 +9,5 @@ import { DataProvider } from "./apiServer/helper/DataProvider";
|
|
9
9
|
import { UID } from "./apiServer/helper/UID";
|
10
10
|
import { ExternalService } from "./apiServer/external/ExternalService";
|
11
11
|
import { DBService } from "./apiServer/DBService";
|
12
|
-
|
12
|
+
import { YYYYMMDDHH } from "./apiServer/helper/YYYYMMDDHH";
|
13
|
+
export { UID, YYYYMMDDHH, APIService, Initializer, LocalRequest, ValidationModel, MysqlService, Validator, LogService, DataProvider, ErrorUtils, ExternalService, DBService, S_MONITOR_REGISTRATE_ACTION };
|
package/dist/index.js
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
"use strict";
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
3
|
-
exports.S_MONITOR_REGISTRATE_ACTION = exports.DBService = exports.ExternalService = exports.ErrorUtils = exports.DataProvider = exports.LogService = exports.Validator = exports.MysqlService = exports.LocalRequest = exports.Initializer = exports.APIService = exports.UID = void 0;
|
3
|
+
exports.S_MONITOR_REGISTRATE_ACTION = exports.DBService = exports.ExternalService = exports.ErrorUtils = exports.DataProvider = exports.LogService = exports.Validator = exports.MysqlService = exports.LocalRequest = exports.Initializer = exports.APIService = exports.YYYYMMDDHH = exports.UID = void 0;
|
4
4
|
const APIService_1 = require("./apiServer/APIService");
|
5
5
|
Object.defineProperty(exports, "APIService", { enumerable: true, get: function () { return APIService_1.APIService; } });
|
6
6
|
Object.defineProperty(exports, "Initializer", { enumerable: true, get: function () { return APIService_1.Initializer; } });
|
@@ -24,3 +24,5 @@ const ExternalService_1 = require("./apiServer/external/ExternalService");
|
|
24
24
|
Object.defineProperty(exports, "ExternalService", { enumerable: true, get: function () { return ExternalService_1.ExternalService; } });
|
25
25
|
const DBService_1 = require("./apiServer/DBService");
|
26
26
|
Object.defineProperty(exports, "DBService", { enumerable: true, get: function () { return DBService_1.DBService; } });
|
27
|
+
const YYYYMMDDHH_1 = require("./apiServer/helper/YYYYMMDDHH");
|
28
|
+
Object.defineProperty(exports, "YYYYMMDDHH", { enumerable: true, get: function () { return YYYYMMDDHH_1.YYYYMMDDHH; } });
|
package/package.json
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
{
|
2
2
|
"name": "badmfck-api-server",
|
3
|
-
"version": "3.7.
|
3
|
+
"version": "3.7.7",
|
4
4
|
"description": "Simple API http server based on express",
|
5
5
|
"main": "dist/index.js",
|
6
6
|
"types": "dist/index.d.ts",
|
@@ -19,6 +19,7 @@
|
|
19
19
|
"dependencies": {
|
20
20
|
"@types/express-fileupload": "^1.5.0",
|
21
21
|
"@types/mysql": "^2.15.21",
|
22
|
+
"@types/pg": "^8.15.5",
|
22
23
|
"@types/ws": "^8.5.9",
|
23
24
|
"axios": "^1.10.0",
|
24
25
|
"badmfck-signal": "^1.4.9",
|
@@ -26,6 +27,7 @@
|
|
26
27
|
"express": "^4.21.2",
|
27
28
|
"express-fileupload": "^1.5.2",
|
28
29
|
"mysql2": "^3.14.2",
|
30
|
+
"pg": "^8.16.3",
|
29
31
|
"ws": "^8.18.3"
|
30
32
|
},
|
31
33
|
"devDependencies": {
|