@thesashadev/girl-agent 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
1
1
  # Changelog
2
2
 
3
+ ## 0.1.8 — OpenAI-compatible API compatibility
4
+
5
+ Дата: 2026-05-06
6
+
7
+ - JSON-ответы теперь сначала запрашиваются через `json_schema`, с fallback на `json_object` и `text` для разных OpenAI-compatible API. (#33)
8
+ - LM Studio и Ollama больше не требуют реальный API ключ в wizard/headless setup.
9
+ - Добавлена совместимость с OpenAI-compatible прокси, которые возвращают SSE/event-stream даже на обычный chat completions запрос.
10
+
11
+ ## 0.1.7 — MarkdownV2 escaping fix
12
+
13
+ Дата: 2026-05-06
14
+
15
+ - Исправлена ошибка `400: Bad Request: can't parse entities` при отправке сообщений с точками, скобками и другими зарезервированными символами MarkdownV2. (#15)
16
+ - Добавлен `escapeMarkdownV2()` хелпер для экранирования всех 18 зарезервированных символов.
17
+ - Fallback на plain text если экранирование не помогает.
18
+
3
19
  ## 0.1.6 — --new flag
4
20
 
5
21
  Дата: 2026-05-06
package/README.md CHANGED
@@ -1,4 +1,4 @@
1
- ![girl-agent banner](https://girl-agent.com/og-image.png)
1
+ ![girl-agent banner](https://girl-agent.com/og-image.png)
2
2
 
3
3
  [website]: https://girl-agent.com
4
4
  [docs]: https://docs.girl-agent.com
@@ -49,6 +49,22 @@ Wizard задаст пару вопросов — имя, возраст, Telegr
49
49
  npx @thesashadev/girl-agent --profile=arina
50
50
  ```
51
51
 
52
+ **Через Docker (рекомендуется для сервера):**
53
+
54
+ Первый запуск в интерактивном режиме (для настройки через визард):
55
+ ```bash
56
+ docker-compose run --rm -it girl-agent
57
+ ```
58
+ *(пройдите все шаги и после появления дашборда нажмите `Ctrl+C`)*
59
+
60
+ Последующие запуски (в фоне):
61
+ ```bash
62
+ docker-compose up -d
63
+ ```
64
+ *(если профилей несколько, запустите конкретный так: `docker-compose run -d girl-agent node dist/cli.js --profile=arina`)*
65
+
66
+ *(посмотреть логи: `docker-compose logs -f`)*
67
+
52
68
  **Из исходников:**
53
69
 
54
70
  ```powershell
package/dist/cli.js CHANGED
@@ -18,6 +18,19 @@ var init_esm_shims = __esm({
18
18
  }
19
19
  });
20
20
 
21
+ // src/telegram/markdown.ts
22
+ function escapeMarkdownV2(text) {
23
+ return text.replace(MD2_RESERVED, "\\$1");
24
+ }
25
+ var MD2_RESERVED;
26
+ var init_markdown = __esm({
27
+ "src/telegram/markdown.ts"() {
28
+ "use strict";
29
+ init_esm_shims();
30
+ MD2_RESERVED = /([_*\[\]()~`>#+\-=|{}.!\\])/g;
31
+ }
32
+ });
33
+
21
34
  // src/telegram/userbot.ts
22
35
  var userbot_exports = {};
23
36
  __export(userbot_exports, {
@@ -177,7 +190,11 @@ function makeUserbotAdapter(cfg) {
177
190
  },
178
191
  async editLastMessage(chatId, messageId, text) {
179
192
  const peer = await resolvePeer(chatId);
180
- await client.editMessage(peer, { message: messageId, text, parseMode: "MarkdownV2" });
193
+ try {
194
+ await client.editMessage(peer, { message: messageId, text: escapeMarkdownV2(text), parseMode: "MarkdownV2" });
195
+ } catch {
196
+ await client.editMessage(peer, { message: messageId, text });
197
+ }
181
198
  },
182
199
  async deleteMessages(chatId, messageIds, revoke = false) {
183
200
  const peer = await resolvePeer(chatId);
@@ -241,6 +258,7 @@ var init_userbot = __esm({
241
258
  "src/telegram/userbot.ts"() {
242
259
  "use strict";
243
260
  init_esm_shims();
261
+ init_markdown();
244
262
  }
245
263
  });
246
264
 
@@ -275,8 +293,13 @@ function makeBotAdapter(cfg) {
275
293
  });
276
294
  },
277
295
  async sendText(chatId, text) {
278
- const msg = await bot.api.sendMessage(chatId, text, { parse_mode: "MarkdownV2" });
279
- return msg.message_id;
296
+ try {
297
+ const msg = await bot.api.sendMessage(chatId, escapeMarkdownV2(text), { parse_mode: "MarkdownV2" });
298
+ return msg.message_id;
299
+ } catch {
300
+ const msg = await bot.api.sendMessage(chatId, text);
301
+ return msg.message_id;
302
+ }
280
303
  },
281
304
  async setTyping(chatId, on) {
282
305
  if (on) {
@@ -318,6 +341,7 @@ var init_bot = __esm({
318
341
  "src/telegram/bot.ts"() {
319
342
  "use strict";
320
343
  init_esm_shims();
344
+ init_markdown();
321
345
  }
322
346
  });
323
347
 
@@ -353,8 +377,10 @@ var LLM_PRESETS = [
353
377
  proto: "openai",
354
378
  baseURL: "http://localhost:1234/v1",
355
379
  defaultModel: "",
380
+ defaultApiKey: "lm-studio",
381
+ apiKeyRequired: false,
356
382
  custom: true,
357
- hint: "\u043B\u043E\u043A\u0430\u043B\u044C\u043D\u043E, OpenAI-compatible endpoint"
383
+ hint: "\u043B\u043E\u043A\u0430\u043B\u044C\u043D\u043E, OpenAI-compatible endpoint; \u043A\u043B\u044E\u0447 \u043D\u0435 \u043D\u0443\u0436\u0435\u043D"
358
384
  },
359
385
  {
360
386
  id: "ollama",
@@ -362,8 +388,10 @@ var LLM_PRESETS = [
362
388
  proto: "openai",
363
389
  baseURL: "http://localhost:11434/v1",
364
390
  defaultModel: "llama3.1",
391
+ defaultApiKey: "ollama",
392
+ apiKeyRequired: false,
365
393
  custom: true,
366
- hint: "\u043B\u043E\u043A\u0430\u043B\u044C\u043D\u043E \u0447\u0435\u0440\u0435\u0437 /v1"
394
+ hint: "\u043B\u043E\u043A\u0430\u043B\u044C\u043D\u043E \u0447\u0435\u0440\u0435\u0437 /v1; \u043A\u043B\u044E\u0447 \u043D\u0435 \u043D\u0443\u0436\u0435\u043D"
367
395
  },
368
396
  {
369
397
  id: "anthropic",
@@ -1001,20 +1029,28 @@ var OpenAILike = class {
1001
1029
  constructor(cfg) {
1002
1030
  this.cfg = cfg;
1003
1031
  this.client = new OpenAI({
1004
- apiKey: cfg.apiKey,
1032
+ apiKey: openAIApiKey(cfg),
1005
1033
  baseURL: normalizeBaseURL(cfg.baseURL),
1006
1034
  timeout: LLM_TIMEOUT_MS,
1007
1035
  maxRetries: LLM_MAX_RETRIES
1008
1036
  });
1037
+ this.fetchClient = new OpenAI({
1038
+ apiKey: openAIApiKey(cfg),
1039
+ baseURL: normalizeBaseURL(cfg.baseURL),
1040
+ timeout: LLM_TIMEOUT_MS,
1041
+ maxRetries: LLM_MAX_RETRIES,
1042
+ fetch: compatibleFetch
1043
+ });
1009
1044
  }
1010
1045
  cfg;
1011
1046
  client;
1047
+ fetchClient;
1012
1048
  async chat(messages, opts = {}) {
1013
1049
  const params = {
1014
1050
  model: this.cfg.model,
1015
1051
  messages: openAIMessages(messages),
1016
1052
  temperature: opts.temperature ?? 0.85,
1017
- response_format: opts.json ? { type: "json_object" } : void 0
1053
+ response_format: openAIResponseFormat(opts)
1018
1054
  };
1019
1055
  if (usesMaxCompletionTokens(this.cfg.model)) {
1020
1056
  params.max_completion_tokens = opts.maxTokens ?? 600;
@@ -1025,19 +1061,86 @@ var OpenAILike = class {
1025
1061
  return res.choices[0]?.message?.content?.trim() ?? "";
1026
1062
  }
1027
1063
  async createWithCompatibilityFallback(params) {
1028
- try {
1029
- return await this.client.chat.completions.create(params);
1030
- } catch (error) {
1031
- const fallback = completionTokenFallback(params, error);
1032
- if (!fallback) throw enrichOpenAIError(error, this.cfg.baseURL);
1064
+ const attempted = /* @__PURE__ */ new Set();
1065
+ let current = params;
1066
+ let lastError;
1067
+ while (current) {
1068
+ const key = completionParamsKey(current);
1069
+ if (attempted.has(key)) break;
1070
+ attempted.add(key);
1071
+ try {
1072
+ return await this.client.chat.completions.create(current);
1073
+ } catch (error) {
1074
+ lastError = error;
1075
+ const next = completionFallback(current, error);
1076
+ if (!next) break;
1077
+ current = next;
1078
+ }
1079
+ }
1080
+ if (this.cfg.baseURL) {
1033
1081
  try {
1034
- return await this.client.chat.completions.create(fallback);
1035
- } catch (fallbackError) {
1036
- throw enrichOpenAIError(fallbackError, this.cfg.baseURL);
1082
+ return await this.fetchClient.chat.completions.create({ ...params, stream: false });
1083
+ } catch (fetchError) {
1084
+ lastError = fetchError;
1037
1085
  }
1038
1086
  }
1087
+ throw enrichOpenAIError(lastError, this.cfg.baseURL);
1039
1088
  }
1040
1089
  };
1090
+ async function compatibleFetch(url, init) {
1091
+ const res = await fetch(url, init);
1092
+ const contentType = res.headers.get("content-type") ?? "";
1093
+ if (!res.ok) return res;
1094
+ const text = await res.clone().text();
1095
+ if (contentType.includes("text/event-stream") || text.trimStart().startsWith("data:")) {
1096
+ return completionStreamToJsonResponse(res, text);
1097
+ }
1098
+ return res;
1099
+ }
1100
+ function completionStreamToJsonResponse(res, text) {
1101
+ const completion = parseOpenAIEventStream(text);
1102
+ return new Response(JSON.stringify(completion), {
1103
+ status: res.status,
1104
+ statusText: res.statusText,
1105
+ headers: { "content-type": "application/json" }
1106
+ });
1107
+ }
1108
+ function parseOpenAIEventStream(raw) {
1109
+ let id = "chatcmpl-stream";
1110
+ let model = "";
1111
+ let created = Math.floor(Date.now() / 1e3);
1112
+ const content = [];
1113
+ let finishReason = "stop";
1114
+ for (const line of raw.split(/\r?\n/)) {
1115
+ const trimmed = line.trim();
1116
+ if (!trimmed.startsWith("data:")) continue;
1117
+ const data = trimmed.slice(5).trim();
1118
+ if (!data || data === "[DONE]") continue;
1119
+ try {
1120
+ const chunk = JSON.parse(data);
1121
+ id = chunk.id || id;
1122
+ model = chunk.model || model;
1123
+ created = chunk.created || created;
1124
+ const choice = chunk.choices[0];
1125
+ finishReason = choice?.finish_reason ?? finishReason;
1126
+ const delta = choice?.delta?.content ?? choice?.message?.content;
1127
+ if (typeof delta === "string") content.push(delta);
1128
+ } catch {
1129
+ }
1130
+ }
1131
+ return {
1132
+ id,
1133
+ object: "chat.completion",
1134
+ created,
1135
+ model,
1136
+ choices: [{
1137
+ index: 0,
1138
+ message: { role: "assistant", content: content.join(""), refusal: null },
1139
+ finish_reason: finishReason,
1140
+ logprobs: null
1141
+ }]
1142
+ };
1143
+ }
1041
1144
  var AnthropicLike = class {
1042
1145
  constructor(cfg) {
1043
1146
  this.cfg = cfg;
@@ -1131,8 +1234,36 @@ function normalizeBaseURL(value) {
1131
1234
  function usesMaxCompletionTokens(model) {
1132
1235
  return /^(?:o\d|o\d-|o\d\b|gpt-5|gpt-5\.|gpt-[5-9])|\/(?:o\d|gpt-5|gpt-[5-9])/.test(model.trim().toLowerCase());
1133
1236
  }
1237
+ function openAIApiKey(cfg) {
1238
+ return cfg.apiKey.trim() || (cfg.presetId === "ollama" ? "ollama" : cfg.presetId === "lmstudio" ? "lm-studio" : "");
1239
+ }
1240
+ function openAIResponseFormat(opts) {
1241
+ if (!opts.json) return void 0;
1242
+ if (opts.jsonSchema) return { type: "json_schema", json_schema: opts.jsonSchema };
1243
+ return {
1244
+ type: "json_schema",
1245
+ json_schema: {
1246
+ name: "json_response",
1247
+ strict: false,
1248
+ schema: { type: "object", additionalProperties: true }
1249
+ }
1250
+ };
1251
+ }
1252
+ function completionFallback(params, error) {
1253
+ return responseFormatFallback(params, error) ?? completionTokenFallback(params, error);
1254
+ }
1255
+ function responseFormatFallback(params, error) {
1256
+ const message = openAIErrorText(error);
1257
+ if (!params.response_format || !message.includes("response_format")) return null;
1258
+ if (params.response_format.type === "json_schema" && message.includes("json_object")) {
1259
+ return { ...params, response_format: { type: "text" } };
1260
+ }
1261
+ if (params.response_format.type === "json_schema") return { ...params, response_format: { type: "json_object" } };
1262
+ if (params.response_format.type === "json_object") return { ...params, response_format: { type: "text" } };
1263
+ return null;
1264
+ }
1134
1265
  function completionTokenFallback(params, error) {
1135
- const message = errorMessage(error).toLowerCase();
1266
+ const message = openAIErrorText(error);
1136
1267
  if (params.max_tokens != null && message.includes("max_tokens") && message.includes("max_completion_tokens")) {
1137
1268
  const { max_tokens, ...rest } = params;
1138
1269
  return { ...rest, max_completion_tokens: max_tokens };
@@ -1143,6 +1274,16 @@ function completionTokenFallback(params, error) {
1143
1274
  }
1144
1275
  return null;
1145
1276
  }
1277
+ function completionParamsKey(params) {
1278
+ const tokenKey = params.max_completion_tokens != null ? "max_completion_tokens" : "max_tokens";
1279
+ return `${params.response_format?.type ?? "default"}:${tokenKey}`;
1280
+ }
1281
+ function openAIErrorText(error) {
1282
+ if (error instanceof OpenAI.APIError) {
1283
+ return `${error.status ?? ""} ${error.code ?? ""} ${error.type ?? ""} ${error.message}`.toLowerCase();
1284
+ }
1285
+ return errorMessage(error).toLowerCase();
1286
+ }
1146
1287
  function enrichOpenAIError(error, baseURL) {
1147
1288
  if (error instanceof OpenAI.APIConnectionError) {
1148
1289
  return new Error(connectionErrorMessage("OpenAI-compatible", baseURL, error));
@@ -1178,6 +1319,40 @@ function makeLLM(cfg) {
1178
1319
  init_esm_shims();
1179
1320
  var SYS = `\u0422\u044B \u2014 \u0440\u0435\u0436\u0438\u0441\u0441\u0451\u0440 \u043F\u0435\u0440\u0441\u043E\u043D\u0430\u0436\u0435\u0439. \u0422\u0432\u043E\u044F \u0437\u0430\u0434\u0430\u0447\u0430 \u2014 \u0441\u0433\u0435\u043D\u0435\u0440\u0438\u0440\u043E\u0432\u0430\u0442\u044C \u0434\u043E\u0441\u0442\u043E\u0432\u0435\u0440\u043D\u044B\u0439, \u043D\u0435-\u0433\u043E\u043B\u043B\u0438\u0432\u0443\u0434\u0441\u043A\u0438\u0439, \u043D\u0435-\u0441\u043B\u0430\u0434\u043A\u0438\u0439, \u043D\u0435-AI-\u043F\u043E\u043C\u043E\u0449\u043D\u0438\u043A\u043E\u0432\u044B\u0439 \u043F\u0440\u043E\u0444\u0438\u043B\u044C \u043E\u0431\u044B\u0447\u043D\u043E\u0439 \u0440\u0443\u0441\u0441\u043A\u043E\u044F\u0437\u044B\u0447\u043D\u043E\u0439 \u0434\u0435\u0432\u0443\u0448\u043A\u0438 \u0443\u043A\u0430\u0437\u0430\u043D\u043D\u043E\u0433\u043E \u0432\u043E\u0437\u0440\u0430\u0441\u0442\u0430. \u0411\u0435\u0437 \u043F\u0440\u0438\u0443\u043A\u0440\u0430\u0448\u0438\u0432\u0430\u043D\u0438\u0439, \u0431\u0435\u0437 \u043F\u043E\u0437\u0438\u0442\u0438\u0432\u043D\u043E\u0433\u043E \u043C\u043E\u0440\u0430\u043B\u044C\u043D\u043E\u0433\u043E \u0443\u043A\u043B\u043E\u043D\u0430, \u0431\u0435\u0437 \xAB\u0443\u0434\u043E\u0431\u043D\u043E\u0439\xBB \u043F\u0435\u0440\u0441\u043E\u043D\u044B. \u0414\u043E\u043F\u0443\u0441\u0442\u0438\u043C\u044B \u043D\u0435\u0434\u043E\u0441\u0442\u0430\u0442\u043A\u0438, \u0443\u0437\u043A\u0438\u0435 \u0438\u043D\u0442\u0435\u0440\u0435\u0441\u044B, \u0441\u043A\u0435\u043F\u0441\u0438\u0441, \u0442\u043E\u043A\u0441\u0438\u0447\u043D\u043E\u0441\u0442\u044C, \u043B\u0435\u043D\u044C, \u0442\u0440\u0435\u0432\u043E\u0436\u043D\u043E\u0441\u0442\u044C, \u043D\u0435\u0443\u0432\u0435\u0440\u0435\u043D\u043D\u043E\u0441\u0442\u044C \u2014 \u0447\u0442\u043E \u043F\u043E\u0434\u0445\u043E\u0434\u0438\u0442 \u0432\u043E\u0437\u0440\u0430\u0441\u0442\u0443. 
\u041D\u0438\u043A\u0430\u043A\u0438\u0445 \xAB\u043A\u043E\u0443\u0447\u0438\u043D\u0433\u043E\u0432\u044B\u0445\xBB \u0444\u043E\u0440\u043C\u0443\u043B\u0438\u0440\u043E\u0432\u043E\u043A. \u0422\u043E\u043B\u044C\u043A\u043E \u0436\u0438\u0432\u0430\u044F \u0440\u0435\u0447\u044C, \u043A\u0430\u043A \u0438\u0437 \u0434\u043D\u0435\u0432\u043D\u0438\u043A\u0430 \u0438\u043B\u0438 \u0432\u043D\u0443\u0442\u0440\u0435\u043D\u043D\u0435\u0433\u043E \u043C\u043E\u043D\u043E\u043B\u043E\u0433\u0430. \u0412\u043E\u0437\u0440\u0430\u0441\u0442: {{age}} \u043B\u0435\u0442, \u0438\u043C\u044F: {{name}}.`;
1180
1321
  var WEEKDAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"];
1322
+ var BUSY_SCHEDULE_SCHEMA = {
1323
+ name: "busy_schedule",
1324
+ strict: false,
1325
+ schema: {
1326
+ type: "object",
1327
+ properties: {
1328
+ busySchedule: {
1329
+ type: "array",
1330
+ items: {
1331
+ type: "object",
1332
+ properties: {
1333
+ label: { type: "string" },
1334
+ days: {
1335
+ type: "array",
1336
+ items: { type: "string", enum: WEEKDAYS }
1337
+ },
1338
+ from: { type: "string" },
1339
+ to: { type: "string" },
1340
+ checkAfterMin: {
1341
+ type: "array",
1342
+ items: { type: "number" },
1343
+ minItems: 2,
1344
+ maxItems: 2
1345
+ }
1346
+ },
1347
+ required: ["label", "from", "to"],
1348
+ additionalProperties: false
1349
+ }
1350
+ }
1351
+ },
1352
+ required: ["busySchedule"],
1353
+ additionalProperties: false
1354
+ }
1355
+ };
1181
1356
  async function generatePersonaPack(llm, slug, name, age, nationality = "RU", personaNotes = "", onProgress) {
1182
1357
  const country = nationality === "UA" ? "\u0423\u043A\u0440\u0430\u0438\u043D\u0430" : "\u0420\u043E\u0441\u0441\u0438\u044F / \u0421\u041D\u0413";
1183
1358
  const langHint = nationality === "UA" ? "\u041F\u0438\u0448\u0435\u0442 \u043D\u0430 \u0420\u0423\u0421\u0421\u041A\u041E\u041C (\u043A\u0430\u043A \u0440\u0435\u0430\u043B\u044C\u043D\u043E \u043F\u0438\u0448\u0435\u0442 \u0431\u043E\u043B\u044C\u0448\u0438\u043D\u0441\u0442\u0432\u043E \u0434\u0435\u0432\u0443\u0448\u0435\u043A \u0432 \u0423\u043A\u0440\u0430\u0438\u043D\u0435 \u0432 \u0442\u0433). \u0414\u043E\u043F\u0443\u0441\u0442\u0438\u043C \u043B\u0451\u0433\u043A\u0438\u0439 \u0441\u0443\u0440\u0436\u0438\u043A: ~90% \u0440\u0443\u0441\u0441\u043A\u0438\u0439 + ~10% \u0443\u043A\u0440\u0430\u0438\u043D\u0441\u043A\u0438\u0445 \u0432\u0441\u0442\u0430\u0432\u043E\u043A (\u043E\u0442\u0434\u0435\u043B\u044C\u043D\u044B\u0435 \u0441\u043B\u043E\u0432\u0430: '\u0448\u043E', '\u043C\u0430\u0431\u0443\u0442\u044C', '\u0442\u0440\u043E\u0445\u0438', '\u044F\u043A', '\u043D\u0443 \u0448\u043E', '\u0442\u0430 \u0439\u0434\u0438', '\u0434\u044F\u043A\u0443\u044E'), \u043D\u043E \u0431\u0435\u0437 \u043F\u043E\u043B\u043D\u043E\u0433\u043E \u043F\u0435\u0440\u0435\u0445\u043E\u0434\u0430 \u043D\u0430 \u0443\u043A\u0440\u0430\u0438\u043D\u0441\u043A\u0438\u0439. \u0427\u0438\u0441\u0442\u043E-\u0443\u043A\u0440\u0430\u0438\u043D\u0441\u043A\u0438\u0439 \u0442\u0435\u043A\u0441\u0442 \u041D\u0415 \u0433\u0435\u043D\u0435\u0440\u0438\u0440\u0443\u0439 \u2014 \u044D\u0442\u043E \u043D\u0435\u0440\u0435\u0430\u043B\u0438\u0441\u0442\u0438\u0447\u043D\u043E \u0434\u043B\u044F \u0442\u0433-\u043F\u0435\u0440\u0435\u043F\u0438\u0441\u043A\u0438." : "\u0420\u0443\u0441\u0441\u043A\u043E\u044F\u0437\u044B\u0447\u043D\u0430\u044F \u0431\u0435\u0437 \u0443\u043A\u0440\u0430\u0438\u043D\u0438\u0437\u043C\u043E\u0432.";
@@ -1288,7 +1463,7 @@ ${personaNotes.trim()}
1288
1463
  onProgress?.(65, "\u0433\u0435\u043D\u0435\u0440\u0438\u0440\u0443\u0435\u043C communication.md\u2026");
1289
1464
  const boundaries = await llm.chat([{ role: "system", content: sys }, { role: "user", content: boundariesPrompt }], { temperature: 0.9, maxTokens: 3500 });
1290
1465
  onProgress?.(85, "\u0433\u0435\u043D\u0435\u0440\u0438\u0440\u0443\u0435\u043C busy schedule\u2026");
1291
- const routineRaw = await llm.chat([{ role: "system", content: sys }, { role: "user", content: routinePrompt }], { temperature: 0.85, maxTokens: 3500, json: true });
1466
+ const routineRaw = await llm.chat([{ role: "system", content: sys }, { role: "user", content: routinePrompt }], { temperature: 0.85, maxTokens: 3500, json: true, jsonSchema: BUSY_SCHEDULE_SCHEMA });
1292
1467
  const busySchedule = parseBusySchedule(routineRaw, name, age);
1293
1468
  await writeMd(slug, "persona.md", persona);
1294
1469
  await writeMd(slug, "speech.md", speech);
@@ -1842,6 +2017,7 @@ function Wizard({ initial, onDone }) {
1842
2017
  setLlmProto(preset.proto);
1843
2018
  setLlmBaseURL(preset.baseURL ?? "");
1844
2019
  setLlmModel(preset.defaultModel);
2020
+ setLlmKey(preset.defaultApiKey ?? "");
1845
2021
  if (preset.custom) setStep("api-base");
1846
2022
  else setStep("api-model");
1847
2023
  }
@@ -1869,7 +2045,9 @@ function Wizard({ initial, onDone }) {
1869
2045
  return /* @__PURE__ */ React.createElement(Box, { flexDirection: "column", padding: 1 }, /* @__PURE__ */ React.createElement(Header, { sub: "\u043D\u0430\u0437\u0432\u0430\u043D\u0438\u0435 \u043C\u043E\u0434\u0435\u043B\u0438" }), /* @__PURE__ */ React.createElement(Bar, { step: 2, total: 9 }), /* @__PURE__ */ React.createElement(Box, { marginTop: 1 }, /* @__PURE__ */ React.createElement(Text, null, "Model: "), /* @__PURE__ */ React.createElement(TextInput, { value: llmModel, onChange: setLlmModel, onSubmit: () => setStep("api-key") })));
1870
2046
  }
1871
2047
  if (step === "api-key") {
1872
- return /* @__PURE__ */ React.createElement(Box, { flexDirection: "column", padding: 1 }, /* @__PURE__ */ React.createElement(Header, { sub: "API \u043A\u043B\u044E\u0447" }), /* @__PURE__ */ React.createElement(Bar, { step: 2, total: 11 }), /* @__PURE__ */ React.createElement(Box, { marginTop: 1 }, /* @__PURE__ */ React.createElement(Text, null, "Key: "), /* @__PURE__ */ React.createElement(TextInput, { value: llmKey, onChange: setLlmKey, mask: "\u2022", onSubmit: () => llmKey && setStep("nationality") })));
2048
+ const preset = findPreset(llmPresetId);
2049
+ const apiKeyRequired = preset?.apiKeyRequired !== false;
2050
+ return /* @__PURE__ */ React.createElement(Box, { flexDirection: "column", padding: 1 }, /* @__PURE__ */ React.createElement(Header, { sub: apiKeyRequired ? "API \u043A\u043B\u044E\u0447" : "API \u043A\u043B\u044E\u0447 (\u043C\u043E\u0436\u043D\u043E \u043F\u0440\u043E\u043F\u0443\u0441\u0442\u0438\u0442\u044C)" }), /* @__PURE__ */ React.createElement(Bar, { step: 2, total: 11 }), /* @__PURE__ */ React.createElement(Box, { marginTop: 1 }, /* @__PURE__ */ React.createElement(Text, null, "Key: "), /* @__PURE__ */ React.createElement(TextInput, { value: llmKey, onChange: setLlmKey, mask: "\u2022", onSubmit: () => (llmKey || !apiKeyRequired) && setStep("nationality") })), !apiKeyRequired && /* @__PURE__ */ React.createElement(Text, { dimColor: true }, "\u0414\u043B\u044F \u043B\u043E\u043A\u0430\u043B\u044C\u043D\u043E\u0433\u043E API \u0431\u0443\u0434\u0435\u0442 \u0438\u0441\u043F\u043E\u043B\u044C\u0437\u043E\u0432\u0430\u043D \u0442\u0435\u0445\u043D\u0438\u0447\u0435\u0441\u043A\u0438\u0439 placeholder, \u0435\u0441\u043B\u0438 \u043E\u0441\u0442\u0430\u0432\u0438\u0442\u044C \u043F\u0443\u0441\u0442\u043E."));
1873
2051
  }
1874
2052
  if (step === "nationality") {
1875
2053
  return /* @__PURE__ */ React.createElement(Box, { flexDirection: "column", padding: 1 }, /* @__PURE__ */ React.createElement(Header, { sub: "\u043D\u0430\u0446\u0438\u043E\u043D\u0430\u043B\u044C\u043D\u043E\u0441\u0442\u044C (\u044F\u0437\u044B\u043A, \u0438\u043C\u044F, \u043A\u0443\u043B\u044C\u0442\u0443\u0440\u0430)" }), /* @__PURE__ */ React.createElement(Bar, { step: 3, total: 11 }), /* @__PURE__ */ React.createElement(Box, { marginTop: 1 }, /* @__PURE__ */ React.createElement(
@@ -5563,7 +5741,7 @@ usage:
5563
5741
  npx girl-agent --reset --profile=<slug>
5564
5742
  npx girl-agent <flags> # \u043F\u0440\u043E\u043F\u0443\u0441\u0442\u0438\u0442\u044C \u0432\u0438\u0437\u0430\u0440\u0434 \u0441 \u0430\u0440\u0433\u0443\u043C\u0435\u043D\u0442\u0430\u043C\u0438
5565
5743
 
5566
- required flags \u0434\u043B\u044F headless setup (--name --age --stage --api-preset --api-key --mode):
5744
+ required flags \u0434\u043B\u044F headless setup (--name --age --stage --api-preset --mode; --api-key \u043D\u0443\u0436\u0435\u043D \u0442\u043E\u043B\u044C\u043A\u043E \u0434\u043B\u044F \u043F\u0440\u043E\u0432\u0430\u0439\u0434\u0435\u0440\u043E\u0432 \u0441 \u0430\u0432\u0442\u043E\u0440\u0438\u0437\u0430\u0446\u0438\u0435\u0439):
5567
5745
  --profile=<slug> slug \u043F\u0440\u043E\u0444\u0438\u043B\u044F
5568
5746
  --mode=bot|userbot
5569
5747
  --token=<bot_token> \u0434\u043B\u044F bot
@@ -5572,7 +5750,7 @@ required flags \u0434\u043B\u044F headless setup (--name --age --stage --api-pre
5572
5750
  --base-url=<url> \u0434\u043B\u044F custom
5573
5751
  --proto=openai|anthropic \u0434\u043B\u044F custom
5574
5752
  --model=<model>
5575
- --api-key=<key>
5753
+ --api-key=<key> \u043D\u0435 \u043D\u0443\u0436\u0435\u043D \u0434\u043B\u044F \u043B\u043E\u043A\u0430\u043B\u044C\u043D\u044B\u0445 LM Studio/Ollama
5576
5754
  --name=<\u0438\u043C\u044F> \u043A\u043E\u043D\u043A\u0440\u0435\u0442\u043D\u043E\u0435 \u0438\u043C\u044F; \u0435\u0441\u043B\u0438 \u043F\u0440\u043E\u043F\u0443\u0441\u0442\u0438\u0442\u044C \u2014 \u0441\u043B\u0443\u0447\u0430\u0439\u043D\u043E\u0435 \u0438\u0437 \u043F\u0443\u043B\u0430 \u043F\u043E nationality (\u0442\u0443\u0440\u043D\u0438\u0440 \u0432\u044B\u0431\u043E\u0440\u0430 \u0438\u043C\u0451\u043D \u0434\u043E\u0441\u0442\u0443\u043F\u0435\u043D \u0422\u041E\u041B\u042C\u041A\u041E \u0432 TUI \u0432\u0438\u0437\u0430\u0440\u0434\u0435)
5577
5755
  --age=<n>
5578
5756
  --persona-notes=<text> \u0434\u043E\u043F. \u043F\u043E\u0436\u0435\u043B\u0430\u043D\u0438\u044F \u043A persona/speech/communication \u043F\u0435\u0440\u0435\u0434 \u0433\u0435\u043D\u0435\u0440\u0430\u0446\u0438\u0435\u0439
@@ -5659,7 +5837,9 @@ ${profiles.join("\n")}
5659
5837
  await runRuntime(cfg);
5660
5838
  return;
5661
5839
  }
5662
- const haveEnoughForFlags = argv.mode && argv["api-preset"] && argv["api-key"] && argv.age && argv.stage;
5840
+ const presetForFlags = argv["api-preset"] ? findPreset(String(argv["api-preset"])) : void 0;
5841
+ const apiKeyRequiredForFlags = presetForFlags?.apiKeyRequired !== false;
5842
+ const haveEnoughForFlags = argv.mode && argv["api-preset"] && (!apiKeyRequiredForFlags || argv["api-key"]) && argv.age && argv.stage;
5663
5843
  if (haveEnoughForFlags) {
5664
5844
  const cfg = await buildConfigFromFlags(argv);
5665
5845
  await writeConfig(cfg);
@@ -5731,7 +5911,7 @@ async function buildConfigFromFlags(argv) {
5731
5911
  tz,
5732
5912
  mode,
5733
5913
  stage: argv.stage,
5734
- llm: { presetId, proto, baseURL, apiKey: String(argv["api-key"]), model },
5914
+ llm: { presetId, proto, baseURL, apiKey: String(argv["api-key"] ?? preset?.defaultApiKey ?? ""), model },
5735
5915
  telegram: mode === "bot" ? { botToken: String(argv.token ?? "") } : {
5736
5916
  apiId: Number(argv["api-id"] ?? 0),
5737
5917
  apiHash: String(argv["api-hash"] ?? ""),
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@thesashadev/girl-agent",
3
- "version": "0.1.6",
3
+ "version": "0.1.8",
4
4
  "description": "Telegram AI persona engine with memory, schedule, relationship state and MTProto userbot mode.",
5
5
  "type": "module",
6
6
  "bin": {