@decentchat/decentclaw 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/monitor.ts ADDED
@@ -0,0 +1,1266 @@
1
+ import * as fs from "node:fs";
2
+ import * as os from "node:os";
3
+ import * as path from "node:path";
4
+ import { randomUUID } from "node:crypto";
5
+ import type { OpenClawConfig } from "openclaw/plugin-sdk";
6
+ import { resolveDecentChatAccount } from "./channel.js";
7
+ import { getDecentChatRuntime } from "./runtime.js";
8
+ import { resolveCompanyPromptContextForAccount, resolveThreadRoutingStateUpdate, decideCompanyParticipation, type CompanyThreadRoutingState } from "@decentchat/company-sim";
9
+ import { setActivePeer } from "./peer-registry.js";
10
+ import type { ResolvedDecentChatAccount } from "./types.js";
11
+
12
/**
 * Subset of the reply-prefix options consumed by this plugin.
 * Mirrors the shape returned by the SDK's `createReplyPrefixOptions` (typed at
 * the dynamic-import site in resolveReplyPrefixOptions), duplicated here so
 * the SDK import can remain optional.
 */
type ReplyPrefixOptions = {
  // Static text prepended to outgoing replies, if configured.
  responsePrefix?: string;
  // Enables Slack-style interactive reply handling.
  enableSlackInteractiveReplies?: boolean;
  // Optional hook deriving a prefix from per-message context.
  responsePrefixContextProvider?: (ctx: Record<string, unknown>) => string | undefined;
  // Invoked when a provider/model pair is chosen for a reply.
  onModelSelected?: (modelCtx: { provider?: string; model?: string }) => void;
};
18
+
19
// Tri-state cache for the optional SDK factory:
//   undefined → not probed yet; null → probe failed or export missing; function → usable.
// Populated once by resolveReplyPrefixOptions.
let cachedCreateReplyPrefixOptions:
  | ((params: { cfg: OpenClawConfig; agentId: string; channel: string; accountId: string }) => ReplyPrefixOptions)
  | null
  | undefined;
23
+
24
+ async function resolveReplyPrefixOptions(params: {
25
+ cfg: OpenClawConfig;
26
+ agentId: string;
27
+ channel: string;
28
+ accountId: string;
29
+ }): Promise<ReplyPrefixOptions> {
30
+ if (cachedCreateReplyPrefixOptions === undefined) {
31
+ try {
32
+ const sdk = await import("openclaw/plugin-sdk") as {
33
+ createReplyPrefixOptions?: (params: {
34
+ cfg: OpenClawConfig;
35
+ agentId: string;
36
+ channel: string;
37
+ accountId: string;
38
+ }) => ReplyPrefixOptions;
39
+ };
40
+ cachedCreateReplyPrefixOptions = typeof sdk.createReplyPrefixOptions === "function"
41
+ ? sdk.createReplyPrefixOptions
42
+ : null;
43
+ } catch {
44
+ cachedCreateReplyPrefixOptions = null;
45
+ }
46
+ }
47
+
48
+ if (!cachedCreateReplyPrefixOptions) {
49
+ return {};
50
+ }
51
+
52
+ return cachedCreateReplyPrefixOptions(params);
53
+ }
54
+
55
+
56
+ function isCompanySimChannelMuted(account: ResolvedDecentChatAccount | undefined, chatType: IncomingPeerMessage['chatType'], channelId: string): boolean {
57
+ if (!account?.companySim?.enabled) return false;
58
+ if (chatType === 'direct') return false;
59
+ const silentChannelIds = account.companySim.silentChannelIds;
60
+ if (!Array.isArray(silentChannelIds) || silentChannelIds.length === 0) return false;
61
+
62
+ return silentChannelIds.some((value) => {
63
+ const trimmed = typeof value === 'string' ? value.trim() : '';
64
+ if (!trimmed) return false;
65
+ return trimmed === channelId || trimmed === `decentchat:channel:${channelId}`;
66
+ });
67
+ }
68
+
69
+ function resolveAgentWorkspaceDir(cfg: OpenClawConfig, agentId: string): string | undefined {
70
+ const agentList = (cfg as any)?.agents?.list;
71
+ if (!Array.isArray(agentList)) return undefined;
72
+
73
+ const entry = agentList.find((candidate: any) => candidate && typeof candidate === "object" && candidate.id === agentId);
74
+ if (!entry) return undefined;
75
+
76
+ const workspace = (entry as any).workspace;
77
+ if (typeof workspace !== "string") return undefined;
78
+
79
+ const trimmed = workspace.trim();
80
+ return trimmed.length > 0 ? trimmed : undefined;
81
+ }
82
+
83
// Attachment metadata as received on an inbound message; the binary payload is
// fetched separately (see StreamingPeerAdapter.requestFullImage).
type InboundAttachment = {
  id: string;
  name: string;
  // e.g. "image" — only image attachments get special handling downstream.
  type: string;
  size?: number;
  // Low-resolution preview; used as a fallback if the full image fetch fails.
  thumbnail?: string;
  width?: number;
  height?: number;
};

// One prior message in a thread, returned by getThreadHistory to seed context.
type ThreadHistoryEntry = {
  id: string;
  senderId: string;
  content: string;
  timestamp: number;
};

// Model identity attached to outgoing assistant messages (built by normalizeModelMeta).
type AssistantModelMeta = {
  modelId?: string;
  modelName?: string;
  modelAlias?: string;
  modelLabel?: string;
};
106
+
107
// Account, logging, and lifecycle context the host hands to the peer runtime.
type PeerContext = {
  account: ResolvedDecentChatAccount;
  accountId: string;
  // Minimal structured logger; only `info` is required by this shape.
  log?: {
    info: (s: string) => void;
    warn?: (s: string) => void;
    error?: (s: string) => void;
    debug?: (s: string) => void;
  };
  // Merges a status patch into the channel's reported status (see startNodePeerRuntime).
  setStatus: (patch: Record<string, unknown>) => void;
  // When aborted, the peer runtime shuts down and resolves.
  abortSignal?: AbortSignal;
};

// Normalized inbound message as delivered by the node peer transport.
type IncomingPeerMessage = {
  channelId: string;
  workspaceId: string;
  content: string;
  senderId: string;
  senderName: string;
  messageId: string;
  // "direct" = 1:1 DM; "channel" = shared channel message.
  chatType: "channel" | "direct";
  timestamp: number;
  // Message this one replies to, if any.
  replyToId?: string;
  // Thread root id; absent when sent from the main channel input.
  threadId?: string;
  attachments?: InboundAttachment[];
};
133
+
134
// Transport surface the relay pipeline needs from the node peer: stream
// start/delta/done for channel and direct chats, typing indicators, canonical
// message delivery, read receipts, and attachment retrieval.
type StreamingPeerAdapter = {
  // Opens a streamed reply in a channel (optionally threaded).
  startStream: (args: {
    channelId: string;
    workspaceId: string;
    messageId: string;
    threadId?: string;
    replyToId?: string;
    model?: AssistantModelMeta;
  }) => Promise<void>;
  // Opens a streamed reply in a direct chat.
  startDirectStream: (args: { peerId: string; messageId: string; model?: AssistantModelMeta }) => Promise<void>;
  // Deltas carry the FULL accumulated content, not an increment
  // (see flushBufferedStream in relayInboundMessageToPeer).
  sendStreamDelta: (args: {
    channelId: string;
    workspaceId: string;
    messageId: string;
    content: string;
  }) => Promise<void>;
  sendDirectStreamDelta: (args: {
    peerId: string;
    messageId: string;
    content: string;
  }) => Promise<void>;
  sendDirectStreamDone: (args: { peerId: string; messageId: string }) => Promise<void>;
  sendStreamDone: (args: { channelId: string; workspaceId: string; messageId: string }) => Promise<void>;
  // Typing indicators are optional transport features.
  sendTyping?: (args: { channelId: string; workspaceId: string; typing: boolean }) => Promise<void>;
  sendDirectTyping?: (args: { peerId: string; typing: boolean }) => Promise<void>;
  // Canonical (non-streamed) message delivery; messageId may reuse a stream id
  // so receivers can dedupe the streamed bubble.
  sendDirectToPeer: (peerId: string, content: string, threadId?: string, replyToId?: string, messageId?: string, model?: AssistantModelMeta) => Promise<void>;
  sendToChannel: (channelId: string, content: string, threadId?: string, replyToId?: string, messageId?: string, model?: AssistantModelMeta) => Promise<void>;
  persistMessageLocally: (channelId: string, workspaceId: string, content: string, threadId?: string, replyToId?: string, messageId?: string, model?: AssistantModelMeta) => Promise<void>;
  sendReadReceipt: (peerId: string, channelId: string, messageId: string) => Promise<void>;
  // Fetches the full-quality attachment payload; null on failure.
  requestFullImage: (peerId: string, attachmentId: string) => Promise<Buffer | null>;
  getThreadHistory?: (args: {
    channelId: string;
    threadId: string;
    limit: number;
    excludeMessageId?: string;
  }) => Promise<ThreadHistoryEntry[]> | ThreadHistoryEntry[];
};
171
+
172
// Matches the provider error emitted when a tool-call output arrives without a
// matching call; such text is suppressed instead of streamed to users
// (see the deliver callback inside relayInboundMessageToPeer).
const TOOL_CALL_MISMATCH_RE = /^No tool call found for function call output with call_id\b/i;

/**
 * Thread affinity tracker — remembers the last active thread per sender per channel.
 *
 * When a message arrives without threadId (e.g., sent from the main channel input
 * instead of the thread panel), we check if the sender had recent thread activity
 * in this channel. If so, we route to that thread instead of creating a new one.
 *
 * This prevents session fragmentation when the client loses thread panel state
 * (page reload, navigation, etc.) while the user intends to continue the same thread.
 */
const THREAD_AFFINITY_TTL_MS = 4 * 60 * 60 * 1000; // 4 hours

// One affinity record: thread to route to and when it was last refreshed.
interface ThreadAffinityEntry {
  threadId: string;
  updatedAt: number;
}

// Process-local, in-memory state (lost on restart). Entries expire after
// THREAD_AFFINITY_TTL_MS; keys are built by threadAffinityKey /
// threadRoutingStateKey below.
const threadAffinityMap = new Map<string, ThreadAffinityEntry>();
const threadRoutingStateMap = new Map<string, CompanyThreadRoutingState & { updatedAt: number }>();
193
+
194
+ function threadAffinityKey(channelId: string, senderId: string): string {
195
+ return `${channelId}:${senderId}`;
196
+ }
197
+
198
+ function updateThreadAffinity(channelId: string, senderId: string, threadId: string): void {
199
+ const key = threadAffinityKey(channelId, senderId);
200
+ threadAffinityMap.set(key, { threadId, updatedAt: Date.now() });
201
+ }
202
+
203
+ function getThreadAffinity(channelId: string, senderId: string): string | null {
204
+ const key = threadAffinityKey(channelId, senderId);
205
+ const entry = threadAffinityMap.get(key);
206
+ if (!entry) return null;
207
+ if (Date.now() - entry.updatedAt > THREAD_AFFINITY_TTL_MS) {
208
+ threadAffinityMap.delete(key);
209
+ return null;
210
+ }
211
+ return entry.threadId;
212
+ }
213
+
214
+ function threadRoutingStateKey(channelId: string, threadRef: string): string {
215
+ return `${channelId}:${threadRef}`;
216
+ }
217
+
218
+ function updateThreadRoutingState(channelId: string, threadRef: string, state: CompanyThreadRoutingState): void {
219
+ const key = threadRoutingStateKey(channelId, threadRef);
220
+ threadRoutingStateMap.set(key, { ...state, updatedAt: Date.now() });
221
+ }
222
+
223
+ function getThreadRoutingState(channelId: string, threadRef: string): CompanyThreadRoutingState | null {
224
+ const key = threadRoutingStateKey(channelId, threadRef);
225
+ const entry = threadRoutingStateMap.get(key);
226
+ if (!entry) return null;
227
+ if (Date.now() - entry.updatedAt > THREAD_AFFINITY_TTL_MS) {
228
+ threadRoutingStateMap.delete(key);
229
+ return null;
230
+ }
231
+ return { assignedEmployeeId: entry.assignedEmployeeId, source: entry.source };
232
+ }
233
+
234
/** Test-only helper: wipes all in-memory thread affinity and routing state. */
export function resetThreadRoutingStateForTests(): void {
  threadAffinityMap.clear();
  threadRoutingStateMap.clear();
}
238
+
239
+ function formatThreadHistoryContent(content: string, maxChars = 220): string {
240
+ const normalized = content.replace(/\s+/g, " ").trim();
241
+ if (!normalized) return "[empty]";
242
+ if (normalized.length <= maxChars) return normalized;
243
+ return `${normalized.slice(0, Math.max(1, maxChars - 1))}…`;
244
+ }
245
+
246
+ function logThreadRouteDecision(
247
+ log: { info?: (s: string) => void; debug?: (s: string) => void } | undefined,
248
+ params: {
249
+ chatType: "direct" | "channel";
250
+ replyToMode: "off" | "first" | "all";
251
+ historyScope: "thread" | "channel";
252
+ mode: "thread" | "base";
253
+ candidateThreadId?: string;
254
+ derivedThreadId?: string;
255
+ sessionKey: string;
256
+ previousTimestampPresent: boolean;
257
+ bootstrapReason: "enabled" | "not-thread" | "not-first-turn" | "limit-zero";
258
+ initialHistoryLimit: number;
259
+ },
260
+ ): void {
261
+ const message = [
262
+ "[decentchat] route",
263
+ `chatType=${params.chatType}`,
264
+ `replyToMode=${params.replyToMode}`,
265
+ `historyScope=${params.historyScope}`,
266
+ `mode=${params.mode}`,
267
+ `candidateThread=${params.candidateThreadId || "-"}`,
268
+ `thread=${params.derivedThreadId || "-"}`,
269
+ `session=${params.sessionKey}`,
270
+ `hasSessionHistory=${params.previousTimestampPresent ? "yes" : "no"}`,
271
+ `bootstrap=${params.bootstrapReason}`,
272
+ `initialHistoryLimit=${params.initialHistoryLimit}`,
273
+ ].join(" ");
274
+
275
+ if (log?.debug) {
276
+ log.debug(message);
277
+ return;
278
+ }
279
+ log?.info?.(message);
280
+ }
281
+
282
+ function normalizeModelMeta(selection?: { provider?: string; model?: string }): AssistantModelMeta | undefined {
283
+ if (!selection?.model && !selection?.provider) return undefined;
284
+
285
+ const rawModel = String(selection?.model ?? "").trim();
286
+ const rawProvider = String(selection?.provider ?? "").trim();
287
+ const providerPrefix = rawProvider ? `${rawProvider}/` : "";
288
+
289
+ let modelName = rawModel;
290
+ if (providerPrefix && rawModel.startsWith(providerPrefix)) {
291
+ modelName = rawModel.slice(providerPrefix.length);
292
+ } else if (rawModel.includes("/")) {
293
+ modelName = rawModel.split("/").pop() || rawModel;
294
+ }
295
+
296
+ const modelId = rawProvider && modelName ? `${rawProvider}/${modelName}` : (rawModel || undefined);
297
+ const modelLabel = modelName || rawModel || undefined;
298
+
299
+ if (!modelLabel && !modelId) return undefined;
300
+
301
+ return {
302
+ modelId,
303
+ modelName: modelName || undefined,
304
+ modelLabel,
305
+ };
306
+ }
307
+
308
+ export async function finalizePeerStream(params: {
309
+ nodePeer: {
310
+ sendDirectStreamDone: (args: { peerId: string; messageId: string }) => Promise<void>;
311
+ sendStreamDone: (args: { channelId: string; workspaceId: string; messageId: string }) => Promise<void>;
312
+ };
313
+ chatType: "direct" | "group";
314
+ senderId: string;
315
+ channelId: string;
316
+ workspaceId: string;
317
+ streamMessageId: string | null;
318
+ }): Promise<void> {
319
+ if (!params.streamMessageId) return;
320
+
321
+ if (params.chatType === "direct") {
322
+ await params.nodePeer.sendDirectStreamDone({
323
+ peerId: params.senderId,
324
+ messageId: params.streamMessageId,
325
+ });
326
+ return;
327
+ }
328
+
329
+ await params.nodePeer.sendStreamDone({
330
+ channelId: params.channelId,
331
+ workspaceId: params.workspaceId,
332
+ messageId: params.streamMessageId,
333
+ });
334
+ }
335
+
336
/**
 * Bridges one inbound DecentChat message into the LLM pipeline and streams the
 * reply back over the peer transport.
 *
 * Responsibilities visible in this function:
 *  - fetches full-quality image attachments before processing;
 *  - coalesces provider chunks into full-content stream deltas every
 *    STREAM_COALESCE_MS;
 *  - finalizes the stream: optional final full delta, stream-done, then one
 *    canonical message that reuses the stream message id so receivers can
 *    dedupe the streamed bubble;
 *  - sends a fallback error notice when processing fails before any visible
 *    output was produced;
 *  - drives typing indicators for both direct and channel chats.
 *
 * `onFinalizeReady` exposes the internal finalizer so the caller can force
 * finalization from transport-level events (see startNodePeerRuntime's onReply).
 */
export async function relayInboundMessageToPeer(params: {
  incoming: IncomingPeerMessage;
  ctx: Pick<PeerContext, "account" | "accountId" | "log">;
  core: ReturnType<typeof getDecentChatRuntime>;
  nodePeer: StreamingPeerAdapter;
  onFinalizeReady?: (finalize: () => Promise<void>) => void;
}): Promise<void> {
  const { incoming, ctx, core, nodePeer } = params;
  // Per-message stream state, shared by the closures below.
  let streamMessageId: string | null = null;
  let streamTimer: ReturnType<typeof setTimeout> | null = null;      // idle-finalize timer
  let streamFlushTimer: ReturnType<typeof setTimeout> | null = null; // delta coalescing timer
  let streamedReply = "";
  let streamChunkCount = 0;
  const STREAM_COALESCE_MS = 120;
  let selectedModel: AssistantModelMeta | undefined;
  // Streaming is on unless the account explicitly disables it.
  const streamEnabled = ctx.account.streamEnabled !== false;
  let finalizeInFlight: Promise<void> | null = null;
  let processingComplete = false; // Guard: prevent idle timer from finalizing mid-response
  let lastSentStreamContent = "";
  let typingActive = false;
  let errorNoticeSent = false;

  // Sends a one-shot apology message; skipped if a notice was already sent or
  // any visible reply content exists.
  const sendProcessingFailureNotice = async (reason?: string): Promise<void> => {
    if (errorNoticeSent) return;
    if (streamedReply.trim().length > 0) return;
    const reply = "Sorry - I ran into an internal error while generating a reply. Please try again.";
    try {
      if (incoming.chatType === "direct") {
        await nodePeer.sendDirectToPeer(
          incoming.senderId,
          reply,
          incoming.threadId,
          incoming.messageId,
          undefined,
          selectedModel,
        );
      } else {
        // Channel notices always land in a thread rooted at the inbound message.
        const threadId = incoming.threadId ?? incoming.messageId;
        await nodePeer.sendToChannel(
          incoming.channelId,
          reply,
          threadId,
          incoming.messageId,
          undefined,
          selectedModel,
        );
      }
      errorNoticeSent = true;
      if (reason) {
        ctx.log?.warn?.(`[decentchat] sent error fallback message after failure: ${reason}`);
      }
    } catch (err) {
      ctx.log?.error?.(`[decentchat] failed to send error fallback message: ${String(err)}`);
    }
  };

  // Toggles the typing indicator, deduping repeated calls and swallowing
  // transport errors (typing is best-effort).
  const setTyping = async (typing: boolean) => {
    if (incoming.chatType === 'direct') {
      if (!nodePeer.sendDirectTyping) return;
      if (typingActive == typing) return;
      typingActive = typing;
      try {
        await nodePeer.sendDirectTyping({ peerId: incoming.senderId, typing });
      } catch (err) {
        ctx.log?.warn?.(`[decentchat] direct typing ${typing ? 'start' : 'stop'} failed: ${String(err)}`);
      }
      return;
    }
    if (!nodePeer.sendTyping) return;
    if (typingActive == typing) return;
    typingActive = typing;
    try {
      await nodePeer.sendTyping({ channelId: incoming.channelId, workspaceId: incoming.workspaceId, typing });
    } catch (err) {
      ctx.log?.warn?.(`[decentchat] typing ${typing ? 'start' : 'stop'} failed: ${String(err)}`);
    }
  };

  // Emits the full accumulated reply as one delta, skipping no-op repeats.
  const flushBufferedStream = async () => {
    const content = streamedReply;
    if (!streamEnabled || !streamMessageId) return;
    if (!content.trim()) return;
    if (content === lastSentStreamContent) return;

    if (incoming.chatType === "direct") {
      await nodePeer.sendDirectStreamDelta({
        peerId: incoming.senderId,
        messageId: streamMessageId,
        content,
      });
    } else {
      await nodePeer.sendStreamDelta({
        channelId: incoming.channelId,
        workspaceId: incoming.workspaceId,
        messageId: streamMessageId,
        content,
      });
    }

    lastSentStreamContent = content;
    await setTyping(false);
  };

  // Debounces flushBufferedStream to at most once per STREAM_COALESCE_MS.
  const scheduleBufferedStreamFlush = () => {
    if (streamFlushTimer) return;
    streamFlushTimer = setTimeout(() => {
      streamFlushTimer = null;
      void flushBufferedStream().catch((err) => {
        ctx.log?.warn?.(`[decentchat] buffered stream flush failed: ${String(err)}`);
      });
    }, STREAM_COALESCE_MS);
  };

  // Idempotent finalizer: stops timers, sends a last full delta if needed,
  // closes the stream, and persists the canonical reply message. Concurrent
  // callers await the same in-flight promise.
  const finalizeStream = async () => {
    // Ignore idle-timer finalize calls while LLM is still generating
    if (!processingComplete && streamMessageId) return;
    if (finalizeInFlight) {
      await finalizeInFlight;
      return;
    }

    finalizeInFlight = (async () => {
      if (streamTimer) {
        clearTimeout(streamTimer);
        streamTimer = null;
      }
      if (streamFlushTimer) {
        clearTimeout(streamFlushTimer);
        streamFlushTimer = null;
      }

      const mid = streamMessageId;
      streamMessageId = null;

      const finalReply = streamedReply.trim();
      ctx.log?.info?.(`[decentchat] stream telemetry: enabled=${streamEnabled} chunks=${streamChunkCount} finalChars=${finalReply.length}`);
      streamedReply = "";
      streamChunkCount = 0;
      // keep lastSentStreamContent until finalize reliability guard evaluates

      // Reliability guard: push one final full delta before stream-done, but only
      // when it adds new visible content compared to the last emitted delta.
      if (mid && streamEnabled && finalReply && finalReply !== lastSentStreamContent) {
        try {
          if (incoming.chatType === "direct") {
            await nodePeer.sendDirectStreamDelta({
              peerId: incoming.senderId,
              messageId: mid,
              content: finalReply,
            });
          } else {
            await nodePeer.sendStreamDelta({
              channelId: incoming.channelId,
              workspaceId: incoming.workspaceId,
              messageId: mid,
              content: finalReply,
            });
          }
          lastSentStreamContent = finalReply;
          await setTyping(false);
        } catch (err) {
          ctx.log?.warn?.(`[decentchat] finalizeStream final-delta failed: ${String(err)}`);
        }
      }

      await finalizePeerStream({
        nodePeer,
        chatType: incoming.chatType === "direct" ? "direct" : "group",
        senderId: incoming.senderId,
        channelId: incoming.channelId,
        workspaceId: incoming.workspaceId,
        streamMessageId: mid,
      });

      if (!finalReply) {
        ctx.log?.warn?.("[decentchat] finalizeStream: empty final reply, skipping persistence");
        return;
      }

      // Direct chats keep the inbound thread (possibly undefined); channel
      // replies always thread under the inbound message.
      const persistThreadId = incoming.chatType === "direct"
        ? incoming.threadId
        : (incoming.threadId ?? incoming.messageId);

      // Always send one canonical final message envelope, even after streaming.
      // It reuses the stream message ID so receivers can dedupe/update instead of
      // rendering a duplicate bubble, and it covers peers that missed live stream events.
      try {
        if (incoming.chatType === "direct") {
          await nodePeer.sendDirectToPeer(
            incoming.senderId,
            finalReply,
            persistThreadId,
            incoming.messageId,
            mid ?? undefined,
            selectedModel,
          );
        } else {
          await nodePeer.sendToChannel(
            incoming.channelId,
            finalReply,
            persistThreadId,
            incoming.messageId,
            mid ?? undefined,
            selectedModel,
          );
        }
        ctx.log?.info?.(
          `[decentchat] persisted assistant reply (${finalReply.length} chars) in ${incoming.chatType}${mid && streamEnabled ? ' via stream-finalize' : ''}`,
        );
      } catch (err) {
        ctx.log?.error?.(`[decentchat] failed to persist assistant reply: ${String(err)}`);
      }
    })();

    try {
      await finalizeInFlight;
    } finally {
      finalizeInFlight = null;
    }
  };

  params.onFinalizeReady?.(finalizeStream);

  // Request full-quality images for attachments before processing
  const imageAttachments = (incoming.attachments ?? []).filter(
    (att): att is InboundAttachment & { id: string; type: "image" } =>
      att.type === "image" && typeof att.id === "string"
  );

  const fullImageBuffers: Map<string, Buffer> = new Map();
  if (imageAttachments.length > 0) {
    ctx.log?.info?.(`[decentchat] requesting ${imageAttachments.length} full-quality image(s) from ${incoming.senderId.slice(0, 8)}`);
    // Fetch all images in parallel; failures fall back to the thumbnail.
    const imageRequests = imageAttachments.map(async (att) => {
      const buffer = await nodePeer.requestFullImage(incoming.senderId, att.id);
      if (buffer) {
        fullImageBuffers.set(att.id, buffer);
        ctx.log?.info?.(`[decentchat] received full image ${att.id.slice(0, 8)} (${buffer.length} bytes)`);
      } else {
        ctx.log?.warn?.(`[decentchat] failed to get full image ${att.id.slice(0, 8)}, will use thumbnail`);
      }
    });
    await Promise.all(imageRequests);
  }

  await setTyping(true);

  try {
    await processInboundMessage(
      {
        messageId: incoming.messageId,
        channelId: incoming.channelId,
        workspaceId: incoming.workspaceId,
        senderId: incoming.senderId,
        senderName: incoming.senderName,
        content: incoming.content,
        chatType: incoming.chatType,
        timestamp: incoming.timestamp,
        replyToId: incoming.replyToId,
        threadId: incoming.threadId,
      },
      { accountId: ctx.accountId, account: ctx.account, log: ctx.log },
      core,
      nodePeer,
      // deliver: called with each provider chunk.
      async (replyText) => {
        if (TOOL_CALL_MISMATCH_RE.test(replyText.trim())) {
          ctx.log?.warn?.("[decentchat] suppressed tool-call mismatch error text");
          return;
        }

        // Real streaming path: forward provider chunks immediately (if enabled).
        streamedReply += replyText;
        streamChunkCount += 1;
        ctx.log?.info?.(`[decentchat] deliver #${streamChunkCount}: +${replyText.length} chars, total=${streamedReply.length}`);

        const hasVisibleContent = streamedReply.trim().length > 0;

        if (streamEnabled && hasVisibleContent) {
          if (!streamMessageId) {
            streamMessageId = randomUUID();
            // Always reply in a thread (Slack-bot style): use the existing thread root,
            // or create a new thread under the inbound message.
            const outThreadId = incoming.chatType === "direct"
              ? undefined
              : (incoming.threadId ?? incoming.messageId);
            if (incoming.chatType === "direct") {
              await nodePeer.startDirectStream({ peerId: incoming.senderId, messageId: streamMessageId, model: selectedModel });
            } else {
              await nodePeer.startStream({
                channelId: incoming.channelId,
                workspaceId: incoming.workspaceId,
                messageId: streamMessageId,
                threadId: outThreadId,
                replyToId: incoming.messageId,
                model: selectedModel,
              });
            }
          }

          if (streamedReply !== lastSentStreamContent) {
            scheduleBufferedStreamFlush();
          }
        }

        // Re-arm the idle-finalize timer on every visible chunk; it fires only
        // after processingComplete (see guard at the top of finalizeStream).
        if (hasVisibleContent) {
          if (streamTimer) clearTimeout(streamTimer);
          streamTimer = setTimeout(() => { void finalizeStream(); }, 200);
        }
      },
      async (reason) => {
        await sendProcessingFailureNotice(reason);
      },
      incoming.attachments,
      fullImageBuffers,
      {
        streamEnabled,
        onModelResolved: (model) => {
          selectedModel = model;
        },
      },
    );

    processingComplete = true;

    await finalizeStream();
  } catch (err) {
    // Even on failure: unblock the finalize guard, notify the user once, and
    // close out any open stream.
    processingComplete = true;
    await sendProcessingFailureNotice(String((err as Error)?.message ?? err));
    await finalizeStream();
    ctx.log?.error?.(`[decentchat] inbound processing failed for ${incoming.chatType} message ${incoming.messageId}: ${String(err)}`);
  } finally {
    await setTyping(false);
  }
}
669
+
670
+
671
/**
 * Minimal constructor surface expected from the lazily imported
 * DecentChatNodePeer module. Kept loose (`any[]` args) because the concrete
 * class lives behind a dynamic import.
 * NOTE(review): startNodePeerRuntime also uses `peerId` and `destroy()` on
 * instances, which this type does not declare — confirm against the actual
 * class in ./peer/DecentChatNodePeer.js.
 */
type DecentChatNodePeerCtor = new (...args: any[]) => {
  start: () => Promise<void>;
  stop: () => void | Promise<void>;
  sendMessage: (...args: any[]) => Promise<any>;
  markRead?: (...args: any[]) => Promise<any>;
  listDirectory?: (...args: any[]) => Promise<any>;
};

// Memoized module-load promise; populated on first loadDecentChatNodePeerCtor() call.
let decentChatNodePeerCtorPromise: Promise<DecentChatNodePeerCtor> | null = null;
680
+
681
+ async function loadDecentChatNodePeerCtor(): Promise<DecentChatNodePeerCtor> {
682
+ if (!decentChatNodePeerCtorPromise) {
683
+ decentChatNodePeerCtorPromise = import('./peer/DecentChatNodePeer.js').then((mod) => {
684
+ const candidate = (mod as any).DecentChatNodePeer
685
+ ?? ((mod as any).default && (mod as any).default.DecentChatNodePeer)
686
+ ?? (typeof (mod as any).default === 'function' ? (mod as any).default : undefined);
687
+
688
+ if (typeof candidate !== 'function') {
689
+ const keys = mod && typeof mod === 'object' ? Object.keys(mod as object).join(', ') : typeof mod;
690
+ throw new Error(`DecentChat node peer constructor export missing (module keys: ${keys})`);
691
+ }
692
+ return candidate as DecentChatNodePeerCtor;
693
+ });
694
+ }
695
+ return decentChatNodePeerCtorPromise;
696
+ }
697
+
698
+ export async function startDecentChatPeer(ctx: PeerContext): Promise<void> {
699
+ const seedPhrase = ctx.account.seedPhrase?.trim();
700
+ if (!seedPhrase) {
701
+ throw new Error("DecentChat seed phrase is required: set channels.decentchat.seedPhrase");
702
+ }
703
+
704
+ return startNodePeerRuntime(ctx);
705
+ }
706
+
707
/**
 * Constructs, starts, and supervises the node peer. Wires inbound messages to
 * relayInboundMessageToPeer, routes huddle voice transcriptions through the
 * same LLM pipeline (capturing the reply instead of streaming it), registers
 * the active peer, and resolves only after the abort signal triggers shutdown.
 */
async function startNodePeerRuntime(ctx: PeerContext): Promise<void> {
  const core = getDecentChatRuntime();
  const DecentChatNodePeer = await loadDecentChatNodePeerCtor();

  let nodePeer: InstanceType<DecentChatNodePeerCtor>;
  // Replaced by the relay's real finalizer via onFinalizeReady; no-op until then.
  let finalizeStream: () => Promise<void> = async () => {};

  // Workspace root: env override, else ~/.openclaw/workspace.
  const openClawWorkspaceRoot = process.env.OPENCLAW_WORKSPACE_DIR?.trim()
    || path.join(os.homedir(), '.openclaw', 'workspace');

  nodePeer = new DecentChatNodePeer({
    account: ctx.account,
    onIncomingMessage: async (params) => {
      await relayInboundMessageToPeer({
        incoming: params,
        ctx,
        core,
        nodePeer,
        // Keep a handle on the relay's finalizer so onReply can force-close
        // an in-flight stream.
        onFinalizeReady: (nextFinalize) => {
          finalizeStream = nextFinalize;
        },
      });
    },
    onReply: () => { void finalizeStream(); },
    companyTemplateControl: {
      loadConfig: () => core.config.loadConfig() as Record<string, unknown>,
      writeConfigFile: async (config) => {
        await core.config.writeConfigFile(config);
      },
      workspaceRootDir: openClawWorkspaceRoot,
      companySimsRootDir: path.join(openClawWorkspaceRoot, 'company-sims'),
    },
    onHuddleTranscription: async (text, peerId, channelId, senderName) => {
      // Route voice transcription through the standard LLM pipeline,
      // but capture the response text instead of sending it over the data channel.
      return new Promise<string | undefined>((resolve) => {
        let response = '';
        // Synthetic direct message wrapping the transcription with terse-reply
        // instructions for the model.
        const syntheticMsg = {
          messageId: randomUUID(),
          channelId: channelId || 'huddle',
          workspaceId: '',
          senderId: peerId,
          senderName,
          content: `[VOICE HUDDLE — reply in 1-2 short sentences max, conversational tone, no markdown/emoji]\n${text}`,
          chatType: 'direct' as const,
          timestamp: Date.now(),
        };

        processInboundMessage(
          syntheticMsg,
          { accountId: ctx.accountId, account: ctx.account, log: ctx.log },
          core,
          // Minimal adapter: read receipts are swallowed; no streaming needed.
          {
            sendReadReceipt: async () => {},
          },
          async (replyText) => {
            response += replyText;
          },
          (reason) => {
            ctx.log?.error?.(`[huddle-llm] error: ${reason}`);
            resolve(undefined);
          },
        ).then(() => {
          resolve(response.trim() || undefined);
        }).catch((err) => {
          ctx.log?.error?.(`[huddle-llm] pipeline error: ${String(err)}`);
          resolve(undefined);
        });
      });
    },
    log: ctx.log,
  });

  await nodePeer.start();
  setActivePeer(nodePeer, ctx.accountId);
  ctx.setStatus({
    running: true,
    // NOTE(review): `peerId` (and `destroy()` below) are not declared on
    // DecentChatNodePeerCtor's instance type — confirm against the class.
    peerId: nodePeer.peerId,
    lastError: null,
  });

  // Stay pending until aborted; the returned promise resolves after shutdown.
  return new Promise<void>((resolve) => {
    const shutdown = async () => {
      setActivePeer(null, ctx.accountId);
      try {
        nodePeer.destroy();
      } catch (err) {
        ctx.log?.warn?.(`[decentchat] destroy error during shutdown: ${String(err)}`);
      }
      ctx.setStatus({ running: false });
      // Allow the PeerJS signaling server time to process the WebSocket
      // disconnect before the gateway recreates a peer with the same ID.
      // Without this delay, the new peer hits "unavailable-id" because the
      // old session hasn't expired on the signaling server yet.
      await new Promise<void>((r) => setTimeout(r, 2000));
      resolve();
    };

    if (ctx.abortSignal?.aborted) {
      void shutdown();
      return;
    }

    ctx.abortSignal?.addEventListener("abort", () => void shutdown());
  });
}
813
+
814
+ function resolveThreadSessionKeys(params: {
815
+ baseSessionKey: string;
816
+ threadId?: string | null;
817
+ parentSessionKey?: string;
818
+ }): { sessionKey: string; parentSessionKey?: string } {
819
+ const threadId = (params.threadId ?? "").trim();
820
+ if (!threadId) {
821
+ return { sessionKey: params.baseSessionKey, parentSessionKey: undefined };
822
+ }
823
+ return {
824
+ sessionKey: `${params.baseSessionKey}:thread:${threadId.toLowerCase()}`,
825
+ parentSessionKey: params.parentSessionKey,
826
+ };
827
+ }
828
+
829
+ export function resolveDecentThreadingFlags(cfg: OpenClawConfig, chatType?: "direct" | "group" | "channel"): {
830
+ replyToMode: "off" | "first" | "all";
831
+ historyScope: "thread" | "channel";
832
+ inheritParent: boolean;
833
+ initialHistoryLimit: number;
834
+ } {
835
+ const ch = (cfg as any)?.channels?.decentchat ?? {};
836
+ const globalReplyToMode = (ch.replyToMode === "off" || ch.replyToMode === "first" || ch.replyToMode === "all")
837
+ ? ch.replyToMode
838
+ : "all";
839
+ const byType = ch.replyToModeByChatType ?? {};
840
+ const directMode = (byType.direct === "off" || byType.direct === "first" || byType.direct === "all") ? byType.direct : undefined;
841
+ const groupMode = (byType.group === "off" || byType.group === "first" || byType.group === "all") ? byType.group : undefined;
842
+ const channelMode = (byType.channel === "off" || byType.channel === "first" || byType.channel === "all") ? byType.channel : undefined;
843
+
844
+ let replyToMode = globalReplyToMode;
845
+ if (chatType === "direct") {
846
+ replyToMode = directMode ?? globalReplyToMode;
847
+ } else if (chatType === "group") {
848
+ replyToMode = groupMode ?? channelMode ?? globalReplyToMode;
849
+ } else if (chatType === "channel") {
850
+ replyToMode = channelMode ?? groupMode ?? globalReplyToMode;
851
+ }
852
+ const historyScope = (ch.thread?.historyScope === "channel" || ch.thread?.historyScope === "thread")
853
+ ? ch.thread.historyScope
854
+ : "thread";
855
+ const inheritParent = ch.thread?.inheritParent === true;
856
+ const initialHistoryLimitRaw = ch.thread?.initialHistoryLimit;
857
+ const initialHistoryLimit = Number.isFinite(initialHistoryLimitRaw)
858
+ ? Math.max(0, Math.floor(initialHistoryLimitRaw))
859
+ : 20;
860
+ return { replyToMode, historyScope, inheritParent, initialHistoryLimit };
861
+ }
862
+
863
/**
 * Process one inbound DecentChat message end-to-end:
 * persist image attachments to disk, apply company-sim participation and
 * routing rules, derive a (possibly thread-scoped) session key, optionally
 * bootstrap thread history into the prompt, record the inbound session,
 * dispatch the agent reply (token-streaming or buffered), and finally
 * acknowledge the message with a read receipt.
 *
 * @param msg normalized inbound message payload.
 * @param ctx account context: account id, optional resolved account, logger.
 * @param core DecentChat runtime exposing config/routing/session/reply services.
 * @param nodePeer peer adapter; all capabilities (read receipts, channel-name
 *   lookup, thread-history fetch) are optional and probed with `?.`.
 * @param deliver sends reply text (or, when streaming, token deltas) to the peer.
 * @param onDeliverError invoked with a reason string when reply dispatch fails.
 * @param attachments inbound attachments; only `type === "image"` are persisted.
 * @param fullImageBuffers full-quality image bytes keyed by attachment id,
 *   preferred over the base64 thumbnail when present.
 * @param options streaming toggle plus a model-resolution callback.
 */
async function processInboundMessage(
  msg: {
    messageId: string;
    channelId: string;
    workspaceId: string;
    senderId: string;
    senderName: string;
    content: string;
    chatType: "channel" | "direct";
    timestamp: number;
    replyToId?: string;
    threadId?: string;
  },
  ctx: { accountId: string; account?: ResolvedDecentChatAccount; log?: any },
  core: ReturnType<typeof getDecentChatRuntime>,
  // NOTE(review): `resolveChannelNameById` is accessed below but is not part of
  // the Pick<...> arm of this union — confirm StreamingPeerAdapter declares it.
  nodePeer: Pick<StreamingPeerAdapter, "sendReadReceipt" | "getThreadHistory"> | {
    resolveChannelNameById?: (channelId: string) => string | undefined;
    sendReadReceipt?: (peerId: string, channelId: string, messageId: string) => Promise<void>;
    getThreadHistory?: (args: {
      channelId: string;
      threadId: string;
      limit: number;
      excludeMessageId?: string;
    }) => Promise<ThreadHistoryEntry[]> | ThreadHistoryEntry[];
  },
  deliver: (text: string) => Promise<void>,
  onDeliverError?: (reason: string) => void,
  attachments?: InboundAttachment[],
  fullImageBuffers?: Map<string, Buffer>,
  options?: {
    streamEnabled?: boolean;
    onModelResolved?: (model: AssistantModelMeta | undefined) => void;
  },
): Promise<void> {
  let rawBody = msg.content?.trim() ?? "";

  // Persist inbound images under ~/.openclaw/media/inbound so the agent can
  // reference them by file path; failures are logged and skipped per-image.
  const mediaPaths: string[] = [];
  const imageAttachments = (attachments ?? []).filter((att) => att.type === "image");

  if (imageAttachments.length > 0) {
    const inboundMediaDir = path.join(os.homedir(), ".openclaw", "media", "inbound");
    fs.mkdirSync(inboundMediaDir, { recursive: true });

    for (const attachment of imageAttachments) {
      try {
        // Random file name; always written with a .jpg extension regardless of
        // the original format (content-type is assumed, not inspected).
        const filePath = path.join(inboundMediaDir, `${randomUUID()}.jpg`);

        // Prefer full-quality image if available
        const fullBuffer = fullImageBuffers?.get(attachment.id);
        if (fullBuffer) {
          fs.writeFileSync(filePath, fullBuffer);
          ctx.log?.info?.(`[decentchat] saved full-quality image ${attachment.id.slice(0, 8)} (${fullBuffer.length} bytes)`);
        } else if (attachment.thumbnail) {
          // Fallback to thumbnail
          fs.writeFileSync(filePath, Buffer.from(attachment.thumbnail, "base64"));
          ctx.log?.info?.(`[decentchat] saved thumbnail for ${attachment.id.slice(0, 8)} (fallback)`);
        } else {
          continue; // Skip if no image data available
        }

        mediaPaths.push(filePath);
      } catch (err) {
        ctx.log?.warn?.(`[decentchat] failed to persist image for ${attachment.id}: ${String(err)}`);
      }
    }
  }

  // Caption-less image messages: synthesize a text body from attachment labels
  // so the agent still receives a non-empty prompt.
  if (!rawBody && attachments && attachments.length > 0) {
    const imageLabels = attachments
      .filter((attachment) => attachment.type === "image")
      .map((attachment, index) => {
        const name = attachment.name?.trim();
        return name ? `[Image: ${name}]` : `[Image ${index + 1}]`;
      });
    if (imageLabels.length > 0) {
      rawBody = imageLabels.join("\n");
    }
  }

  // Nothing to process: no text and no persisted media.
  if (!rawBody && mediaPaths.length === 0) {
    return;
  }

  const cfg = core.config.loadConfig() as OpenClawConfig;
  const channel = "decentchat";
  // Routing peer id: direct chats key on the sender; channel messages key on
  // the workspace:channel pair.
  const peerId = msg.chatType === "direct"
    ? msg.senderId
    : `${msg.workspaceId}:${msg.channelId}`;
  const effectiveAccount = resolveDecentChatAccount(cfg, ctx.accountId);

  const route = core.channel.routing.resolveAgentRoute({
    cfg,
    channel,
    accountId: ctx.accountId,
    peer: { kind: msg.chatType === "direct" ? "direct" : "group", id: peerId },
  });

  // Muted company-sim channel: acknowledge with a read receipt but stay silent.
  if (isCompanySimChannelMuted(effectiveAccount, msg.chatType, msg.channelId)) {
    ctx.log?.info?.(`[decentchat] company routing: silent account=${ctx.accountId} reason=muted-channel channel=${msg.channelId}`);
    await nodePeer.sendReadReceipt?.(msg.senderId, msg.channelId, msg.messageId);
    return;
  }

  const agentWorkspaceDir = resolveAgentWorkspaceDir(cfg, route.agentId);

  // Company-sim prompt context (best effort): load failures are logged and the
  // message proceeds without company context.
  let companyContext = null;
  let companyContextPrefix = "";
  if (ctx.account?.companySim?.enabled) {
    try {
      const resolvedCompanyPrompt = resolveCompanyPromptContextForAccount(ctx.account, {
        log: ctx.log,
        workspaceDir: agentWorkspaceDir,
        agentId: route.agentId,
      });
      if (resolvedCompanyPrompt) {
        companyContext = resolvedCompanyPrompt.context;
        companyContextPrefix = resolvedCompanyPrompt.prompt;
      }
    } catch (err) {
      ctx.log?.warn?.(`[decentchat] company context load failed for ${ctx.account.accountId}: ${String(err)}`);
    }
  }
  // Human-readable channel name when resolvable; falls back to the raw id.
  const channelName = msg.chatType === "channel"
    ? (nodePeer.resolveChannelNameById?.(msg.channelId) ?? msg.channelId)
    : undefined;
  // Thread reference for company routing state: explicit threadId wins, then
  // replyToId; empty string means no per-thread routing state is consulted.
  const routingThreadRef = (msg.threadId ?? msg.replyToId ?? '').trim();
  const currentThreadRoutingState = routingThreadRef
    ? getThreadRoutingState(msg.channelId, routingThreadRef)
    : null;
  if (companyContext && routingThreadRef) {
    const threadRoutingUpdate = resolveThreadRoutingStateUpdate({
      manifest: companyContext.manifest,
      text: rawBody,
    });
    if (threadRoutingUpdate) {
      updateThreadRoutingState(msg.channelId, routingThreadRef, threadRoutingUpdate);
    }
  }
  // Decide whether this company persona should answer at all; if not, just
  // send a read receipt and exit.
  const participationDecision = decideCompanyParticipation({
    context: companyContext,
    chatType: msg.chatType === "direct" ? "direct" : "channel",
    channelNameOrId: channelName,
    text: rawBody,
    threadId: msg.threadId ?? msg.replyToId,
    threadAssignedEmployeeId: currentThreadRoutingState?.assignedEmployeeId,
  });
  if (!participationDecision.shouldRespond) {
    ctx.log?.info?.(`[decentchat] company routing: silent account=${ctx.accountId} reason=${participationDecision.reason} channel=${channelName ?? msg.channelId}`);
    await nodePeer.sendReadReceipt?.(msg.senderId, msg.channelId, msg.messageId);
    return;
  }
  const threadingFlags = resolveDecentThreadingFlags(cfg, msg.chatType === "direct" ? "direct" : "channel");

  const baseSessionKey = route.sessionKey;
  // Thread-aware session routing (parallelism):
  // - prefer explicit threadId (from thread panel)
  // - fallback to replyToId ONLY when auto-threading is not active
  const explicitThreadId = (msg.threadId ?? "").trim();
  const fallbackThreadId = (msg.replyToId ?? "").trim();

  // Only use replyToId as thread fallback when explicit threadId is absent.
  let candidateThreadId = explicitThreadId || fallbackThreadId;

  // Thread affinity: when a message arrives without explicit thread metadata in a
  // group channel, check if this sender was recently active in a thread. If so,
  // route to that thread instead of creating a brand-new auto-thread. This keeps
  // the session stable when the client loses thread panel state (page reload, UI
  // reset, etc.) and matches user intent more often than spawning a fresh thread.
  // NOTE(review): `affinityApplied` is set but not read later in this function —
  // presumably kept for future use or debugging; confirm before removing.
  let affinityApplied = false;
  if (!candidateThreadId && msg.chatType !== "direct" && threadingFlags.replyToMode === "all") {
    const affinityThreadId = getThreadAffinity(msg.channelId, msg.senderId);
    if (affinityThreadId) {
      candidateThreadId = affinityThreadId;
      affinityApplied = true;
      ctx.log?.info?.(`[decentchat] thread-affinity: sender=${msg.senderId.slice(0, 8)} → thread=${affinityThreadId.slice(0, 8)} (channel=${msg.channelId.slice(0, 8)})`);
    }
  }

  // Auto-thread eligible: channel message without explicit thread context or
  // affinity match when replyToMode=all. In this mode each top-level message gets
  // its own parallel session.
  const autoThreadEligible = !candidateThreadId && msg.chatType !== "direct" && threadingFlags.replyToMode === "all";

  // When auto-thread eligible, use the message's own ID as the thread so
  // it gets a unique parallel session and the reply appears as a thread reply.
  const autoThreadId = autoThreadEligible ? msg.messageId : "";

  // Slack-compatible knobs:
  // - replyToMode=off => never route per-thread
  // - thread.historyScope=channel => keep base channel session
  const threadingDisabled = threadingFlags.replyToMode === "off" || threadingFlags.historyScope === "channel";
  const derivedThreadId = threadingDisabled ? "" : (candidateThreadId || autoThreadId);
  const isThreadReply = Boolean(derivedThreadId && (derivedThreadId !== msg.messageId || autoThreadId));

  const threadKeys = resolveThreadSessionKeys({
    baseSessionKey,
    threadId: isThreadReply ? derivedThreadId : undefined,
    parentSessionKey: isThreadReply && threadingFlags.inheritParent ? baseSessionKey : undefined,
  });
  const sessionKey = threadKeys.sessionKey;

  // Update thread affinity for this sender so future messages without threadId
  // can be routed to the same thread (within the TTL window).
  // Skip for auto-thread messages: affinity is not consulted in auto-thread mode,
  // so updating it would only pollute the map with per-message thread IDs.
  if (isThreadReply && !autoThreadEligible && msg.chatType !== "direct" && derivedThreadId) {
    updateThreadAffinity(msg.channelId, msg.senderId, derivedThreadId);
  }

  const fromLabel = msg.chatType === "direct" ? msg.senderName : `${msg.senderName} in ${msg.channelId}`;
  const storePath = core.channel.session.resolveStorePath(cfg.session?.store, { agentId: route.agentId });
  const envelopeOptions = core.channel.reply.resolveEnvelopeFormatOptions(cfg);
  // A missing previousTimestamp marks the first turn of this session key.
  const previousTimestamp = core.channel.session.readSessionUpdatedAt({
    storePath,
    sessionKey,
  });
  // Bootstrap context only for genuine reply-based thread entry on the first turn.
  // Auto-threaded top-level channel messages must start clean (Slack-like), or they
  // inherit stale base-channel context into a brand-new thread session.
  const shouldBootstrapFromBase = isThreadReply
    && !previousTimestamp
    && !autoThreadEligible
    && threadingFlags.initialHistoryLimit > 0;
  const bootstrapParentSessionKey = shouldBootstrapFromBase ? baseSessionKey : undefined;
  const effectiveParentSessionKey = threadKeys.parentSessionKey ?? bootstrapParentSessionKey;

  // Classify why bootstrap did or did not happen, purely for diagnostics.
  const bootstrapReason = !isThreadReply
    ? "not-thread"
    : previousTimestamp
      ? "not-first-turn"
      : autoThreadEligible
        ? "auto-thread-clean"
        : threadingFlags.initialHistoryLimit <= 0
          ? "limit-zero"
          : "enabled";
  logThreadRouteDecision(ctx.log, {
    chatType: msg.chatType,
    replyToMode: threadingFlags.replyToMode,
    historyScope: threadingFlags.historyScope,
    mode: isThreadReply ? "thread" : "base",
    candidateThreadId,
    derivedThreadId,
    sessionKey,
    previousTimestampPresent: Boolean(previousTimestamp),
    bootstrapReason,
    initialHistoryLimit: threadingFlags.initialHistoryLimit,
  });

  // On the first thread turn, pull recent thread history from the adapter and
  // fold it into the prompt as a context prefix (best effort).
  let threadContextPrefix = "";
  let threadHistoryCount = 0;
  const shouldBootstrapThreadHistory = isThreadReply && !previousTimestamp && threadingFlags.initialHistoryLimit > 0;
  if (shouldBootstrapThreadHistory && nodePeer.getThreadHistory) {
    try {
      // Promise.resolve normalizes the sync-or-async getThreadHistory return.
      const history = await Promise.resolve(
        nodePeer.getThreadHistory({
          channelId: msg.channelId,
          threadId: derivedThreadId,
          limit: threadingFlags.initialHistoryLimit,
          excludeMessageId: msg.messageId,
        }),
      );
      if (history.length > 0) {
        const lines = history.map((entry) => {
          // Only the current sender's name is known here; other participants
          // are labeled by a truncated peer id.
          const senderLabel = entry.senderId === msg.senderId
            ? msg.senderName
            : entry.senderId.slice(0, 8);
          return `- ${senderLabel}: ${formatThreadHistoryContent(entry.content)}`;
        });
        threadContextPrefix = `[Thread context: last ${history.length} messages]\n${lines.join("\n")}`;
        threadHistoryCount = history.length;
        ctx.log?.debug?.(`[decentchat] thread-bootstrap thread=${derivedThreadId} fetched=${history.length} limit=${threadingFlags.initialHistoryLimit}`);
      }
    } catch (err) {
      ctx.log?.warn?.(`[decentchat] thread history bootstrap failed: ${String(err)}`);
    }
  }

  if (shouldBootstrapThreadHistory && !nodePeer.getThreadHistory) {
    ctx.log?.warn?.("[decentchat] thread history bootstrap requested but adapter does not expose getThreadHistory");
  }

  // Compose the final prompt body: company prefix, then thread history, then
  // the user's message, wrapped in the standard agent envelope.
  const contextPrefixes = [companyContextPrefix, threadContextPrefix].filter((value) => value && value.trim().length > 0);
  const contextPrefix = contextPrefixes.join("\n\n");
  const bodySource = contextPrefix ? `${contextPrefix}\n\n${rawBody}` : rawBody;
  const body = core.channel.reply.formatAgentEnvelope({
    channel: "DecentChat",
    from: fromLabel,
    timestamp: msg.timestamp,
    previousTimestamp,
    envelope: envelopeOptions,
    body: bodySource,
  });

  // Media is always persisted as .jpg above, so the type is fixed here.
  const mediaType = mediaPaths.length > 0 ? "image/jpeg" : undefined;

  const ctxPayload = core.channel.reply.finalizeInboundContext({
    Body: body,
    RawBody: rawBody,
    CommandBody: rawBody,
    From: msg.chatType === "direct" ? `decentchat:${msg.senderId}` : `decentchat:channel:${msg.channelId}`,
    To: "decentchat:bot",
    SessionKey: sessionKey,
    AccountId: route.accountId,
    ChatType: msg.chatType === "direct" ? "direct" : "group",
    ConversationLabel: fromLabel,
    SenderName: msg.senderName,
    SenderId: msg.senderId,
    GroupSubject: msg.chatType === "channel" ? msg.channelId : undefined,
    Provider: channel,
    Surface: channel,
    MessageSid: msg.messageId,
    Timestamp: msg.timestamp,
    OriginatingChannel: channel,
    OriginatingTo: msg.chatType === "direct" ? `decentchat:${msg.senderId}` : `decentchat:channel:${msg.channelId}`,
    ReplyToId: isThreadReply ? derivedThreadId : undefined,
    MessageThreadId: isThreadReply ? derivedThreadId : undefined,
    ParentSessionKey: effectiveParentSessionKey,
    IsFirstThreadTurn: isThreadReply && !previousTimestamp ? true : undefined,
    ThreadBootstrapHistoryCount: threadHistoryCount > 0 ? threadHistoryCount : undefined,
    MediaPath: mediaPaths[0],
    MediaType: mediaType,
    MediaPaths: mediaPaths.length > 1 ? mediaPaths : undefined,
    MediaTypes: mediaPaths.length > 1 ? mediaPaths.map(() => "image/jpeg") : undefined,
  });

  await core.channel.session.recordInboundSession({
    storePath,
    sessionKey: ctxPayload.SessionKey ?? sessionKey,
    ctx: ctxPayload,
    onRecordError: (err) => ctx.log?.error?.(`[decentchat] session record error: ${String(err)}`),
  });

  // Streaming state shared between onPartialReply and the deliver callback.
  let streamingActive = false;
  let lastPartialLength = 0;

  const { onModelSelected, ...prefixOptions } = await resolveReplyPrefixOptions({
    cfg,
    agentId: route.agentId,
    channel,
    accountId: ctx.accountId,
  });

  // Wrap the SDK's onModelSelected so the caller's onModelResolved also fires.
  const onModelSelectedWithCapture = (modelCtx: { provider?: string; model?: string }) => {
    const normalized = normalizeModelMeta(modelCtx);
    options?.onModelResolved?.(normalized);
    onModelSelected?.(modelCtx as any);
  };

  await core.channel.reply.dispatchReplyWithBufferedBlockDispatcher({
    ctx: ctxPayload,
    cfg,
    dispatcherOptions: {
      ...prefixOptions,
      deliver: async (payload) => {
        const text = (payload as any).text;
        if (!text) {
          return;
        }
        // Drop upstream tool-call mismatch errors from being posted into chat.
        if (/^No tool call found for function call output with call_id\b/i.test(text.trim())) {
          ctx.log?.warn?.("[decentchat] suppressed tool-call mismatch error text");
          return;
        }
        // When real streaming is active (onPartialReply), tokens are already delivered.
        // The deliver callback only fires for the final aggregated text — skip it to avoid duplicates.
        // (Same pattern as Slack native streaming: stream IS the delivery.)
        if (options?.streamEnabled && streamingActive) {
          ctx.log?.info?.(`[decentchat] deliver: skipping (stream active, ${text.length} chars)`);
          return;
        }
        await deliver(text);
      },
      onError: (err, info) => {
        const reason = String(err);
        ctx.log?.error?.(`[decentchat] ${info.kind} reply error: ${reason}`);
        onDeliverError?.(reason);
      },
    },
    replyOptions: {
      onModelSelected: onModelSelectedWithCapture,
      suppressToolErrorWarnings: true,
      // Real token-level streaming: onPartialReply fires with each LLM token delta.
      // We forward these directly to the P2P stream protocol for live rendering.
      // NOTE: When onPartialReply is active, the dispatcherOptions.deliver callback
      // must NOT also accumulate text — it should only finalize the stream.
      onPartialReply: options?.streamEnabled
        ? async (payload) => {
            const fullText = (payload as any).text;
            if (!fullText) return;
            streamingActive = true;
            // onPartialReply gives CUMULATIVE text, not delta.
            // Extract only the new portion before passing to deliver (which accumulates).
            const delta = fullText.slice(lastPartialLength);
            lastPartialLength = fullText.length;
            if (delta) await deliver(delta);
          }
        : undefined,
      // Disable block streaming chunker — we handle streaming ourselves via onPartialReply.
      disableBlockStreaming: options?.streamEnabled,
    },
  });

  // Acknowledge the processed message regardless of how the reply was delivered.
  await nodePeer.sendReadReceipt?.(msg.senderId, msg.channelId, msg.messageId);
}