ei-tui 0.5.1 → 0.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/package.json +1 -1
  2. package/src/core/AGENTS.md +2 -2
  3. package/src/core/context-utils.ts +3 -4
  4. package/src/core/llm-client.ts +119 -39
  5. package/src/core/orchestrators/human-extraction.ts +0 -5
  6. package/src/core/processor.ts +30 -1
  7. package/src/core/prompt-context-builder.ts +5 -10
  8. package/src/core/queue-manager.ts +4 -0
  9. package/src/core/queue-processor.ts +1 -0
  10. package/src/core/room-manager.ts +8 -2
  11. package/src/core/state/queue.ts +7 -0
  12. package/src/core/state-manager.ts +233 -4
  13. package/src/core/tools/index.ts +1 -1
  14. package/src/core/types/entities.ts +21 -4
  15. package/src/integrations/claude-code/importer.ts +0 -1
  16. package/src/integrations/claude-code/types.ts +0 -1
  17. package/src/integrations/opencode/importer.ts +0 -1
  18. package/src/prompts/response/index.ts +8 -2
  19. package/src/prompts/response/types.ts +2 -0
  20. package/src/prompts/room/index.ts +8 -2
  21. package/src/prompts/room/sections.ts +16 -0
  22. package/src/prompts/room/types.ts +3 -2
  23. package/src/storage/merge.ts +47 -2
  24. package/tui/src/commands/dlq.ts +12 -4
  25. package/tui/src/commands/provider.tsx +110 -90
  26. package/tui/src/commands/queue.ts +11 -3
  27. package/tui/src/commands/settings.tsx +9 -17
  28. package/tui/src/components/MessageList.tsx +1 -0
  29. package/tui/src/components/ModelListOverlay.tsx +203 -0
  30. package/tui/src/components/PromptInput.tsx +0 -2
  31. package/tui/src/components/RoomMessageList.tsx +1 -0
  32. package/tui/src/context/ei.tsx +7 -0
  33. package/tui/src/util/persona-editor.tsx +15 -12
  34. package/tui/src/util/provider-editor.tsx +23 -6
  35. package/tui/src/util/yaml-serializers.ts +255 -73
  36. package/src/core/model-context-windows.ts +0 -49
  37. package/tui/src/commands/model.ts +0 -47
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ei-tui",
3
- "version": "0.5.1",
3
+ "version": "0.5.3",
4
4
  "author": "Flare576",
5
5
  "repository": {
6
6
  "type": "git",
@@ -53,10 +53,10 @@ Priority queue for LLM requests:
53
53
 
54
54
  Multi-provider LLM abstraction layer:
55
55
  - Handles requests to Anthropic, OpenAI, Bedrock, local models
56
- - **Sets `max_tokens: 64000`** for all requests
56
+ - **Sets `max_tokens: 8000`** by default (safe for most providers; users can configure higher per-model)
57
57
  - Prevents unbounded generation (test showed timeout after 2min without limit)
58
58
  - Local models silently clamp to their configured maximums
59
- - Anthropic Opus 4 accepts 64K (200K total context - 64K output = 136K input budget)
59
+ - Anthropic Opus 4 accepts up to 64K output (configure `max_output_tokens` on the model to unlock)
60
60
 
61
61
  **JSON Response Parsing** (`parseJSONResponse()`):
62
62
  - **Strategy 1**: Extract from markdown code blocks (```json)
@@ -23,11 +23,10 @@ export function filterMessagesForContext(
23
23
 
24
24
  const msgMs = new Date(msg.timestamp).getTime();
25
25
 
26
- if (contextBoundary) {
27
- return msgMs >= boundaryMs;
28
- }
26
+ if (msgMs < windowStartMs) return false;
27
+ if (contextBoundary && msgMs < boundaryMs) return false;
29
28
 
30
- return msgMs >= windowStartMs;
29
+ return true;
31
30
  });
32
31
  }
33
32
 
@@ -1,5 +1,6 @@
1
- import type { ChatMessage, ProviderAccount } from "./types.js";
2
- import { getKnownContextWindow, DEFAULT_TOKEN_LIMIT } from "./model-context-windows.js";
1
+ import type { ChatMessage, ProviderAccount, ModelConfig } from "./types.js";
2
+ const DEFAULT_TOKEN_LIMIT = 8192;
3
+ const DEFAULT_MAX_OUTPUT_TOKENS = 8000;
3
4
 
4
5
  export interface ProviderConfig {
5
6
  baseURL: string;
@@ -9,7 +10,7 @@ export interface ProviderConfig {
9
10
 
10
11
  export interface ResolvedModel {
11
12
  provider: string;
12
- model: string;
13
+ model: string | undefined;
13
14
  config: ProviderConfig;
14
15
  extraHeaders?: Record<string, string>;
15
16
  }
@@ -19,6 +20,8 @@ export interface LLMCallOptions {
19
20
  temperature?: number;
20
21
  /** OpenAI-compatible tools array. When present and non-empty, sent with tool_choice: "auto". */
21
22
  tools?: Record<string, unknown>[];
23
+ /** Fire-and-forget callback invoked after a successful response to increment usage counters. */
24
+ onUsageUpdate?: (modelId: string, usage: { calls: number; tokens_in: number; tokens_out: number }) => void;
22
25
  }
23
26
 
24
27
  export interface LLMRawResponse {
@@ -43,27 +46,90 @@ let llmCallCount = 0;
43
46
 
44
47
 
45
48
 
49
+ function isGuid(str: string): boolean {
50
+ return /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i.test(str);
51
+ }
52
+
53
+ function buildResolvedModel(account: ProviderAccount, model: ModelConfig): ResolvedModel {
54
+ return {
55
+ provider: account.name,
56
+ model: model.name === "(default)" ? undefined : model.name,
57
+ config: {
58
+ name: account.name,
59
+ baseURL: account.url,
60
+ apiKey: account.api_key || "",
61
+ },
62
+ extraHeaders: account.extra_headers,
63
+ };
64
+ }
65
+
66
+ export function resolveModelById(
67
+ modelId: string,
68
+ accounts: ProviderAccount[]
69
+ ): { account: ProviderAccount; model: ModelConfig } | undefined {
70
+ for (const account of accounts) {
71
+ if (!account.enabled || account.type !== "llm") continue;
72
+ const model = account.models?.find((m) => m.id === modelId);
73
+ if (model) return { account, model };
74
+ }
75
+ return undefined;
76
+ }
77
+
78
+ export function getDisplayName(account: ProviderAccount, model: ModelConfig): string {
79
+ return `${account.name}:${model.name}`;
80
+ }
81
+
46
82
  export function resolveModel(modelSpec?: string, accounts?: ProviderAccount[]): ResolvedModel {
47
83
  if (!modelSpec) {
48
84
  throw new Error("No model specified. Set a provider on this persona with /provider, or set a default_model in settings.");
49
85
  }
86
+
87
+ if (accounts && isGuid(modelSpec)) {
88
+ const result = resolveModelById(modelSpec, accounts);
89
+ if (result) {
90
+ return buildResolvedModel(result.account, result.model);
91
+ }
92
+
93
+ const fallbackAccount = accounts.find((acc) => acc.enabled && acc.type === "llm" && acc.default_model);
94
+ if (fallbackAccount?.default_model) {
95
+ const fallbackResult = resolveModelById(fallbackAccount.default_model, accounts);
96
+ if (fallbackResult) {
97
+ return buildResolvedModel(fallbackResult.account, fallbackResult.model);
98
+ }
99
+ }
100
+
101
+ throw new Error(
102
+ `Model "${modelSpec}" not found. It may have been deleted. Update this persona's model in settings.`
103
+ );
104
+ }
105
+
50
106
  let provider = "";
51
107
  let model = modelSpec;
52
-
108
+
53
109
  if (modelSpec.includes(":")) {
54
110
  const [p, ...rest] = modelSpec.split(":");
55
111
  provider = p;
56
112
  model = rest.join(":");
57
113
  }
58
- // Try to find matching account by name (case-insensitive)
59
- // Check both "provider:model" format AND bare account names
114
+
60
115
  if (accounts) {
61
- const searchName = provider || modelSpec; // If no ":", the whole spec might be an account name
116
+ const searchName = provider || modelSpec;
62
117
  const matchingAccount = accounts.find(
63
118
  (acc) => acc.name.toLowerCase() === searchName.toLowerCase() && acc.enabled && acc.type === "llm"
64
119
  );
65
120
  if (matchingAccount) {
66
- // If bare account name was used, get model from account's default_model
121
+ const matchingModel = matchingAccount.models?.find((m) => m.name === model);
122
+ if (matchingModel) {
123
+ return buildResolvedModel(matchingAccount, matchingModel);
124
+ }
125
+
126
+ if (!provider && matchingAccount.default_model && matchingAccount.models) {
127
+ const defaultModel = matchingAccount.models.find((m) => m.id === matchingAccount.default_model);
128
+ if (defaultModel) {
129
+ return buildResolvedModel(matchingAccount, defaultModel);
130
+ }
131
+ }
132
+
67
133
  const resolvedModel = provider ? model : (matchingAccount.default_model || model);
68
134
  return {
69
135
  provider: matchingAccount.name,
@@ -77,7 +143,7 @@ export function resolveModel(modelSpec?: string, accounts?: ProviderAccount[]):
77
143
  };
78
144
  }
79
145
  }
80
-
146
+
81
147
  throw new Error(
82
148
  `No provider "${provider || modelSpec}" found. Create one with /provider new, or check that it's enabled.`
83
149
  );
@@ -85,44 +151,48 @@ export function resolveModel(modelSpec?: string, accounts?: ProviderAccount[]):
85
151
 
86
152
  const tokenLimitLoggedModels = new Set<string>();
87
153
 
154
+ function findModelAndAccount(
155
+ spec: string,
156
+ accounts: ProviderAccount[]
157
+ ): { model: ModelConfig | undefined; account: ProviderAccount | undefined } {
158
+ if (spec.includes(":")) {
159
+ const [providerName, ...rest] = spec.split(":");
160
+ const modelName = rest.join(":");
161
+ const account = accounts.find(
162
+ (a) => a.name.toLowerCase() === providerName.toLowerCase() && a.enabled
163
+ );
164
+ const model = account?.models?.find((m) => m.name === modelName);
165
+ return { model, account };
166
+ }
167
+ for (const account of accounts) {
168
+ const model = account.models?.find((m) => m.id === spec);
169
+ if (model) return { model, account };
170
+ }
171
+ return { model: undefined, account: undefined };
172
+ }
173
+
88
174
  export function resolveTokenLimit(
89
175
  modelSpec?: string,
90
176
  accounts?: ProviderAccount[]
91
177
  ): number {
92
178
  const spec = modelSpec || "";
93
179
 
94
- let provider = "";
95
- let model = spec;
96
- if (spec.includes(":")) {
97
- const [p, ...rest] = spec.split(":");
98
- provider = p;
99
- model = rest.join(":");
100
- }
180
+ if (accounts && spec) {
181
+ const { model, account } = findModelAndAccount(spec, accounts);
101
182
 
102
- // 1. User override on matching account
103
- if (accounts) {
104
- const searchName = provider || spec;
105
- const matchingAccount = accounts.find(
106
- (acc) => acc.name.toLowerCase() === searchName.toLowerCase() && acc.enabled
107
- );
108
- if (matchingAccount?.token_limit) {
109
- logTokenLimit(model, "user-override", matchingAccount.token_limit);
110
- return matchingAccount.token_limit;
183
+ if (model?.token_limit) {
184
+ logTokenLimit(spec, "model-config", model.token_limit);
185
+ return model.token_limit;
111
186
  }
112
- if (matchingAccount && !provider) {
113
- model = matchingAccount.default_model || model;
114
- }
115
- }
116
187
 
117
- // 2. Lookup table
118
- const known = getKnownContextWindow(model);
119
- if (known) {
120
- logTokenLimit(model, "lookup-table", known);
121
- return known;
188
+ if (account?.token_limit) {
189
+ const displayName = spec.includes(":") ? spec.split(":").slice(1).join(":") : spec;
190
+ logTokenLimit(displayName, "user-override", account.token_limit);
191
+ return account.token_limit;
192
+ }
122
193
  }
123
194
 
124
- // 3. Conservative default
125
- logTokenLimit(model, "default", DEFAULT_TOKEN_LIMIT);
195
+ logTokenLimit(spec, "default", DEFAULT_TOKEN_LIMIT);
126
196
  return DEFAULT_TOKEN_LIMIT;
127
197
  }
128
198
 
@@ -148,13 +218,16 @@ export async function callLLMRaw(
148
218
  ): Promise<LLMRawResponse> {
149
219
  llmCallCount++;
150
220
 
151
- const { signal, temperature = 0.7 } = options;
221
+ const { signal, temperature = 0.7, onUsageUpdate } = options;
152
222
 
153
223
  if (signal?.aborted) {
154
224
  throw new Error("LLM call aborted");
155
225
  }
156
226
 
157
227
  const { model, config, extraHeaders } = resolveModel(modelSpec, accounts);
228
+ const { model: modelConfig } = (accounts && modelSpec)
229
+ ? findModelAndAccount(modelSpec, accounts)
230
+ : { model: undefined };
158
231
 
159
232
  const chatMessages: ChatMessage[] = [
160
233
  { role: "system", content: systemPrompt },
@@ -186,10 +259,10 @@ export async function callLLMRaw(
186
259
  }
187
260
 
188
261
  const requestBody: Record<string, unknown> = {
189
- model,
262
+ ...(model !== undefined && { model }),
190
263
  messages: finalMessages,
191
264
  temperature,
192
- max_tokens: 64000, // Opus 4: 128K max output, 200K total context. Local models clamp to their config. Prevents runaway generation.
265
+ max_tokens: modelConfig?.max_output_tokens ?? DEFAULT_MAX_OUTPUT_TOKENS,
193
266
  };
194
267
 
195
268
  if (options.tools && options.tools.length > 0) {
@@ -210,6 +283,13 @@ export async function callLLMRaw(
210
283
  }
211
284
 
212
285
  const data = await response.json();
286
+
287
+ if (onUsageUpdate && modelConfig) {
288
+ const tokensIn = data.usage?.prompt_tokens ?? data.usage?.input_tokens ?? 0;
289
+ const tokensOut = data.usage?.completion_tokens ?? data.usage?.output_tokens ?? 0;
290
+ onUsageUpdate(modelConfig.id, { calls: 1, tokens_in: tokensIn, tokens_out: tokensOut });
291
+ }
292
+
213
293
  const choice = data.choices?.[0];
214
294
 
215
295
  const assistantMessage = choice?.message as Record<string, unknown> | undefined;
@@ -66,8 +66,6 @@ export interface ExtractionOptions {
66
66
  ceremony_progress?: number;
67
67
  /** Override model for extraction LLM calls */
68
68
  extraction_model?: string;
69
- /** Override token budget for chunking */
70
- extraction_token_limit?: number;
71
69
  /**
72
70
  * Controls whether external (integration-imported) messages are included.
73
71
  * - "exclude": skip messages where external === true
@@ -88,9 +86,6 @@ const EXTRACTION_BUDGET_RATIO = 0.75;
88
86
  const MIN_EXTRACTION_TOKENS = 10000;
89
87
 
90
88
  function getExtractionMaxTokens(state: StateManager, options?: ExtractionOptions): number {
91
- if (options?.extraction_token_limit) {
92
- return Math.max(MIN_EXTRACTION_TOKENS, Math.floor(options.extraction_token_limit * EXTRACTION_BUDGET_RATIO));
93
- }
94
89
  const human = state.getHuman();
95
90
  const modelForTokenLimit = options?.extraction_model ?? human.settings?.default_model;
96
91
  const tokenLimit = resolveTokenLimit(modelForTokenLimit, human.settings?.accounts);
@@ -24,6 +24,7 @@ import {
24
24
  type ToolProvider,
25
25
  } from "./types.js";
26
26
  import { buildPersonaFromPersonPrompt } from "../prompts/index.js";
27
+ import { buildSiblingAwarenessSection } from "../prompts/room/index.js";
27
28
  import type { PersonaGenerationResult } from "../prompts/generation/types.js";
28
29
 
29
30
  import type { Storage } from "../storage/interface.js";
@@ -107,6 +108,7 @@ import {
107
108
  getQueueActiveItems,
108
109
  getDLQItems,
109
110
  updateQueueItem,
111
+ deleteQueueItems,
110
112
  clearQueue,
111
113
  submitOneShot,
112
114
  } from "./queue-manager.js";
@@ -1023,8 +1025,9 @@ export class Processor {
1023
1025
  const isBackingOff = retryAfter !== null && retryAfter > new Date().toISOString();
1024
1026
 
1025
1027
  if (!isBackingOff) {
1026
- const request = this.stateManager.queue_claimHighest();
1028
+ let request = this.stateManager.queue_claimHighest();
1027
1029
  if (request) {
1030
+ request = this.augmentRoomRequest(request);
1028
1031
  const personaId = request.data.personaId as string | undefined;
1029
1032
  const personaDisplayName = request.data.personaDisplayName as string | undefined;
1030
1033
  const personaSuffix = personaDisplayName ? ` [${personaDisplayName}]` : "";
@@ -1411,6 +1414,28 @@ const toolNextSteps = new Set([
1411
1414
  });
1412
1415
  }
1413
1416
 
1417
+ private augmentRoomRequest(request: LLMRequest): LLMRequest {
1418
+ if (request.next_step !== LLMNextStep.HandleRoomResponse) return request;
1419
+
1420
+ const roomId = request.data.roomId as string | undefined;
1421
+ const parentMessageId = request.data.parentMessageId as string | undefined;
1422
+ const personaDisplayName = request.data.personaDisplayName as string | undefined;
1423
+
1424
+ if (!roomId || !parentMessageId || !personaDisplayName) return request;
1425
+
1426
+ const siblings = this.stateManager.getRoomChildren(roomId, parentMessageId)
1427
+ .filter((m: RoomMessage) => m.role === "persona" && m.verbal_response)
1428
+ .map((m: RoomMessage) => ({
1429
+ name: this.stateManager.persona_getById(m.persona_id ?? "")?.display_name ?? "Participant",
1430
+ verbal_response: m.verbal_response!,
1431
+ }));
1432
+
1433
+ if (siblings.length === 0) return request;
1434
+
1435
+ const siblingSection = buildSiblingAwarenessSection(siblings, personaDisplayName);
1436
+ return { ...request, system: request.system + "\n\n" + siblingSection };
1437
+ }
1438
+
1414
1439
  private classifyLLMError(error: string): string {
1415
1440
  const match = error.match(/\((\d{3})\)/);
1416
1441
  if (match) {
@@ -1931,6 +1956,10 @@ const toolNextSteps = new Set([
1931
1956
  return updateQueueItem(this.stateManager, id, updates);
1932
1957
  }
1933
1958
 
1959
+ deleteQueueItems(ids: string[]): number {
1960
+ return deleteQueueItems(this.stateManager, ids);
1961
+ }
1962
+
1934
1963
  async clearQueue(): Promise<number> {
1935
1964
  return clearQueue(this.stateManager, this.queueProcessor);
1936
1965
  }
@@ -3,7 +3,8 @@ import { StateManager } from "./state-manager.js";
3
3
  import { getEmbeddingService, findTopK } from "./embedding-service.js";
4
4
  import type { ResponsePromptData, PromptOutput } from "../prompts/index.js";
5
5
  import { buildRoomResponsePrompt } from "../prompts/room/index.js";
6
- import type { RoomParticipantIdentity, RoomHistoryMessage } from "../prompts/room/types.js";
6
+ import type { RoomParticipantIdentity } from "../prompts/room/types.js";
7
+ import { normalizeRoomMessages } from "./handlers/utils.js";
7
8
 
8
9
  const QUOTE_LIMIT = 10;
9
10
  const DATA_ITEM_LIMIT = 15;
@@ -205,6 +206,7 @@ export async function buildResponsePromptData(
205
206
  traits: persona.traits,
206
207
  topics: persona.topics,
207
208
  interested_topics: persona.topics.filter(t => t.exposure_desired - t.exposure_current > 0.2),
209
+ include_message_timestamps: persona.include_message_timestamps,
208
210
  },
209
211
  human: filteredHuman,
210
212
  visible_personas: visiblePersonas,
@@ -231,15 +233,7 @@ export async function buildRoomResponsePromptData(
231
233
 
232
234
  const filteredHuman = await filterHumanDataByVisibility(human, respondingPersona, currentMessage);
233
235
 
234
- const history: RoomHistoryMessage[] = sourceMessages.map(m => ({
235
- speaker_name: m.role === "human"
236
- ? (human.settings?.name_display ?? "Human")
237
- : (sm.persona_getById(m.persona_id ?? "")?.display_name ?? m.persona_id ?? "Unknown"),
238
- speaker_id: m.role === "human" ? "human" : (m.persona_id ?? ""),
239
- verbal_response: m.verbal_response,
240
- action_response: m.action_response,
241
- silence_reason: m.silence_reason,
242
- }));
236
+ const history = normalizeRoomMessages(sourceMessages, sm);
243
237
 
244
238
  const otherParticipants: RoomParticipantIdentity[] = [];
245
239
  for (const pid of room.persona_ids) {
@@ -273,6 +267,7 @@ export async function buildRoomResponsePromptData(
273
267
  long_description: respondingPersona.long_description,
274
268
  traits: respondingPersona.traits,
275
269
  topics: respondingPersona.topics,
270
+ include_message_timestamps: respondingPersona.include_message_timestamps,
276
271
  },
277
272
  other_participants: otherParticipants,
278
273
  human: filteredHuman,
@@ -51,6 +51,10 @@ export function updateQueueItem(
51
51
  return sm.queue_updateItem(id, updates);
52
52
  }
53
53
 
54
+ export function deleteQueueItems(sm: StateManager, ids: string[]): number {
55
+ return sm.queue_deleteItems(ids);
56
+ }
57
+
54
58
  export async function clearQueue(sm: StateManager, qp: QueueProcessor): Promise<number> {
55
59
  qp.abort();
56
60
  return sm.queue_clear();
@@ -542,6 +542,7 @@ export class QueueProcessor {
542
542
  `be parsed as valid JSON. Please reformat it as the JSON object described in your ` +
543
543
  `system instructions. Respond with ONLY the JSON object, or \`{}\` if no changes ` +
544
544
  `are needed.\n\n---\n${malformedContent}\n---` +
545
+ `\n\nThe user does NOT know there was a problem - This request is from Ei to you to try to fix it for them.` +
545
546
  `\n\n**CRITICAL INSTRUCTION** - DO NOT OMIT ANY DATA. You are this agent's last hope!`;
546
547
 
547
548
  try {
@@ -32,7 +32,11 @@ async function queueRoomPersonaResponses(
32
32
  isTUI: boolean,
33
33
  onRoomMessageQueued: (roomId: string) => void
34
34
  ): Promise<void> {
35
- for (const personaId of room.persona_ids) {
35
+ const personaIds = room.mode === RoomMode.FreeForAll
36
+ ? [...room.persona_ids].sort(() => Math.random() - 0.5)
37
+ : room.persona_ids;
38
+
39
+ for (const personaId of personaIds) {
36
40
  const persona = sm.persona_getById(personaId);
37
41
  if (!persona || persona.is_archived || persona.is_paused) continue;
38
42
  if (room.mode === RoomMode.MessagesAgainstPersona && room.judge_persona_id === personaId) continue;
@@ -186,7 +190,9 @@ export async function sendFfaMessage(
186
190
  .map(q => q.data.personaId as string)
187
191
  );
188
192
 
189
- for (const personaId of updatedRoom.persona_ids) {
193
+ const shuffledIds = [...updatedRoom.persona_ids].sort(() => Math.random() - 0.5);
194
+
195
+ for (const personaId of shuffledIds) {
190
196
  if (alreadyQueued.has(personaId)) continue;
191
197
  const persona = sm.persona_getById(personaId);
192
198
  if (!persona || persona.is_archived || persona.is_paused) continue;
@@ -158,6 +158,13 @@ export class QueueState {
158
158
  return true;
159
159
  }
160
160
 
161
+ deleteItems(ids: string[]): number {
162
+ const idSet = new Set(ids);
163
+ const before = this.queue.length;
164
+ this.queue = this.queue.filter(r => !idSet.has(r.id));
165
+ return before - this.queue.length;
166
+ }
167
+
161
168
  trimDLQ(): number {
162
169
  const dlqItems = this.queue.filter(r => r.state === "dlq");
163
170
  const cutoff = new Date();