@codori/client 0.0.2 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,135 @@
1
+ import type { CodexUserInput } from './codex-rpc'
2
+ import { encodeProjectIdSegment } from './codori'
3
+ import { resolveApiUrl, shouldUseServerProxy } from './network'
4
+
5
// Hard cap on images per message; files beyond this are reported as 'tooMany'.
export const MAX_ATTACHMENTS_PER_MESSAGE = 8
// Per-file size cap: 20 MiB; larger files are reported as 'tooLarge'.
export const MAX_ATTACHMENT_BYTES = 20 * 1024 * 1024
7
+
8
// The subset of file metadata the attachment validator inspects
// (structurally compatible with a browser File object).
export type FileLike = {
  name: string
  size: number
  type: string
}

// One rejected file from validateAttachmentSelection, carrying a
// machine-readable code plus a user-facing message.
export type AttachmentValidationIssue = {
  code: 'tooMany' | 'unsupportedType' | 'tooLarge'
  fileName: string
  message: string
}

// An attachment after the server has stored it for a thread.
export type PersistedProjectAttachment = {
  filename: string
  mediaType: string | null  // may be null when no media type was recorded
  path: string              // server-side path used to fetch the file later
}

// Server response for an attachment upload request.
export type ProjectAttachmentUploadResponse = {
  threadId: string
  files: PersistedProjectAttachment[]
}
30
+
31
+ export const isSupportedAttachmentType = (mediaType: string) =>
32
+ mediaType.toLowerCase().startsWith('image/')
33
+
34
+ export const validateAttachmentSelection = <T extends FileLike>(
35
+ files: T[],
36
+ existingCount: number
37
+ ) => {
38
+ const issues: AttachmentValidationIssue[] = []
39
+ const accepted: T[] = []
40
+
41
+ for (const file of files) {
42
+ if (existingCount + accepted.length >= MAX_ATTACHMENTS_PER_MESSAGE) {
43
+ issues.push({
44
+ code: 'tooMany',
45
+ fileName: file.name,
46
+ message: `You can attach up to ${MAX_ATTACHMENTS_PER_MESSAGE} images per message.`
47
+ })
48
+ continue
49
+ }
50
+
51
+ const mediaType = file.type || 'application/octet-stream'
52
+ if (!isSupportedAttachmentType(mediaType)) {
53
+ issues.push({
54
+ code: 'unsupportedType',
55
+ fileName: file.name,
56
+ message: 'Only image attachments are currently supported.'
57
+ })
58
+ continue
59
+ }
60
+
61
+ if (file.size > MAX_ATTACHMENT_BYTES) {
62
+ issues.push({
63
+ code: 'tooLarge',
64
+ fileName: file.name,
65
+ message: `Each image must be ${Math.floor(MAX_ATTACHMENT_BYTES / (1024 * 1024))} MB or smaller.`
66
+ })
67
+ continue
68
+ }
69
+
70
+ accepted.push(file)
71
+ }
72
+
73
+ return {
74
+ accepted,
75
+ issues
76
+ }
77
+ }
78
+
79
+ export const buildTurnStartInput = (
80
+ text: string,
81
+ attachments: Array<{ path: string }>
82
+ ): CodexUserInput[] => {
83
+ const input: CodexUserInput[] = []
84
+ const trimmedText = text.trim()
85
+
86
+ if (trimmedText) {
87
+ input.push({
88
+ type: 'text',
89
+ text: trimmedText,
90
+ text_elements: []
91
+ })
92
+ }
93
+
94
+ for (const attachment of attachments) {
95
+ input.push({
96
+ type: 'localImage',
97
+ path: attachment.path
98
+ })
99
+ }
100
+
101
+ return input
102
+ }
103
+
104
+ export const resolveAttachmentPreviewUrl = (input: {
105
+ projectId: string
106
+ path: string
107
+ configuredBase?: string | null
108
+ }) => {
109
+ const query = new URLSearchParams({
110
+ path: input.path
111
+ })
112
+ const requestPath = `/projects/${encodeProjectIdSegment(input.projectId)}/attachments/file?${query.toString()}`
113
+
114
+ if (shouldUseServerProxy(input.configuredBase)) {
115
+ return `/api/codori${requestPath}`
116
+ }
117
+
118
+ return resolveApiUrl(
119
+ requestPath,
120
+ input.configuredBase
121
+ )
122
+ }
123
+
124
+ export const resolveAttachmentUploadUrl = (input: {
125
+ projectId: string
126
+ configuredBase?: string | null
127
+ }) => {
128
+ const requestPath = `/projects/${encodeProjectIdSegment(input.projectId)}/attachments`
129
+
130
+ if (shouldUseServerProxy(input.configuredBase)) {
131
+ return `/api/codori${requestPath}`
132
+ }
133
+
134
+ return resolveApiUrl(requestPath, input.configuredBase)
135
+ }
@@ -0,0 +1,339 @@
1
// Reasoning-effort identifiers used when the backend does not supply its
// own list. The first entry doubles as resolveSelectedEffort's last resort.
export const FALLBACK_REASONING_EFFORTS = [
  'none',
  'minimal',
  'low',
  'medium',
  'high',
  'xhigh'
] as const

// Union of the identifiers above.
export type ReasoningEffort = typeof FALLBACK_REASONING_EFFORTS[number]

// A model as presented to the UI after normalization (see normalizeModel).
export type ModelOption = {
  id: string                                    // stable id; falls back to the model name
  model: string                                 // model identifier sent to the backend
  displayName: string                           // label shown in pickers
  hidden: boolean                               // true → excluded by visibleModelOptions
  isDefault: boolean                            // preferred when nothing is explicitly selected
  defaultReasoningEffort: ReasoningEffort
  supportedReasoningEfforts: ReasoningEffort[]  // never empty after normalization
}

// Token accounting for a thread: cumulative totals plus the latest turn.
export type TokenUsageSnapshot = {
  totalInputTokens: number
  totalCachedInputTokens: number
  totalOutputTokens: number
  lastInputTokens: number
  lastCachedInputTokens: number
  lastOutputTokens: number
  modelContextWindow: number | null  // null when the window is unknown
}

// Loosely-typed wire shape of one reasoning-effort entry; validated at
// runtime by toReasoningEfforts.
type ReasoningEffortOptionRecord = {
  reasoningEffort?: unknown
}

// Loosely-typed wire shape of a model record; every field is checked in
// normalizeModel before use.
type ModelRecord = {
  id?: unknown
  model?: unknown
  displayName?: unknown
  hidden?: unknown
  isDefault?: unknown
  defaultReasoningEffort?: unknown
  supportedReasoningEfforts?: unknown
}
45
+
46
// Built-in catalog used whenever the backend yields no usable models
// (normalizeModelList) or hides every model (visibleModelOptions).
export const FALLBACK_MODELS: ModelOption[] = [
  {
    id: 'gpt-5.4',
    model: 'gpt-5.4',
    displayName: 'GPT-5.4',
    hidden: false,
    isDefault: true,  // the single default entry of the fallback set
    defaultReasoningEffort: 'medium',
    supportedReasoningEfforts: [...FALLBACK_REASONING_EFFORTS]
  },
  {
    id: 'gpt-5.4-mini',
    model: 'gpt-5.4-mini',
    displayName: 'GPT-5.4 Mini',
    hidden: false,
    isDefault: false,
    defaultReasoningEffort: 'medium',
    supportedReasoningEfforts: [...FALLBACK_REASONING_EFFORTS]
  },
  {
    id: 'gpt-5.3-codex',
    model: 'gpt-5.3-codex',
    displayName: 'GPT-5.3 Codex',
    hidden: false,
    isDefault: false,
    defaultReasoningEffort: 'medium',
    supportedReasoningEfforts: [...FALLBACK_REASONING_EFFORTS]
  }
]
75
+
76
+ const isObjectRecord = (value: unknown): value is Record<string, unknown> =>
77
+ typeof value === 'object' && value !== null && !Array.isArray(value)
78
+
79
+ const isReasoningEffort = (value: unknown): value is ReasoningEffort =>
80
+ typeof value === 'string' && FALLBACK_REASONING_EFFORTS.includes(value as ReasoningEffort)
81
+
82
+ const toFiniteNumber = (value: unknown) => {
83
+ if (typeof value === 'number' && Number.isFinite(value)) {
84
+ return value
85
+ }
86
+
87
+ if (typeof value === 'bigint') {
88
+ return Number(value)
89
+ }
90
+
91
+ if (typeof value === 'string' && value.trim()) {
92
+ const parsed = Number(value)
93
+ if (Number.isFinite(parsed)) {
94
+ return parsed
95
+ }
96
+ }
97
+
98
+ return null
99
+ }
100
+
101
+ const toReasoningEfforts = (value: unknown): ReasoningEffort[] => {
102
+ if (!Array.isArray(value)) {
103
+ return []
104
+ }
105
+
106
+ return value.flatMap((entry) => {
107
+ if (isReasoningEffort(entry)) {
108
+ return [entry]
109
+ }
110
+
111
+ const record = isObjectRecord(entry) ? entry as ReasoningEffortOptionRecord : null
112
+ return isReasoningEffort(record?.reasoningEffort) ? [record.reasoningEffort] : []
113
+ })
114
+ }
115
+
116
+ const normalizeModel = (value: unknown): ModelOption | null => {
117
+ const record = isObjectRecord(value) ? value as ModelRecord : null
118
+ if (!record || typeof record.model !== 'string') {
119
+ return null
120
+ }
121
+
122
+ const supportedReasoningEfforts = toReasoningEfforts(record.supportedReasoningEfforts)
123
+ const defaultReasoningEffort = isReasoningEffort(record.defaultReasoningEffort)
124
+ ? record.defaultReasoningEffort
125
+ : supportedReasoningEfforts[0] ?? 'medium'
126
+
127
+ return {
128
+ id: typeof record.id === 'string' ? record.id : record.model,
129
+ model: record.model,
130
+ displayName: typeof record.displayName === 'string' && record.displayName.trim()
131
+ ? record.displayName.trim()
132
+ : record.model,
133
+ hidden: Boolean(record.hidden),
134
+ isDefault: Boolean(record.isDefault),
135
+ defaultReasoningEffort,
136
+ supportedReasoningEfforts: supportedReasoningEfforts.length > 0
137
+ ? supportedReasoningEfforts
138
+ : [...FALLBACK_REASONING_EFFORTS]
139
+ }
140
+ }
141
+
142
+ export const normalizeModelList = (value: unknown): ModelOption[] => {
143
+ const data = isObjectRecord(value) && Array.isArray(value.data)
144
+ ? value.data
145
+ : Array.isArray(value)
146
+ ? value
147
+ : []
148
+
149
+ const models = data
150
+ .map(normalizeModel)
151
+ .filter((entry): entry is ModelOption => entry !== null)
152
+
153
+ return models.length > 0 ? models : FALLBACK_MODELS
154
+ }
155
+
156
+ export const ensureModelOption = (
157
+ models: ModelOption[],
158
+ model: string | null | undefined,
159
+ effort?: ReasoningEffort | null
160
+ ) => {
161
+ if (!model || models.some(entry => entry.model === model)) {
162
+ return models
163
+ }
164
+
165
+ return [{
166
+ id: model,
167
+ model,
168
+ displayName: model,
169
+ hidden: false,
170
+ isDefault: false,
171
+ defaultReasoningEffort: effort ?? 'medium',
172
+ supportedReasoningEfforts: [...FALLBACK_REASONING_EFFORTS]
173
+ }, ...models]
174
+ }
175
+
176
+ export const visibleModelOptions = (models: ModelOption[]) => {
177
+ const visible = models.filter(model => !model.hidden)
178
+ return visible.length > 0 ? visible : FALLBACK_MODELS
179
+ }
180
+
181
+ export const resolveSelectedModel = (
182
+ models: ModelOption[],
183
+ preferredModel?: string | null
184
+ ) => {
185
+ if (preferredModel && models.some(model => model.model === preferredModel)) {
186
+ return preferredModel
187
+ }
188
+
189
+ const defaultModel = models.find(model => model.isDefault)?.model
190
+ return defaultModel ?? models[0]?.model ?? FALLBACK_MODELS[0]!.model
191
+ }
192
+
193
+ export const resolveEffortOptions = (
194
+ models: ModelOption[],
195
+ model: string | null | undefined
196
+ ) => {
197
+ const selectedModel = models.find(entry => entry.model === model)
198
+ return selectedModel?.supportedReasoningEfforts.length
199
+ ? selectedModel.supportedReasoningEfforts
200
+ : [...FALLBACK_REASONING_EFFORTS]
201
+ }
202
+
203
+ export const resolveSelectedEffort = (
204
+ models: ModelOption[],
205
+ model: string | null | undefined,
206
+ preferredEffort?: ReasoningEffort | null
207
+ ) => {
208
+ const effortOptions = resolveEffortOptions(models, model)
209
+ if (preferredEffort && effortOptions.includes(preferredEffort)) {
210
+ return preferredEffort
211
+ }
212
+
213
+ const selectedModel = models.find(entry => entry.model === model)
214
+ if (selectedModel && effortOptions.includes(selectedModel.defaultReasoningEffort)) {
215
+ return selectedModel.defaultReasoningEffort
216
+ }
217
+
218
+ return effortOptions[0] ?? 'medium'
219
+ }
220
+
221
+ export const coercePromptSelection = (
222
+ models: ModelOption[],
223
+ preferredModel?: string | null,
224
+ preferredEffort?: ReasoningEffort | null
225
+ ) => {
226
+ const nextModel = resolveSelectedModel(models, preferredModel)
227
+ return {
228
+ model: nextModel,
229
+ effort: resolveSelectedEffort(models, nextModel, preferredEffort)
230
+ }
231
+ }
232
+
233
+ export const normalizeConfigDefaults = (value: unknown) => {
234
+ const config = isObjectRecord(value) && isObjectRecord(value.config)
235
+ ? value.config
236
+ : null
237
+
238
+ return {
239
+ model: typeof config?.model === 'string' ? config.model : null,
240
+ effort: isReasoningEffort(config?.model_reasoning_effort) ? config.model_reasoning_effort : null,
241
+ contextWindow: toFiniteNumber(config?.model_context_window)
242
+ }
243
+ }
244
+
245
+ export const normalizeThreadTokenUsage = (value: unknown): TokenUsageSnapshot | null => {
246
+ const params = isObjectRecord(value) ? value : null
247
+ const tokenUsage = isObjectRecord(params?.tokenUsage) ? params.tokenUsage : null
248
+ if (!tokenUsage) {
249
+ return null
250
+ }
251
+
252
+ const total = isObjectRecord(tokenUsage.total) ? tokenUsage.total : {}
253
+ const last = isObjectRecord(tokenUsage.last) ? tokenUsage.last : {}
254
+
255
+ return {
256
+ totalInputTokens: toFiniteNumber(total.inputTokens) ?? 0,
257
+ totalCachedInputTokens: toFiniteNumber(total.cachedInputTokens) ?? 0,
258
+ totalOutputTokens: toFiniteNumber(total.outputTokens) ?? 0,
259
+ lastInputTokens: toFiniteNumber(last.inputTokens) ?? 0,
260
+ lastCachedInputTokens: toFiniteNumber(last.cachedInputTokens) ?? 0,
261
+ lastOutputTokens: toFiniteNumber(last.outputTokens) ?? 0,
262
+ modelContextWindow: toFiniteNumber(tokenUsage.modelContextWindow)
263
+ }
264
+ }
265
+
266
+ export const buildTurnOverrides = (
267
+ model: string | null | undefined,
268
+ effort: ReasoningEffort | null | undefined
269
+ ) => {
270
+ const overrides: {
271
+ model?: string
272
+ effort?: ReasoningEffort
273
+ } = {}
274
+
275
+ if (model) {
276
+ overrides.model = model
277
+ }
278
+
279
+ if (effort) {
280
+ overrides.effort = effort
281
+ }
282
+
283
+ return overrides
284
+ }
285
+
286
+ export const formatReasoningEffortLabel = (value: ReasoningEffort) => {
287
+ switch (value) {
288
+ case 'xhigh':
289
+ return 'Very high'
290
+ case 'none':
291
+ return 'None'
292
+ default:
293
+ return value.charAt(0).toUpperCase() + value.slice(1)
294
+ }
295
+ }
296
+
297
+ export const formatCompactTokenCount = (value: number) => {
298
+ if (value < 1000) {
299
+ return String(value)
300
+ }
301
+
302
+ const short = value / 1000
303
+ const rounded = short >= 10 ? short.toFixed(0) : short.toFixed(1)
304
+ return `${rounded.replace(/\\.0$/, '')}k`
305
+ }
306
+
307
+ export const resolveContextWindowState = (
308
+ tokenUsage: TokenUsageSnapshot | null,
309
+ fallbackContextWindow: number | null,
310
+ usageKnown = true
311
+ ) => {
312
+ const contextWindow = tokenUsage?.modelContextWindow ?? fallbackContextWindow
313
+ const usedTokens = tokenUsage
314
+ ? tokenUsage.totalInputTokens + tokenUsage.totalOutputTokens
315
+ : usageKnown
316
+ ? 0
317
+ : null
318
+
319
+ if (!contextWindow || usedTokens == null) {
320
+ return {
321
+ contextWindow,
322
+ usedTokens,
323
+ remainingTokens: null,
324
+ usedPercent: null,
325
+ remainingPercent: null
326
+ }
327
+ }
328
+
329
+ const cappedUsedTokens = Math.max(0, Math.min(contextWindow, usedTokens))
330
+ const usedPercent = Math.max(0, Math.min(100, (cappedUsedTokens / contextWindow) * 100))
331
+
332
+ return {
333
+ contextWindow,
334
+ usedTokens: cappedUsedTokens,
335
+ remainingTokens: Math.max(0, contextWindow - cappedUsedTokens),
336
+ usedPercent,
337
+ remainingPercent: Math.max(0, 100 - usedPercent)
338
+ }
339
+ }
@@ -100,6 +100,16 @@ export type ChatPart =
100
100
  text: string
101
101
  state?: 'done' | 'streaming'
102
102
  }
103
+ | {
104
+ type: 'attachment'
105
+ attachment: {
106
+ kind: 'image'
107
+ name: string
108
+ mediaType: string
109
+ url?: string | null
110
+ localPath?: string | null
111
+ }
112
+ }
103
113
  | {
104
114
  type: 'reasoning'
105
115
  summary: string[]
@@ -125,27 +135,39 @@ export type ChatMessage = {
125
135
// A subagent counts as active while its status is null, 'pendingInit', or 'running'.
export const isSubagentActiveStatus = (status: SubagentAgentStatus) => {
  const activeStatuses: SubagentAgentStatus[] = [null, 'pendingInit', 'running']
  return activeStatuses.includes(status)
}
127
137
 
128
- const formatUserInput = (input: CodexUserInput) => {
138
+ const streamingState = (pending?: boolean) => pending ? 'streaming' : 'done'
139
+
140
+ const userInputToParts = (input: CodexUserInput): ChatPart[] => {
129
141
  if (input.type === 'text') {
130
- return input.text
142
+ if (!input.text.trim()) {
143
+ return []
144
+ }
145
+
146
+ return [{
147
+ type: 'text',
148
+ text: input.text,
149
+ state: 'done'
150
+ }]
131
151
  }
132
152
 
133
- return `[local image] ${input.path}`
153
+ return [{
154
+ type: 'attachment',
155
+ attachment: {
156
+ kind: 'image',
157
+ name: input.path.split(/[\\/]/).pop() || 'image',
158
+ mediaType: 'image/*',
159
+ localPath: input.path
160
+ }
161
+ }]
134
162
  }
135
163
 
136
- const streamingState = (pending?: boolean) => pending ? 'streaming' : 'done'
137
-
138
164
  export const itemToMessages = (item: CodexThreadItem): ChatMessage[] => {
139
165
  switch (item.type) {
140
166
  case 'userMessage':
141
167
  return [{
142
168
  id: item.id,
143
169
  role: 'user',
144
- parts: [{
145
- type: 'text',
146
- text: item.content.map(formatUserInput).join('\n').trim(),
147
- state: 'done'
148
- }]
170
+ parts: item.content.flatMap(userInputToParts)
149
171
  }]
150
172
  case 'agentMessage':
151
173
  return [{
@@ -4,6 +4,9 @@ type JsonRpcError = {
4
4
  data?: unknown
5
5
  }
6
6
 
7
+ export type { ReasoningEffort } from './chat-prompt-controls'
8
+ import type { ReasoningEffort } from './chat-prompt-controls'
9
+
7
10
  type JsonRpcRequest = {
8
11
  id: number
9
12
  method: string
@@ -163,10 +166,14 @@ export type ThreadListResponse = {
163
166
 
164
167
// Response to thread/start: the created thread plus, optionally, the
// model and reasoning effort applied to it (both fields may be omitted
// or null).
export type ThreadStartResponse = {
  thread: CodexThread
  model?: string | null
  reasoningEffort?: ReasoningEffort | null
}

// Response to thread/resume: mirrors ThreadStartResponse for a resumed thread.
export type ThreadResumeResponse = {
  thread: CodexThread
  model?: string | null
  reasoningEffort?: ReasoningEffort | null
}
171
178
 
172
179
  export type ThreadReadResponse = {
@@ -179,6 +186,18 @@ export type TurnStartResponse = {
179
186
  }
180
187
  }
181
188
 
189
// Response to a model-list request; `data` entries are left untyped here
// and validated client-side (see normalizeModelList in chat-prompt-controls).
export type ModelListResponse = {
  data?: unknown[]
}

// Response to a config read; all fields are optional and may be null.
// `model_context_window` may arrive as a number or a numeric string.
export type ConfigReadResponse = {
  config?: {
    model?: string | null
    model_context_window?: number | string | null
    model_reasoning_effort?: ReasoningEffort | null
  } | null
}
200
+
182
201
  export type CodexRpcNotification = {
183
202
  method: string
184
203
  params?: unknown