@fugood/bricks-project 2.24.1-beta.1 → 2.24.1-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -824,6 +824,10 @@ export const templateActionNameMap = {
   },
   GENERATOR_OPENAI_LLM: {
     GENERATOR_OPENAI_LLM_COMPLETION: {
+      apiMode: 'GENERATOR_OPENAI_LLM_API_MODE',
+      previousResponseId: 'GENERATOR_OPENAI_LLM_PREVIOUS_RESPONSE_ID',
+      promptCacheKey: 'GENERATOR_OPENAI_LLM_PROMPT_CACHE_KEY',
+      responseOptions: 'GENERATOR_OPENAI_LLM_RESPONSE_OPTIONS',
       messages: 'GENERATOR_OPENAI_LLM_MESSAGES',
       maxTokens: 'GENERATOR_OPENAI_LLM_MAX_TOKENS',
       temperature: 'GENERATOR_OPENAI_LLM_TEMPERATURE',
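For orientation, here is a minimal sketch of how a name map like this is typically consumed when an action param is serialized. Only the constant names are taken from the diff; the `resolveParamName` helper is hypothetical, not the package's actual compiler code:

```ts
// Stub of the relevant slice of templateActionNameMap (names copied from the diff above).
const templateActionNameMap = {
  GENERATOR_OPENAI_LLM: {
    GENERATOR_OPENAI_LLM_COMPLETION: {
      apiMode: 'GENERATOR_OPENAI_LLM_API_MODE',
      previousResponseId: 'GENERATOR_OPENAI_LLM_PREVIOUS_RESPONSE_ID',
      promptCacheKey: 'GENERATOR_OPENAI_LLM_PROMPT_CACHE_KEY',
      responseOptions: 'GENERATOR_OPENAI_LLM_RESPONSE_OPTIONS',
      messages: 'GENERATOR_OPENAI_LLM_MESSAGES',
    },
  },
} as const

// Hypothetical helper: resolve the wire-level constant for a param input key.
function resolveParamName(
  action: keyof typeof templateActionNameMap.GENERATOR_OPENAI_LLM,
  input: string,
): string | undefined {
  const params: Record<string, string> = templateActionNameMap.GENERATOR_OPENAI_LLM[action]
  return params[input]
}

console.log(resolveParamName('GENERATOR_OPENAI_LLM_COMPLETION', 'apiMode'))
// -> 'GENERATOR_OPENAI_LLM_API_MODE'
```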
package/compile/index.ts CHANGED
@@ -543,6 +543,7 @@ export const compile = async (app: Application) => {
   const timestamp = Date.now()
   // Pre-index subspace ids so the canvas-item validation below stays O(1).
   const subspaceIdSet = new Set(app.subspaces.map((s) => s.id))
+  const compiledAutomationMap = app.automationMap ? compileAutomation(app.automationMap) : null
   const config = {
     title: `${app.name || 'Unknown'}(${timestamp})`,
     subspace_map: app.subspaces.reduce((subspaceMap, subspace, subspaceIndex) => {
@@ -1262,12 +1263,10 @@ export const compile = async (app: Application) => {
     fonts: app.fonts,
     ...compileApplicationSettings(app.settings),
     // Use typed automationMap if available, otherwise fall back to TEMP metadata
-    test_map: app.automationMap
-      ? compileAutomation(app.automationMap)['AUTOMATION_MAP_DEFAULT']?.map || {}
+    test_map: compiledAutomationMap
+      ? compiledAutomationMap['AUTOMATION_MAP_DEFAULT']?.map || {}
       : app.metadata?.TEMP_test_map || {},
-    automation_map: app.automationMap
-      ? compileAutomation(app.automationMap)
-      : app.metadata?.TEMP_automation_map || {},
+    automation_map: compiledAutomationMap || app.metadata?.TEMP_automation_map || {},
     update_timestamp: timestamp,
   }
   return config
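Net effect of the two hunks above: compileAutomation now runs at most once per compile instead of being invoked separately for test_map and automation_map. A self-contained sketch of the hoisting pattern, with stubbed shapes that are assumptions rather than the package's real types:

```ts
// Assumed, simplified shapes -- the real Application/compileAutomation types live in the package.
type CompiledAutomation = Record<string, { map?: Record<string, unknown> }>

const compileAutomation = (automationMap: Record<string, unknown>): CompiledAutomation => ({
  AUTOMATION_MAP_DEFAULT: { map: { ...automationMap } },
})

function buildConfigSlice(app: {
  automationMap?: Record<string, unknown>
  metadata?: { TEMP_test_map?: {}; TEMP_automation_map?: {} }
}) {
  // Hoisted once, reused for both fields below.
  const compiledAutomationMap = app.automationMap ? compileAutomation(app.automationMap) : null
  return {
    test_map: compiledAutomationMap
      ? compiledAutomationMap['AUTOMATION_MAP_DEFAULT']?.map || {}
      : app.metadata?.TEMP_test_map || {},
    automation_map: compiledAutomationMap || app.metadata?.TEMP_automation_map || {},
  }
}
```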
package/package.json CHANGED
@@ -1,13 +1,13 @@
 {
   "name": "@fugood/bricks-project",
-  "version": "2.24.1-beta.1",
+  "version": "2.24.1-beta.3",
   "main": "index.ts",
   "scripts": {
     "typecheck": "tsc --noEmit",
     "build": "bun scripts/build.js"
   },
   "dependencies": {
-    "@fugood/bricks-cli": "^2.24.1-beta.0",
+    "@fugood/bricks-cli": "^2.24.1-beta.3",
     "@huggingface/gguf": "^0.3.2",
     "@iarna/toml": "^3.0.0",
     "@modelcontextprotocol/sdk": "^1.15.0",
package/package.json.bak CHANGED
@@ -1,13 +1,13 @@
 {
   "name": "@fugood/bricks-ctor",
-  "version": "2.24.1-beta.1",
+  "version": "2.24.1-beta.3",
   "main": "index.ts",
   "scripts": {
     "typecheck": "tsc --noEmit",
     "build": "bun scripts/build.js"
   },
   "dependencies": {
-    "@fugood/bricks-cli": "^2.24.1-beta.0",
+    "@fugood/bricks-cli": "^2.24.1-beta.3",
     "@huggingface/gguf": "^0.3.2",
     "@iarna/toml": "^3.0.0",
     "@modelcontextprotocol/sdk": "^1.15.0",
@@ -6,8 +6,11 @@
  * - Compatible with OpenAI API format
  * - Supports function calling
  * - Streaming responses
+ * - OpenResponses and OpenAI Responses API support
  * - Custom API endpoints, like
  *   - OpenAI API: https://platform.openai.com/docs/guides/text?api-mode=chat
+ *   - OpenAI Responses API: https://platform.openai.com/docs/api-reference/responses/create
+ *   - OpenResponses API: https://www.openresponses.org/reference
  *   - Anthropic API: https://docs.anthropic.com/en/api/openai-sdk
  *   - Gemini API: https://ai.google.dev/gemini-api/docs/openai
  *   - llama.cpp server: https://github.com/ggml-org/llama.cpp/tree/master/tools/server
@@ -29,6 +32,26 @@ import type { TemplateEventPropsMap } from '../../utils/event-props'
 export type GeneratorOpenAILLMActionCompletion = ActionWithParams & {
   __actionName: 'GENERATOR_OPENAI_LLM_COMPLETION'
   params?: Array<
+    | {
+        input: 'apiMode'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'previousResponseId'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'promptCacheKey'
+        value?: string | DataLink | EventProperty
+        mapping?: string
+      }
+    | {
+        input: 'responseOptions'
+        value?: {} | DataLink | EventProperty
+        mapping?: string
+      }
     | {
         input: 'messages'
         value?: Array<any> | DataLink | EventProperty
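To make the new union members concrete, here is a hedged sketch of an action literal that targets a Responses-style endpoint. The supporting types are stubbed, and the param values (the response ID in particular) are made up for illustration:

```ts
// Stubbed supporting types -- the real ones come from the package's shared type modules.
type DataLink = { __typename: 'DataLink'; id: string }
type EventProperty = { __typename: 'EventProperty'; name: string }
type ActionWithParams = { __actionName: string }

type GeneratorOpenAILLMActionCompletion = ActionWithParams & {
  __actionName: 'GENERATOR_OPENAI_LLM_COMPLETION'
  params?: Array<
    | { input: 'apiMode'; value?: string | DataLink | EventProperty; mapping?: string }
    | { input: 'previousResponseId'; value?: string | DataLink | EventProperty; mapping?: string }
    | { input: 'promptCacheKey'; value?: string | DataLink | EventProperty; mapping?: string }
    | { input: 'responseOptions'; value?: {} | DataLink | EventProperty; mapping?: string }
    | { input: 'messages'; value?: Array<any> | DataLink | EventProperty; mapping?: string }
  >
}

// A completion call in responses mode that continues a previous turn (IDs are made up).
const action: GeneratorOpenAILLMActionCompletion = {
  __actionName: 'GENERATOR_OPENAI_LLM_COMPLETION',
  params: [
    { input: 'apiMode', value: 'responses' },
    { input: 'previousResponseId', value: 'resp_abc123' },
    { input: 'messages', value: [{ role: 'user', content: 'And in one sentence?' }] },
  ],
}
```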
@@ -98,6 +121,7 @@ Default property:
 {
   "apiEndpoint": "https://api.openai.com/v1",
   "model": "gpt-4o",
+  "apiMode": "chat_completions",
   "completionMessages": [
     {
       "role": "system",
@@ -117,6 +141,14 @@ Default property:
     apiKey?: string | DataLink
     /* Model name (Default: gpt-4o-mini) */
     model?: string | DataLink
+    /* API mode. Keep chat_completions for legacy OpenAI-compatible endpoints, or use responses for OpenResponses/OpenAI Responses endpoints. */
+    apiMode?: 'chat_completions' | 'responses' | DataLink
+    /* Previous response ID for continuing a Responses/OpenResponses conversation. */
+    previousResponseId?: string | DataLink
+    /* Prompt cache key for Responses/OpenResponses prompt cache optimization. */
+    promptCacheKey?: string | DataLink
+    /* Additional Responses/OpenResponses request options merged into the /responses payload, such as instructions, metadata, reasoning, truncation, service_tier, store, and text. */
+    responseOptions?: {} | DataLink
     /* Chat messages */
     completionMessages?:
       | Array<
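Putting the four new props together, a hedged configuration example: the field names follow the diff, while the option values and the prompt cache key are illustrative assumptions.

```ts
type DataLink = { __typename: 'DataLink'; id: string } // stub

interface OpenAILLMProps {
  apiEndpoint?: string | DataLink
  apiKey?: string | DataLink
  model?: string | DataLink
  apiMode?: 'chat_completions' | 'responses' | DataLink
  previousResponseId?: string | DataLink
  promptCacheKey?: string | DataLink
  responseOptions?: {} | DataLink
}

// Opting into the Responses API; responseOptions keys mirror the /responses payload
// fields named in the comment above (values here are made up).
const props: OpenAILLMProps = {
  apiEndpoint: 'https://api.openai.com/v1',
  model: 'gpt-4o',
  apiMode: 'responses',
  promptCacheKey: 'support-bot-v1',
  responseOptions: {
    instructions: 'Answer concisely.',
    store: false,
    truncation: 'auto',
  },
}
```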
@@ -162,11 +194,11 @@ Default property:
     completionTemperature?: number | DataLink
     /* Top P sampling */
     completionTopP?: number | DataLink
-    /* Frequency penalty */
+    /* Frequency penalty. Only sent in chat_completions mode. */
     completionFrequencyPenalty?: number | DataLink
-    /* Presence penalty */
+    /* Presence penalty. Only sent in chat_completions mode. */
     completionPresencePenalty?: number | DataLink
-    /* Stop sequences */
+    /* Stop sequences. Only sent in chat_completions mode. */
     completionStop?: Array<string | DataLink> | DataLink
   }
   events?: {
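Since frequency penalty, presence penalty, and stop sequences are only sent in chat_completions mode, a request builder presumably gates them on apiMode. A sketch of that gating; the wire field names are the standard OpenAI Chat Completions parameters, but the function itself is hypothetical, not the package's implementation:

```ts
// Hypothetical helper: only chat_completions accepts these sampling fields.
function modeGatedSamplingParams(
  apiMode: 'chat_completions' | 'responses',
  p: {
    completionFrequencyPenalty?: number
    completionPresencePenalty?: number
    completionStop?: string[]
  },
): Record<string, unknown> {
  if (apiMode !== 'chat_completions') return {}
  return {
    frequency_penalty: p.completionFrequencyPenalty,
    presence_penalty: p.completionPresencePenalty,
    stop: p.completionStop,
  }
}
```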
@@ -184,6 +216,30 @@ Default property:
     onCompletionFunctionCall?: Array<
       EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onCompletionFunctionCall']>
     >
+    /* Response created event */
+    onResponseCreated?: Array<
+      EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onResponseCreated']>
+    >
+    /* Output item added event */
+    onOutputItemAdded?: Array<
+      EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onOutputItemAdded']>
+    >
+    /* Output text delta event */
+    onOutputTextDelta?: Array<
+      EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onOutputTextDelta']>
+    >
+    /* Output item done event */
+    onOutputItemDone?: Array<
+      EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onOutputItemDone']>
+    >
+    /* Response completed event */
+    onResponseCompleted?: Array<
+      EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onResponseCompleted']>
+    >
+    /* Response failed event */
+    onResponseFailed?: Array<
+      EventAction<string & keyof TemplateEventPropsMap['OpenaiLlm']['onResponseFailed']>
+    >
   }
   outlets?: {
     /* Evaluating outlet */
@@ -208,6 +264,10 @@ Default property:
       }>
       [key: string]: any
     }>
+    /* Response ID outlet */
+    responseId?: () => Data<string>
+    /* Output items outlet */
+    outputItems?: () => Data<Array<any>>
   }
 }

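The two new outlets pair naturally with previousResponseId: the last response's ID can be read back and fed into the next completion. A minimal sketch under assumed outlet shapes (Data<T> is stubbed and may differ from the package's real type):

```ts
// Assumed outlet surface, mirroring the declarations in the hunk above.
type Data<T> = { value: T } // stub

interface OpenAILLMOutlets {
  responseId?: () => Data<string>
  outputItems?: () => Data<Array<any>>
}

// Read the last response ID so the next turn can continue the conversation.
function nextTurnParams(outlets: OpenAILLMOutlets) {
  const id = outlets.responseId?.().value
  return id ? [{ input: 'previousResponseId' as const, value: id }] : []
}
```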
@@ -217,8 +277,11 @@ Default property:
 - Compatible with OpenAI API format
 - Supports function calling
 - Streaming responses
+- OpenResponses and OpenAI Responses API support
 - Custom API endpoints, like
   - OpenAI API: https://platform.openai.com/docs/guides/text?api-mode=chat
+  - OpenAI Responses API: https://platform.openai.com/docs/api-reference/responses/create
+  - OpenResponses API: https://www.openresponses.org/reference
   - Anthropic API: https://docs.anthropic.com/en/api/openai-sdk
   - Gemini API: https://ai.google.dev/gemini-api/docs/openai
   - llama.cpp server: https://github.com/ggml-org/llama.cpp/tree/master/tools/server */
@@ -235,7 +298,12 @@ export type GeneratorOpenAILLM = Generator &
     | SwitchCondData
     | {
         __typename: 'SwitchCondInnerStateOutlet'
-        outlet: 'isEvaluating' | 'completionResult' | 'completionDetails'
+        outlet:
+          | 'isEvaluating'
+          | 'completionResult'
+          | 'completionDetails'
+          | 'responseId'
+          | 'outputItems'
         value: any
       }
   }>
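A switch condition can now branch on the two new inner-state outlets as well; a minimal hedged example with the SwitchCond shape stubbed from the hunk above:

```ts
// Stub of the widened union from the hunk above.
type SwitchCondInnerStateOutlet = {
  __typename: 'SwitchCondInnerStateOutlet'
  outlet: 'isEvaluating' | 'completionResult' | 'completionDetails' | 'responseId' | 'outputItems'
  value: any
}

// e.g. route to an "idle" branch while no response has been created yet.
const cond: SwitchCondInnerStateOutlet = {
  __typename: 'SwitchCondInnerStateOutlet',
  outlet: 'responseId',
  value: '',
}
```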
@@ -762,6 +762,46 @@ export const templateEventPropsMap = {
       GENERATOR_OPENAI_LLM_COMPLETION_FUNCTION_CALL_NAME: 'string',
       GENERATOR_OPENAI_LLM_COMPLETION_FUNCTION_ARGUMENTS: 'string',
     },
+    onResponseCreated: {
+      GENERATOR_OPENAI_LLM_RESPONSE_ID: 'string',
+      GENERATOR_OPENAI_LLM_RESPONSE_STATUS: 'string',
+      GENERATOR_OPENAI_LLM_RESPONSE: '{}',
+    },
+    onOutputItemAdded: {
+      GENERATOR_OPENAI_LLM_RESPONSE_ID: 'string',
+      GENERATOR_OPENAI_LLM_OUTPUT_ITEM_ID: 'string',
+      GENERATOR_OPENAI_LLM_OUTPUT_ITEM_TYPE: 'string',
+      GENERATOR_OPENAI_LLM_OUTPUT_ITEM: '{}',
+    },
+    onOutputTextDelta: {
+      GENERATOR_OPENAI_LLM_RESPONSE_ID: 'string',
+      GENERATOR_OPENAI_LLM_OUTPUT_ITEM_ID: 'string',
+      GENERATOR_OPENAI_LLM_OUTPUT_INDEX: 'number',
+      GENERATOR_OPENAI_LLM_CONTENT_INDEX: 'number',
+      GENERATOR_OPENAI_LLM_COMPLETION_TOKEN: 'string',
+      GENERATOR_OPENAI_LLM_COMPLETION_RESULT: 'string',
+      GENERATOR_OPENAI_LLM_COMPLETION_DETAILS: '{}',
+    },
+    onOutputItemDone: {
+      GENERATOR_OPENAI_LLM_RESPONSE_ID: 'string',
+      GENERATOR_OPENAI_LLM_OUTPUT_ITEM_ID: 'string',
+      GENERATOR_OPENAI_LLM_OUTPUT_ITEM_TYPE: 'string',
+      GENERATOR_OPENAI_LLM_OUTPUT_ITEM: '{}',
+    },
+    onResponseCompleted: {
+      GENERATOR_OPENAI_LLM_RESPONSE_ID: 'string',
+      GENERATOR_OPENAI_LLM_RESPONSE_STATUS: 'string',
+      GENERATOR_OPENAI_LLM_COMPLETION_RESULT: 'string',
+      GENERATOR_OPENAI_LLM_TOOL_CALLS: 'Array<any>',
+      GENERATOR_OPENAI_LLM_OUTPUT_ITEMS: 'Array<any>',
+      GENERATOR_OPENAI_LLM_RESPONSE: '{}',
+    },
+    onResponseFailed: {
+      GENERATOR_OPENAI_LLM_RESPONSE_ID: 'string',
+      GENERATOR_OPENAI_LLM_RESPONSE_STATUS: 'string',
+      GENERATOR_OPENAI_LLM_ERROR: 'string',
+      GENERATOR_OPENAI_LLM_RESPONSE: '{}',
+    },
   },
   OpenaiTts: {
     onContextStateChange: { GENERATOR_OPENAI_TTS_CONTEXT_STATE: 'string' },
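The six event-prop maps trace the Responses streaming lifecycle (created → output item added → text deltas → output item done → completed/failed). A small dispatcher sketch that consumes those props; only the constant names come from the diff, the handler shape is an assumption:

```ts
type ResponsesLifecycleEvent =
  | 'onResponseCreated'
  | 'onOutputItemAdded'
  | 'onOutputTextDelta'
  | 'onOutputItemDone'
  | 'onResponseCompleted'
  | 'onResponseFailed'

// Accumulate streamed text and surface the terminal states.
function makeResponseCollector() {
  let text = ''
  return {
    dispatch(event: ResponsesLifecycleEvent, props: Record<string, any>) {
      switch (event) {
        case 'onOutputTextDelta':
          text += props.GENERATOR_OPENAI_LLM_COMPLETION_TOKEN ?? ''
          break
        case 'onResponseCompleted':
          console.log('done:', props.GENERATOR_OPENAI_LLM_COMPLETION_RESULT ?? text)
          break
        case 'onResponseFailed':
          console.error('failed:', props.GENERATOR_OPENAI_LLM_ERROR)
          break
      }
    },
  }
}
```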