@kjerneverk/execution-openai 1.0.5-dev.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/.kodrdriv-test-cache.json +6 -0
  2. package/LICENSE +18 -0
  3. package/README.md +80 -0
  4. package/dist/index.d.ts +85 -0
  5. package/dist/index.js +103 -0
  6. package/dist/index.js.map +1 -0
  7. package/output/kodrdriv/.kodrdriv-parallel-context.json +589 -0
  8. package/output/kodrdriv/260110-1751-release-notes.md +39 -0
  9. package/output/kodrdriv/260112-2207-commit-message.md +1 -0
  10. package/output/kodrdriv/260112-2208-release-notes.md +36 -0
  11. package/output/kodrdriv/260112-2341-commit-message.md +1 -0
  12. package/output/kodrdriv/260113-0029-commit-message.md +1 -0
  13. package/output/kodrdriv/260113-0030-release-notes.md +5 -0
  14. package/output/kodrdriv/260114-0656-commit-message.md +6 -0
  15. package/output/kodrdriv/260115-0616-commit-message.md +1 -0
  16. package/output/kodrdriv/260115-0739-commit-message.md +1 -0
  17. package/output/kodrdriv/260126-0852-commit-message.md +1 -0
  18. package/output/kodrdriv/260126-0853-release-notes.md +47 -0
  19. package/output/kodrdriv/260126-0854-release-notes.md +50 -0
  20. package/output/kodrdriv/260128-0929-commit-message.md +1 -0
  21. package/output/kodrdriv/260128-0929-release-notes.md +24 -0
  22. package/output/kodrdriv/260128-0959-commit-message.md +6 -0
  23. package/output/kodrdriv/260128-1127-commit-message.md +1 -0
  24. package/output/kodrdriv/260128-1127-release-notes.md +30 -0
  25. package/output/kodrdriv/260130-1639-commit-message.md +5 -0
  26. package/output/kodrdriv/260130-1652-commit-message.md +1 -0
  27. package/output/kodrdriv/260130-1652-release-notes.md +27 -0
  28. package/output/kodrdriv/RELEASE_NOTES.md +25 -0
  29. package/output/kodrdriv/RELEASE_TITLE.md +1 -0
  30. package/output/kodrdriv/agentic-reflection-commit-2026-01-13T06-07-12-251Z.md +97 -0
  31. package/output/kodrdriv/agentic-reflection-commit-2026-01-13T07-41-08-151Z.md +97 -0
  32. package/output/kodrdriv/agentic-reflection-commit-2026-01-13T08-29-26-449Z.md +97 -0
  33. package/output/kodrdriv/agentic-reflection-commit-2026-01-14T14-56-03-314Z.md +102 -0
  34. package/output/kodrdriv/agentic-reflection-commit-2026-01-15T14-16-42-406Z.md +97 -0
  35. package/output/kodrdriv/agentic-reflection-commit-2026-01-15T15-39-56-545Z.md +114 -0
  36. package/output/kodrdriv/agentic-reflection-commit-2026-01-26T16-52-42-503Z.md +152 -0
  37. package/output/kodrdriv/agentic-reflection-commit-2026-01-28T17-59-30-913Z.md +204 -0
  38. package/output/kodrdriv/agentic-reflection-commit-2026-01-31T00-39-30-443Z.md +245 -0
  39. package/output/kodrdriv/agentic-reflection-commit-2026-01-31T00-52-07-165Z.md +152 -0
  40. package/output/kodrdriv/agentic-reflection-release-2026-01-11T01-51-19-115Z.md +277 -0
  41. package/output/kodrdriv/agentic-reflection-release-2026-01-13T06-08-15-507Z.md +289 -0
  42. package/output/kodrdriv/agentic-reflection-release-2026-01-13T08-30-05-728Z.md +312 -0
  43. package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-53-34-128Z.md +417 -0
  44. package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-53-55-634Z.md +391 -0
  45. package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-54-13-700Z.md +456 -0
  46. package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-54-22-655Z.md +496 -0
  47. package/output/kodrdriv/agentic-reflection-release-2026-01-26T16-54-32-261Z.md +490 -0
  48. package/output/kodrdriv/agentic-reflection-release-2026-01-31T00-52-29-053Z.md +334 -0
  49. package/package.json +58 -0
  50. package/package.json~ +57 -0
package/.kodrdriv-test-cache.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "/Users/tobrien/gitw/kjerneverk/execution-openai": {
+     "lastTestRun": 1768074156142,
+     "lastCommitHash": "7948ae0dfcf8860fcbcc9965dff01f047e52a5c2"
+   }
+ }
package/LICENSE ADDED
@@ -0,0 +1,18 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ Copyright 2025 Tim O'Brien
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
package/README.md ADDED
@@ -0,0 +1,80 @@
+ # execution-openai
+
+ OpenAI provider implementation for LLM execution. Implements the `Provider` interface from the `execution` package.
+
+ ## Installation
+
+ ```bash
+ npm install execution-openai openai
+ ```
+
+ ## Usage
+
+ ```typescript
+ import { OpenAIProvider, createOpenAIProvider } from 'execution-openai';
+
+ // Create provider
+ const provider = createOpenAIProvider();
+
+ // Or use the class directly
+ const provider = new OpenAIProvider();
+
+ // Execute a request
+ const response = await provider.execute(
+   {
+     model: 'gpt-4o',
+     messages: [
+       { role: 'system', content: 'You are helpful.' },
+       { role: 'user', content: 'Hello!' }
+     ],
+     addMessage: () => {},
+   },
+   {
+     apiKey: process.env.OPENAI_API_KEY,
+     temperature: 0.7,
+     maxTokens: 1000,
+   }
+ );
+
+ console.log(response.content);
+ console.log(response.usage); // { inputTokens: X, outputTokens: Y }
+ ```
+
+ ## Supported Models
+
+ The provider supports all OpenAI models:
+ - GPT-4 family (gpt-4, gpt-4o, gpt-4-turbo, etc.)
+ - O-series (o1, o1-preview, o1-mini, o3, etc.)
+ - GPT-3.5 family
+
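+ The optional `supportsModel` check can be used to route a request before calling `execute`. A minimal sketch (the model names below are only illustrative):
+
+ ```typescript
+ const provider = createOpenAIProvider();
+
+ provider.supportsModel('gpt-4o');        // true
+ provider.supportsModel('o1-mini');       // true
+ provider.supportsModel('claude-3-opus'); // false -- hand off to another provider
+ ```
+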
+ ## API Key
+
+ Set via:
+ 1. `options.apiKey` parameter
+ 2. `OPENAI_API_KEY` environment variable
+
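+ For example (a sketch; `request` stands in for the request object shown inline in the Usage section):
+
+ ```typescript
+ // 1. Pass the key explicitly
+ await provider.execute(request, { apiKey: 'sk-...' }); // placeholder key
+
+ // 2. Omit it and let the provider read OPENAI_API_KEY from the environment
+ await provider.execute(request, {});
+ ```
+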
+ ## Response Format
+
+ ```typescript
+ interface ProviderResponse {
+   content: string;
+   model: string;
+   usage?: {
+     inputTokens: number;
+     outputTokens: number;
+   };
+   toolCalls?: ToolCall[];
+ }
+ ```
+
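+ When the model responds with tool calls, they are surfaced on `toolCalls`. A small sketch of consuming them (`response` is the value returned by `execute` in the Usage section):
+
+ ```typescript
+ for (const call of response.toolCalls ?? []) {
+   // `arguments` is a JSON-encoded string, as returned by the OpenAI API
+   const args = JSON.parse(call.function.arguments);
+   console.log(`tool requested: ${call.function.name}`, args);
+ }
+ ```
+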
+ ## Related Packages
+
+ - `execution` - Core interfaces (no SDK dependencies)
+ - `execution-anthropic` - Anthropic provider
+ - `execution-gemini` - Google Gemini provider
+
+ ## License
+
+ Apache-2.0
+
+ <!-- v1.0.0 -->
package/dist/index.d.ts ADDED
@@ -0,0 +1,85 @@
+ /**
+  * Execution OpenAI Package
+  *
+  * OpenAI provider implementation for LLM execution.
+  *
+  * @packageDocumentation
+  */
+
+ /**
+  * Create a new OpenAI provider instance
+  */
+ export declare function createOpenAIProvider(): OpenAIProvider;
+
+ export declare interface ExecutionOptions {
+   apiKey?: string;
+   model?: string;
+   temperature?: number;
+   maxTokens?: number;
+   timeout?: number;
+   retries?: number;
+ }
+
+ export declare interface Message {
+   role: 'user' | 'assistant' | 'system' | 'developer' | 'tool';
+   content: string | string[] | null;
+   name?: string;
+ }
+
+ export declare type Model = string;
+
+ /**
+  * OpenAI Provider implementation
+  */
+ declare class OpenAIProvider implements Provider {
+   readonly name = "openai";
+   /**
+    * Check if this provider supports a given model
+    */
+   supportsModel(model: Model): boolean;
+   /**
+    * Execute a request against OpenAI
+    */
+   execute(request: Request_2, options?: ExecutionOptions): Promise<ProviderResponse>;
+ }
+ export { OpenAIProvider }
+ export default OpenAIProvider;
+
+ export declare interface Provider {
+   readonly name: string;
+   execute(request: Request_2, options?: ExecutionOptions): Promise<ProviderResponse>;
+   supportsModel?(model: Model): boolean;
+ }
+
+ export declare interface ProviderResponse {
+   content: string;
+   model: string;
+   usage?: {
+     inputTokens: number;
+     outputTokens: number;
+   };
+   toolCalls?: Array<{
+     id: string;
+     type: 'function';
+     function: {
+       name: string;
+       arguments: string;
+     };
+   }>;
+ }
+
+ declare interface Request_2 {
+   messages: Message[];
+   model: Model;
+   responseFormat?: any;
+   validator?: any;
+   addMessage(message: Message): void;
+ }
+ export { Request_2 as Request }
+
+ /**
+  * Package version
+  */
+ export declare const VERSION = "0.0.1";
+
+ export { }
package/dist/index.js ADDED
@@ -0,0 +1,103 @@
+ import OpenAI from "openai";
+ import { getRedactor } from "@utilarium/offrecord";
+ import { configureErrorSanitizer, configureSecretGuard, createSafeError } from "@utilarium/spotclean";
+ const redactor = getRedactor();
+ redactor.register({
+   name: "openai",
+   patterns: [
+     /sk-[a-zA-Z0-9]{20,}/g,
+     /sk-proj-[a-zA-Z0-9_-]+/g
+   ],
+   validator: (key) => /^sk-(proj-)?[a-zA-Z0-9_-]{20,}$/.test(key),
+   envVar: "OPENAI_API_KEY",
+   description: "OpenAI API keys"
+ });
+ configureErrorSanitizer({
+   enabled: true,
+   environment: process.env.NODE_ENV === "production" ? "production" : "development",
+   includeCorrelationId: true,
+   sanitizeStackTraces: process.env.NODE_ENV === "production",
+   maxMessageLength: 500
+ });
+ configureSecretGuard({
+   enabled: true,
+   redactionText: "[REDACTED]",
+   preservePartial: false,
+   preserveLength: 0,
+   customPatterns: [
+     { name: "openai", pattern: /sk-[a-zA-Z0-9]{20,}/g, description: "OpenAI API key" },
+     { name: "openai-proj", pattern: /sk-proj-[a-zA-Z0-9_-]+/g, description: "OpenAI project key" }
+   ]
+ });
+ class OpenAIProvider {
+   name = "openai";
+   /**
+    * Check if this provider supports a given model
+    */
+   supportsModel(model) {
+     if (!model) return true;
+     return model.startsWith("gpt") || model.startsWith("o1") || model.startsWith("o3") || model.startsWith("o4");
+   }
+   /**
+    * Execute a request against OpenAI
+    */
+   async execute(request, options = {}) {
+     const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
+     if (!apiKey) {
+       throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable.");
+     }
+     const validation = redactor.validateKey(apiKey, "openai");
+     if (!validation.valid) {
+       throw new Error("Invalid OpenAI API key format");
+     }
+     try {
+       const client = new OpenAI({ apiKey });
+       const model = options.model || request.model || "gpt-4";
+       const messages = request.messages.map((msg) => {
+         const role = msg.role === "developer" ? "system" : msg.role;
+         return {
+           role,
+           content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
+           name: msg.name
+         };
+       });
+       const response = await client.chat.completions.create({
+         model,
+         messages,
+         temperature: options.temperature,
+         max_tokens: options.maxTokens,
+         response_format: request.responseFormat
+       });
+       const choice = response.choices[0];
+       return {
+         content: choice.message.content || "",
+         model: response.model,
+         usage: response.usage ? {
+           inputTokens: response.usage.prompt_tokens,
+           outputTokens: response.usage.completion_tokens
+         } : void 0,
+         toolCalls: choice.message.tool_calls?.filter((tc) => tc.type === "function").map((tc) => ({
+           id: tc.id,
+           type: "function",
+           function: {
+             name: tc.function.name,
+             arguments: tc.function.arguments
+           }
+         }))
+       };
+     } catch (error) {
+       throw createSafeError(error, { provider: "openai" });
+     }
+   }
+ }
+ function createOpenAIProvider() {
+   return new OpenAIProvider();
+ }
+ const VERSION = "0.0.1";
+ export {
+   OpenAIProvider,
+   VERSION,
+   createOpenAIProvider,
+   OpenAIProvider as default
+ };
+ //# sourceMappingURL=index.js.map
package/dist/index.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.js","sources":["../src/index.ts"],"sourcesContent":["/**\n * Execution OpenAI Package\n *\n * OpenAI provider implementation for LLM execution.\n *\n * @packageDocumentation\n */\n\nimport OpenAI from 'openai';\nimport { getRedactor } from '@utilarium/offrecord';\nimport { \n createSafeError, \n configureErrorSanitizer,\n configureSecretGuard,\n} from '@utilarium/spotclean';\n\n// Register OpenAI API key patterns on module load\nconst redactor = getRedactor();\nredactor.register({\n name: 'openai',\n patterns: [\n /sk-[a-zA-Z0-9]{20,}/g,\n /sk-proj-[a-zA-Z0-9_-]+/g,\n ],\n validator: (key: string) => /^sk-(proj-)?[a-zA-Z0-9_-]{20,}$/.test(key),\n envVar: 'OPENAI_API_KEY',\n description: 'OpenAI API keys',\n});\n\n// Configure spotclean for error sanitization\nconfigureErrorSanitizer({\n enabled: true,\n environment: process.env.NODE_ENV === 'production' ? 'production' : 'development',\n includeCorrelationId: true,\n sanitizeStackTraces: process.env.NODE_ENV === 'production',\n maxMessageLength: 500,\n});\n\nconfigureSecretGuard({\n enabled: true,\n redactionText: '[REDACTED]',\n preservePartial: false,\n preserveLength: 0,\n customPatterns: [\n { name: 'openai', pattern: /sk-[a-zA-Z0-9]{20,}/g, description: 'OpenAI API key' },\n { name: 'openai-proj', pattern: /sk-proj-[a-zA-Z0-9_-]+/g, description: 'OpenAI project key' },\n ],\n});\n\n// ===== INLINE TYPES (from 'execution' package) =====\n// These types are duplicated here for build independence.\n// When 'execution' is published, these can be imported from there.\n\nexport type Model = string;\n\nexport interface Message {\n role: 'user' | 'assistant' | 'system' | 'developer' | 'tool';\n content: string | string[] | null;\n name?: string;\n}\n\nexport interface Request {\n messages: Message[];\n model: Model;\n responseFormat?: any;\n validator?: any;\n addMessage(message: Message): void;\n}\n\nexport interface ProviderResponse {\n content: string;\n model: string;\n usage?: {\n inputTokens: number;\n outputTokens: number;\n };\n toolCalls?: Array<{\n id: string;\n type: 'function';\n function: {\n name: string;\n arguments: string;\n };\n }>;\n}\n\nexport interface ExecutionOptions {\n apiKey?: string;\n model?: string;\n temperature?: number;\n maxTokens?: number;\n timeout?: number;\n retries?: number;\n}\n\nexport interface Provider {\n readonly name: string;\n execute(request: Request, options?: ExecutionOptions): Promise<ProviderResponse>;\n supportsModel?(model: Model): boolean;\n}\n\n/**\n * OpenAI Provider implementation\n */\nexport class OpenAIProvider implements Provider {\n readonly name = 'openai';\n\n /**\n * Check if this provider supports a given model\n */\n supportsModel(model: Model): boolean {\n if (!model) return true; // Default to OpenAI\n return (\n model.startsWith('gpt') ||\n model.startsWith('o1') ||\n model.startsWith('o3') ||\n model.startsWith('o4')\n );\n }\n\n /**\n * Execute a request against OpenAI\n */\n async execute(\n request: Request,\n options: ExecutionOptions = {}\n ): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.OPENAI_API_KEY;\n \n if (!apiKey) {\n throw new Error('OpenAI API key is required. 
Set OPENAI_API_KEY environment variable.');\n }\n\n // Validate key format\n const validation = redactor.validateKey(apiKey, 'openai');\n if (!validation.valid) {\n throw new Error('Invalid OpenAI API key format');\n }\n\n try {\n const client = new OpenAI({ apiKey });\n\n const model = options.model || request.model || 'gpt-4';\n\n // Convert messages to OpenAI format\n const messages = request.messages.map((msg) => {\n const role =\n msg.role === 'developer' ? 'system' : msg.role;\n\n return {\n role: role,\n content:\n typeof msg.content === 'string'\n ? msg.content\n : JSON.stringify(msg.content),\n name: msg.name,\n } as any;\n });\n\n const response = await client.chat.completions.create({\n model: model,\n messages: messages,\n temperature: options.temperature,\n max_tokens: options.maxTokens,\n response_format: request.responseFormat,\n });\n\n const choice = response.choices[0];\n\n return {\n content: choice.message.content || '',\n model: response.model,\n usage: response.usage\n ? {\n inputTokens: response.usage.prompt_tokens,\n outputTokens: response.usage.completion_tokens,\n }\n : undefined,\n toolCalls: choice.message.tool_calls\n ?.filter((tc) => tc.type === 'function')\n .map((tc) => ({\n id: tc.id,\n type: 'function' as const,\n function: {\n name: (tc as any).function.name,\n arguments: (tc as any).function.arguments,\n },\n })),\n };\n } catch (error) {\n // Sanitize error to remove any API keys from error messages\n // Use spotclean for comprehensive error sanitization\n throw createSafeError(error as Error, { provider: 'openai' });\n }\n }\n}\n\n/**\n * Create a new OpenAI provider instance\n */\nexport function createOpenAIProvider(): OpenAIProvider {\n return new OpenAIProvider();\n}\n\n/**\n * Package version\n */\nexport const VERSION = '0.0.1';\n\nexport default 
OpenAIProvider;\n"],"names":[],"mappings":";;;AAiBA,MAAM,WAAW,YAAA;AACjB,SAAS,SAAS;AAAA,EACd,MAAM;AAAA,EACN,UAAU;AAAA,IACN;AAAA,IACA;AAAA,EAAA;AAAA,EAEJ,WAAW,CAAC,QAAgB,kCAAkC,KAAK,GAAG;AAAA,EACtE,QAAQ;AAAA,EACR,aAAa;AACjB,CAAC;AAGD,wBAAwB;AAAA,EACpB,SAAS;AAAA,EACT,aAAa,QAAQ,IAAI,aAAa,eAAe,eAAe;AAAA,EACpE,sBAAsB;AAAA,EACtB,qBAAqB,QAAQ,IAAI,aAAa;AAAA,EAC9C,kBAAkB;AACtB,CAAC;AAED,qBAAqB;AAAA,EACjB,SAAS;AAAA,EACT,eAAe;AAAA,EACf,iBAAiB;AAAA,EACjB,gBAAgB;AAAA,EAChB,gBAAgB;AAAA,IACZ,EAAE,MAAM,UAAU,SAAS,wBAAwB,aAAa,iBAAA;AAAA,IAChE,EAAE,MAAM,eAAe,SAAS,2BAA2B,aAAa,qBAAA;AAAA,EAAqB;AAErG,CAAC;AAyDM,MAAM,eAAmC;AAAA,EACnC,OAAO;AAAA;AAAA;AAAA;AAAA,EAKhB,cAAc,OAAuB;AACjC,QAAI,CAAC,MAAO,QAAO;AACnB,WACI,MAAM,WAAW,KAAK,KACtB,MAAM,WAAW,IAAI,KACrB,MAAM,WAAW,IAAI,KACrB,MAAM,WAAW,IAAI;AAAA,EAE7B;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,QACF,SACA,UAA4B,IACH;AACzB,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI;AAE7C,QAAI,CAAC,QAAQ;AACT,YAAM,IAAI,MAAM,sEAAsE;AAAA,IAC1F;AAGA,UAAM,aAAa,SAAS,YAAY,QAAQ,QAAQ;AACxD,QAAI,CAAC,WAAW,OAAO;AACnB,YAAM,IAAI,MAAM,+BAA+B;AAAA,IACnD;AAEA,QAAI;AACA,YAAM,SAAS,IAAI,OAAO,EAAE,QAAQ;AAEpC,YAAM,QAAQ,QAAQ,SAAS,QAAQ,SAAS;AAGhD,YAAM,WAAW,QAAQ,SAAS,IAAI,CAAC,QAAQ;AAC3C,cAAM,OACF,IAAI,SAAS,cAAc,WAAW,IAAI;AAE9C,eAAO;AAAA,UACH;AAAA,UACA,SACI,OAAO,IAAI,YAAY,WACjB,IAAI,UACJ,KAAK,UAAU,IAAI,OAAO;AAAA,UACpC,MAAM,IAAI;AAAA,QAAA;AAAA,MAElB,CAAC;AAED,YAAM,WAAW,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,QAClD;AAAA,QACA;AAAA,QACA,aAAa,QAAQ;AAAA,QACrB,YAAY,QAAQ;AAAA,QACpB,iBAAiB,QAAQ;AAAA,MAAA,CAC5B;AAED,YAAM,SAAS,SAAS,QAAQ,CAAC;AAEjC,aAAO;AAAA,QACH,SAAS,OAAO,QAAQ,WAAW;AAAA,QACnC,OAAO,SAAS;AAAA,QAChB,OAAO,SAAS,QACV;AAAA,UACE,aAAa,SAAS,MAAM;AAAA,UAC5B,cAAc,SAAS,MAAM;AAAA,QAAA,IAE/B;AAAA,QACN,WAAW,OAAO,QAAQ,YACpB,OAAO,CAAC,OAAO,GAAG,SAAS,UAAU,EACtC,IAAI,CAAC,QAAQ;AAAA,UACV,IAAI,GAAG;AAAA,UACP,MAAM;AAAA,UACN,UAAU;AAAA,YACN,MAAO,GAAW,SAAS;AAAA,YAC3B,WAAY,GAAW,SAAS;AAAA,UAAA;AAAA,QACpC,EACF;AAAA,MAAA;AAAA,IAEd,SAAS,OAAO;AAGZ,YAAM,gBAAgB,OAAgB,EAAE,UAAU,UAAU;AAAA,IAChE;AAAA,EACJ;AACJ;AAKO,SAAS,uBAAuC;AACnD,SAAO,IAAI,eAAA;AACf;AAKO,MAAM,UAAU;"}