@almadar/llm 2.3.3 → 2.3.5
This diff compares publicly available package versions as released to their public registries. It is provided for informational purposes only.
- package/dist/{chunk-KH4JNOLT.js → chunk-3OVQNNPN.js} +1 -1
- package/dist/{chunk-KH4JNOLT.js.map → chunk-3OVQNNPN.js.map} +1 -1
- package/dist/{chunk-M4RXWVN7.js → chunk-FVMB5NND.js} +1 -1
- package/dist/{chunk-M4RXWVN7.js.map → chunk-FVMB5NND.js.map} +1 -1
- package/dist/{chunk-QOOSH67G.js → chunk-QHJ3T46X.js} +1 -1
- package/dist/{chunk-QOOSH67G.js.map → chunk-QHJ3T46X.js.map} +1 -1
- package/dist/client.d.ts +63 -0
- package/dist/client.js +1 -1
- package/dist/index.d.ts +42 -0
- package/dist/index.js +3 -3
- package/dist/index.js.map +1 -1
- package/dist/providers/index.d.ts +8 -0
- package/dist/providers/index.js +1 -1
- package/dist/structured-output.d.ts +8 -0
- package/dist/structured-output.js +1 -1
- package/package.json +1 -1
- package/src/client.ts +63 -0
- package/src/continuation.ts +32 -0
- package/src/providers/masar.ts +8 -0
- package/src/structured-output.ts +8 -0
- package/src/truncation-detector.ts +10 -0
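The two hunks below are single-line minified source maps, so the readable content is the TypeScript embedded in their `sourcesContent` fields. For orientation, here is a minimal usage sketch of the structured-output client the first map embeds; it assumes `getStructuredOutputClient` is re-exported from the package index and that `OPENAI_API_KEY` is set:

```ts
import { getStructuredOutputClient } from '@almadar/llm';

// Hypothetical illustration: any JSON Schema accepted by OpenAI's
// json_schema response_format can be passed as `jsonSchema`.
const jsonSchema = {
  type: 'object',
  properties: { title: { type: 'string' }, done: { type: 'boolean' } },
  required: ['title', 'done'],
  additionalProperties: false,
};

const client = getStructuredOutputClient(); // singleton; throws if OPENAI_API_KEY is unset
const result = await client.generate<{ title: string; done: boolean }>({
  userRequest: 'A todo item for buying milk',
  jsonSchema,
  schemaName: 'todo_item',
});

console.log(result.data, result.usage.totalTokens, `${result.latencyMs}ms`);
```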
@@ -1 +1 @@
-
{"version":3,"sources":["../src/structured-output.ts"],"sourcesContent":["/**\n * Structured Output Client for OpenAI\n *\n * Uses OpenAI's structured outputs feature (json_schema response_format)\n * to guarantee schema compliance at generation time.\n *\n * The system prompt builder is injectable so consumers can provide\n * domain-specific prompts (e.g., orbital schema references).\n *\n * @packageDocumentation\n */\n\nimport OpenAI from 'openai';\nimport type { ChatCompletionCreateParamsNonStreaming } from 'openai/resources/chat/completions';\nimport { z } from 'zod';\nimport {\n RateLimiter,\n getGlobalRateLimiter,\n type RateLimiterOptions,\n} from './rate-limiter.js';\nimport { TokenTracker, getGlobalTokenTracker } from './token-tracker.js';\n\n// ============================================================================\n// Types\n// ============================================================================\n\n/**\n * JSON Schema type used for OpenAI structured outputs.\n */\nexport interface JsonSchema {\n type?: string | string[];\n properties?: Record<string, JsonSchema>;\n required?: string[];\n items?: JsonSchema;\n enum?: unknown[];\n const?: unknown;\n anyOf?: JsonSchema[];\n oneOf?: JsonSchema[];\n allOf?: JsonSchema[];\n $ref?: string;\n $defs?: Record<string, JsonSchema>;\n definitions?: Record<string, JsonSchema>;\n additionalProperties?: boolean | JsonSchema;\n description?: string;\n default?: unknown;\n minItems?: number;\n maxItems?: number;\n minLength?: number;\n}\n\nexport interface StructuredOutputOptions {\n model?: string;\n temperature?: number;\n maxTokens?: number;\n rateLimiter?: RateLimiterOptions;\n useGlobalRateLimiter?: boolean;\n trackTokens?: boolean;\n}\n\nexport interface StructuredGenerationOptions {\n /** User's natural language request */\n userRequest: string;\n /** Model to use (overrides client default) */\n model?: string;\n /** Temperature (overrides client default) */\n temperature?: number;\n /** Maximum tokens (overrides client default) */\n maxTokens?: number;\n /** JSON Schema for structured output */\n jsonSchema?: JsonSchema;\n /** Schema name for the json_schema response format */\n schemaName?: string;\n /** System prompt override */\n systemPrompt?: string;\n /** System prompt builder function (called dynamically) */\n buildSystemPrompt?: () => string;\n /** Additional system prompt instructions */\n additionalInstructions?: string;\n /** Existing context for updates (e.g., existing schema JSON) */\n existingContext?: string;\n /** Skip post-generation validation (default: false) */\n skipValidation?: boolean;\n}\n\nexport interface StructuredGenerationResult<T = unknown> {\n /** Generated data (guaranteed to match JSON Schema structure) */\n data: T;\n /** Raw JSON string from API */\n raw: string;\n /** Token usage statistics */\n usage: {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n };\n /** Generation latency in milliseconds */\n latencyMs: number;\n /** Model used for generation */\n model: string;\n /** Zod validation result (if not skipped) */\n zodValidation?: {\n success: boolean;\n errors?: z.ZodError['errors'];\n };\n}\n\nexport const STRUCTURED_OUTPUT_MODELS = {\n GPT5_MINI: 'gpt-5-mini',\n GPT4O_MINI: 'gpt-4o-mini',\n GPT4O: 'gpt-4o',\n GPT4O_2024_08_06: 'gpt-4o-2024-08-06',\n} as const;\n\n// ============================================================================\n// Default System Prompt\n// ============================================================================\n\nconst 
DEFAULT_SYSTEM_PROMPT = `You are an expert application architect that generates structured schemas from natural language requirements.\n\nGenerate a complete, well-structured schema based on the user's requirements. Follow the JSON Schema structure exactly.`;\n\n// ============================================================================\n// Structured Output Client\n// ============================================================================\n\nexport class StructuredOutputClient {\n private openai: OpenAI;\n private rateLimiter: RateLimiter;\n private tokenTracker: TokenTracker | null;\n private defaultModel: string;\n private defaultTemperature: number;\n private defaultMaxTokens: number;\n\n constructor(options: StructuredOutputOptions = {}) {\n const apiKey = process.env.OPENAI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPENAI_API_KEY environment variable is required for StructuredOutputClient',\n );\n }\n\n this.openai = new OpenAI({ apiKey });\n this.defaultModel = options.model || STRUCTURED_OUTPUT_MODELS.GPT5_MINI;\n this.defaultTemperature = options.temperature ?? 0.3;\n this.defaultMaxTokens = options.maxTokens ?? 16384;\n\n this.rateLimiter =\n options.useGlobalRateLimiter !== false\n ? getGlobalRateLimiter(options.rateLimiter)\n : new RateLimiter(options.rateLimiter);\n\n this.tokenTracker =\n options.trackTokens !== false\n ? getGlobalTokenTracker(this.defaultModel)\n : null;\n\n console.log(\n `[StructuredOutputClient] Initialized with model: ${this.defaultModel}`,\n );\n }\n\n private usesMaxCompletionTokens(model: string): boolean {\n const m = model.toLowerCase();\n return (\n m.startsWith('o1') ||\n m.startsWith('gpt-5') ||\n m.includes('o1-') ||\n m.includes('o3')\n );\n }\n\n /**\n * Generate structured output with guaranteed JSON Schema compliance.\n */\n async generate<T = unknown>(\n options: StructuredGenerationOptions,\n ): Promise<StructuredGenerationResult<T>> {\n const model = options.model || this.defaultModel;\n const temperature = options.temperature ?? this.defaultTemperature;\n const maxTokens = options.maxTokens ?? this.defaultMaxTokens;\n const startTime = Date.now();\n\n const jsonSchema: JsonSchema = options.jsonSchema || {\n type: 'object',\n properties: {},\n required: [],\n additionalProperties: false,\n };\n\n // Build system prompt\n let systemPrompt: string;\n if (options.systemPrompt) {\n systemPrompt = options.systemPrompt;\n } else if (options.buildSystemPrompt) {\n systemPrompt = options.buildSystemPrompt();\n } else {\n systemPrompt = DEFAULT_SYSTEM_PROMPT;\n }\n\n if (options.additionalInstructions) {\n systemPrompt += `\\n\\n## Additional Instructions\\n${options.additionalInstructions}`;\n }\n\n // Build user prompt\n let userPrompt = options.userRequest;\n if (options.existingContext) {\n userPrompt += `\\n\\n## Existing Context\\nUpdate based on the above request:\\n\\`\\`\\`json\\n${options.existingContext}\\n\\`\\`\\``;\n }\n\n const schemaName = options.schemaName || 'structured_output';\n\n console.log(\n `[StructuredOutputClient] Generating with ${model}...`,\n );\n console.log(\n `[StructuredOutputClient] Request: \"${options.userRequest.slice(0, 80)}...\"`,\n );\n\n const response = await this.rateLimiter.execute(async () => {\n const isReasoningModel = this.usesMaxCompletionTokens(model);\n\n const tokenParam = isReasoningModel\n ? { max_completion_tokens: maxTokens }\n : { max_tokens: maxTokens };\n\n const tempParam = isReasoningModel ? 
{} : { temperature };\n\n const params: ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: [\n { role: 'system', content: systemPrompt },\n { role: 'user', content: userPrompt },\n ],\n response_format: {\n type: 'json_schema',\n json_schema: {\n name: schemaName,\n strict: true,\n schema: jsonSchema as Record<string, unknown>,\n },\n },\n ...tempParam,\n ...tokenParam,\n };\n\n return this.openai.chat.completions.create(params);\n });\n\n const latencyMs = Date.now() - startTime;\n\n const content = response.choices[0]?.message?.content;\n if (!content) {\n throw new Error('No content in OpenAI response');\n }\n\n let data: T;\n try {\n data = JSON.parse(content) as T;\n } catch (error) {\n throw new Error(`Failed to parse response JSON: ${error}`);\n }\n\n const usage = {\n promptTokens: response.usage?.prompt_tokens || 0,\n completionTokens: response.usage?.completion_tokens || 0,\n totalTokens: response.usage?.total_tokens || 0,\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(usage.promptTokens, usage.completionTokens);\n }\n\n console.log(\n `[StructuredOutputClient] Generated in ${latencyMs}ms, ${usage.totalTokens} tokens`,\n );\n\n let zodValidation: StructuredGenerationResult['zodValidation'];\n if (!options.skipValidation) {\n zodValidation = { success: true };\n }\n\n return {\n data,\n raw: content,\n usage,\n latencyMs,\n model,\n zodValidation,\n };\n }\n\n getModel(): string {\n return this.defaultModel;\n }\n\n getRateLimiterStatus() {\n return this.rateLimiter.getStatus();\n }\n\n getTokenUsage() {\n return this.tokenTracker?.getSummary() ?? null;\n }\n}\n\n// ============================================================================\n// Singleton Instance\n// ============================================================================\n\nlet sharedClient: StructuredOutputClient | null = null;\n\nexport function getStructuredOutputClient(\n options?: StructuredOutputOptions,\n): StructuredOutputClient {\n if (!sharedClient) {\n sharedClient = new StructuredOutputClient(options);\n }\n return sharedClient;\n}\n\nexport function resetStructuredOutputClient(): void {\n sharedClient = null;\n}\n\n// ============================================================================\n// Convenience Functions\n// ============================================================================\n\nexport function isStructuredOutputAvailable(): boolean {\n return 
!!process.env.OPENAI_API_KEY;\n}\n"],"mappings":";;;;;;;AAYA,OAAO,YAAY;AA8FZ,IAAM,2BAA2B;AAAA,EACtC,WAAW;AAAA,EACX,YAAY;AAAA,EACZ,OAAO;AAAA,EACP,kBAAkB;AACpB;AAMA,IAAM,wBAAwB;AAAA;AAAA;AAQvB,IAAM,yBAAN,MAA6B;AAAA,EAQlC,YAAY,UAAmC,CAAC,GAAG;AACjD,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,SAAK,SAAS,IAAI,OAAO,EAAE,OAAO,CAAC;AACnC,SAAK,eAAe,QAAQ,SAAS,yBAAyB;AAC9D,SAAK,qBAAqB,QAAQ,eAAe;AACjD,SAAK,mBAAmB,QAAQ,aAAa;AAE7C,SAAK,cACH,QAAQ,yBAAyB,QAC7B,qBAAqB,QAAQ,WAAW,IACxC,IAAI,YAAY,QAAQ,WAAW;AAEzC,SAAK,eACH,QAAQ,gBAAgB,QACpB,sBAAsB,KAAK,YAAY,IACvC;AAEN,YAAQ;AAAA,MACN,oDAAoD,KAAK,YAAY;AAAA,IACvE;AAAA,EACF;AAAA,EAEQ,wBAAwB,OAAwB;AACtD,UAAM,IAAI,MAAM,YAAY;AAC5B,WACE,EAAE,WAAW,IAAI,KACjB,EAAE,WAAW,OAAO,KACpB,EAAE,SAAS,KAAK,KAChB,EAAE,SAAS,IAAI;AAAA,EAEnB;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,SACJ,SACwC;AACxC,UAAM,QAAQ,QAAQ,SAAS,KAAK;AACpC,UAAM,cAAc,QAAQ,eAAe,KAAK;AAChD,UAAM,YAAY,QAAQ,aAAa,KAAK;AAC5C,UAAM,YAAY,KAAK,IAAI;AAE3B,UAAM,aAAyB,QAAQ,cAAc;AAAA,MACnD,MAAM;AAAA,MACN,YAAY,CAAC;AAAA,MACb,UAAU,CAAC;AAAA,MACX,sBAAsB;AAAA,IACxB;AAGA,QAAI;AACJ,QAAI,QAAQ,cAAc;AACxB,qBAAe,QAAQ;AAAA,IACzB,WAAW,QAAQ,mBAAmB;AACpC,qBAAe,QAAQ,kBAAkB;AAAA,IAC3C,OAAO;AACL,qBAAe;AAAA,IACjB;AAEA,QAAI,QAAQ,wBAAwB;AAClC,sBAAgB;AAAA;AAAA;AAAA,EAAmC,QAAQ,sBAAsB;AAAA,IACnF;AAGA,QAAI,aAAa,QAAQ;AACzB,QAAI,QAAQ,iBAAiB;AAC3B,oBAAc;AAAA;AAAA;AAAA;AAAA;AAAA,EAA4E,QAAQ,eAAe;AAAA;AAAA,IACnH;AAEA,UAAM,aAAa,QAAQ,cAAc;AAEzC,YAAQ;AAAA,MACN,4CAA4C,KAAK;AAAA,IACnD;AACA,YAAQ;AAAA,MACN,sCAAsC,QAAQ,YAAY,MAAM,GAAG,EAAE,CAAC;AAAA,IACxE;AAEA,UAAM,WAAW,MAAM,KAAK,YAAY,QAAQ,YAAY;AAC1D,YAAM,mBAAmB,KAAK,wBAAwB,KAAK;AAE3D,YAAM,aAAa,mBACf,EAAE,uBAAuB,UAAU,IACnC,EAAE,YAAY,UAAU;AAE5B,YAAM,YAAY,mBAAmB,CAAC,IAAI,EAAE,YAAY;AAExD,YAAM,SAAiD;AAAA,QACrD;AAAA,QACA,UAAU;AAAA,UACR,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,UACxC,EAAE,MAAM,QAAQ,SAAS,WAAW;AAAA,QACtC;AAAA,QACA,iBAAiB;AAAA,UACf,MAAM;AAAA,UACN,aAAa;AAAA,YACX,MAAM;AAAA,YACN,QAAQ;AAAA,YACR,QAAQ;AAAA,UACV;AAAA,QACF;AAAA,QACA,GAAG;AAAA,QACH,GAAG;AAAA,MACL;AAEA,aAAO,KAAK,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,IACnD,CAAC;AAED,UAAM,YAAY,KAAK,IAAI,IAAI;AAE/B,UAAM,UAAU,SAAS,QAAQ,CAAC,GAAG,SAAS;AAC9C,QAAI,CAAC,SAAS;AACZ,YAAM,IAAI,MAAM,+BAA+B;AAAA,IACjD;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,KAAK,MAAM,OAAO;AAAA,IAC3B,SAAS,OAAO;AACd,YAAM,IAAI,MAAM,kCAAkC,KAAK,EAAE;AAAA,IAC3D;AAEA,UAAM,QAAQ;AAAA,MACZ,cAAc,SAAS,OAAO,iBAAiB;AAAA,MAC/C,kBAAkB,SAAS,OAAO,qBAAqB;AAAA,MACvD,aAAa,SAAS,OAAO,gBAAgB;AAAA,IAC/C;AAEA,QAAI,KAAK,cAAc;AACrB,WAAK,aAAa,SAAS,MAAM,cAAc,MAAM,gBAAgB;AAAA,IACvE;AAEA,YAAQ;AAAA,MACN,yCAAyC,SAAS,OAAO,MAAM,WAAW;AAAA,IAC5E;AAEA,QAAI;AACJ,QAAI,CAAC,QAAQ,gBAAgB;AAC3B,sBAAgB,EAAE,SAAS,KAAK;AAAA,IAClC;AAEA,WAAO;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAAA,EAEA,WAAmB;AACjB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,uBAAuB;AACrB,WAAO,KAAK,YAAY,UAAU;AAAA,EACpC;AAAA,EAEA,gBAAgB;AACd,WAAO,KAAK,cAAc,WAAW,KAAK;AAAA,EAC5C;AACF;AAMA,IAAI,eAA8C;AAE3C,SAAS,0BACd,SACwB;AACxB,MAAI,CAAC,cAAc;AACjB,mBAAe,IAAI,uBAAuB,OAAO;AAAA,EACnD;AACA,SAAO;AACT;AAEO,SAAS,8BAAoC;AAClD,iBAAe;AACjB;AAMO,SAAS,8BAAuC;AACrD,SAAO,CAAC,CAAC,QAAQ,IAAI;AACvB;","names":[]}
+
{"version":3,"sources":["../src/structured-output.ts"],"sourcesContent":["/**\n * Structured Output Client for OpenAI\n *\n * Uses OpenAI's structured outputs feature (json_schema response_format)\n * to guarantee schema compliance at generation time.\n *\n * The system prompt builder is injectable so consumers can provide\n * domain-specific prompts (e.g., orbital schema references).\n *\n * @packageDocumentation\n */\n\nimport OpenAI from 'openai';\nimport type { ChatCompletionCreateParamsNonStreaming } from 'openai/resources/chat/completions';\nimport { z } from 'zod';\nimport {\n RateLimiter,\n getGlobalRateLimiter,\n type RateLimiterOptions,\n} from './rate-limiter.js';\nimport { TokenTracker, getGlobalTokenTracker } from './token-tracker.js';\n\n// ============================================================================\n// Types\n// ============================================================================\n\n/**\n * JSON Schema type used for OpenAI structured outputs.\n */\nexport interface JsonSchema {\n type?: string | string[];\n properties?: Record<string, JsonSchema>;\n required?: string[];\n items?: JsonSchema;\n enum?: unknown[];\n const?: unknown;\n anyOf?: JsonSchema[];\n oneOf?: JsonSchema[];\n allOf?: JsonSchema[];\n $ref?: string;\n $defs?: Record<string, JsonSchema>;\n definitions?: Record<string, JsonSchema>;\n additionalProperties?: boolean | JsonSchema;\n description?: string;\n default?: unknown;\n minItems?: number;\n maxItems?: number;\n minLength?: number;\n}\n\nexport interface StructuredOutputOptions {\n model?: string;\n temperature?: number;\n maxTokens?: number;\n rateLimiter?: RateLimiterOptions;\n useGlobalRateLimiter?: boolean;\n trackTokens?: boolean;\n}\n\nexport interface StructuredGenerationOptions {\n /** User's natural language request */\n userRequest: string;\n /** Model to use (overrides client default) */\n model?: string;\n /** Temperature (overrides client default) */\n temperature?: number;\n /** Maximum tokens (overrides client default) */\n maxTokens?: number;\n /** JSON Schema for structured output */\n jsonSchema?: JsonSchema;\n /** Schema name for the json_schema response format */\n schemaName?: string;\n /** System prompt override */\n systemPrompt?: string;\n /** System prompt builder function (called dynamically) */\n buildSystemPrompt?: () => string;\n /** Additional system prompt instructions */\n additionalInstructions?: string;\n /** Existing context for updates (e.g., existing schema JSON) */\n existingContext?: string;\n /** Skip post-generation validation (default: false) */\n skipValidation?: boolean;\n}\n\nexport interface StructuredGenerationResult<T = unknown> {\n /** Generated data (guaranteed to match JSON Schema structure) */\n data: T;\n /** Raw JSON string from API */\n raw: string;\n /** Token usage statistics */\n usage: {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n };\n /** Generation latency in milliseconds */\n latencyMs: number;\n /** Model used for generation */\n model: string;\n /** Zod validation result (if not skipped) */\n zodValidation?: {\n success: boolean;\n errors?: z.ZodError['errors'];\n };\n}\n\nexport const STRUCTURED_OUTPUT_MODELS = {\n GPT5_MINI: 'gpt-5-mini',\n GPT4O_MINI: 'gpt-4o-mini',\n GPT4O: 'gpt-4o',\n GPT4O_2024_08_06: 'gpt-4o-2024-08-06',\n} as const;\n\n// ============================================================================\n// Default System Prompt\n// ============================================================================\n\nconst 
DEFAULT_SYSTEM_PROMPT = `You are an expert application architect that generates structured schemas from natural language requirements.\n\nGenerate a complete, well-structured schema based on the user's requirements. Follow the JSON Schema structure exactly.`;\n\n// ============================================================================\n// Structured Output Client\n// ============================================================================\n\nexport class StructuredOutputClient {\n private openai: OpenAI;\n private rateLimiter: RateLimiter;\n private tokenTracker: TokenTracker | null;\n private defaultModel: string;\n private defaultTemperature: number;\n private defaultMaxTokens: number;\n\n constructor(options: StructuredOutputOptions = {}) {\n const apiKey = process.env.OPENAI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPENAI_API_KEY environment variable is required for StructuredOutputClient',\n );\n }\n\n this.openai = new OpenAI({ apiKey });\n this.defaultModel = options.model || STRUCTURED_OUTPUT_MODELS.GPT5_MINI;\n this.defaultTemperature = options.temperature ?? 0.3;\n this.defaultMaxTokens = options.maxTokens ?? 16384;\n\n this.rateLimiter =\n options.useGlobalRateLimiter !== false\n ? getGlobalRateLimiter(options.rateLimiter)\n : new RateLimiter(options.rateLimiter);\n\n this.tokenTracker =\n options.trackTokens !== false\n ? getGlobalTokenTracker(this.defaultModel)\n : null;\n\n console.log(\n `[StructuredOutputClient] Initialized with model: ${this.defaultModel}`,\n );\n }\n\n private usesMaxCompletionTokens(model: string): boolean {\n const m = model.toLowerCase();\n return (\n m.startsWith('o1') ||\n m.startsWith('gpt-5') ||\n m.includes('o1-') ||\n m.includes('o3')\n );\n }\n\n /**\n * Generate structured output with guaranteed JSON Schema compliance.\n */\n async generate<T = unknown>(\n options: StructuredGenerationOptions,\n ): Promise<StructuredGenerationResult<T>> {\n const model = options.model || this.defaultModel;\n const temperature = options.temperature ?? this.defaultTemperature;\n const maxTokens = options.maxTokens ?? this.defaultMaxTokens;\n const startTime = Date.now();\n\n const jsonSchema: JsonSchema = options.jsonSchema || {\n type: 'object',\n properties: {},\n required: [],\n additionalProperties: false,\n };\n\n // Build system prompt\n let systemPrompt: string;\n if (options.systemPrompt) {\n systemPrompt = options.systemPrompt;\n } else if (options.buildSystemPrompt) {\n systemPrompt = options.buildSystemPrompt();\n } else {\n systemPrompt = DEFAULT_SYSTEM_PROMPT;\n }\n\n if (options.additionalInstructions) {\n systemPrompt += `\\n\\n## Additional Instructions\\n${options.additionalInstructions}`;\n }\n\n // Build user prompt\n let userPrompt = options.userRequest;\n if (options.existingContext) {\n userPrompt += `\\n\\n## Existing Context\\nUpdate based on the above request:\\n\\`\\`\\`json\\n${options.existingContext}\\n\\`\\`\\``;\n }\n\n const schemaName = options.schemaName || 'structured_output';\n\n console.log(\n `[StructuredOutputClient] Generating with ${model}...`,\n );\n console.log(\n `[StructuredOutputClient] Request: \"${options.userRequest.slice(0, 80)}...\"`,\n );\n\n const response = await this.rateLimiter.execute(async () => {\n const isReasoningModel = this.usesMaxCompletionTokens(model);\n\n const tokenParam = isReasoningModel\n ? { max_completion_tokens: maxTokens }\n : { max_tokens: maxTokens };\n\n const tempParam = isReasoningModel ? 
{} : { temperature };\n\n const params: ChatCompletionCreateParamsNonStreaming = {\n model,\n messages: [\n { role: 'system', content: systemPrompt },\n { role: 'user', content: userPrompt },\n ],\n response_format: {\n type: 'json_schema',\n json_schema: {\n name: schemaName,\n strict: true,\n schema: jsonSchema as Record<string, unknown>,\n },\n },\n ...tempParam,\n ...tokenParam,\n };\n\n return this.openai.chat.completions.create(params);\n });\n\n const latencyMs = Date.now() - startTime;\n\n const content = response.choices[0]?.message?.content;\n if (!content) {\n throw new Error('No content in OpenAI response');\n }\n\n let data: T;\n try {\n data = JSON.parse(content) as T;\n } catch (error) {\n throw new Error(`Failed to parse response JSON: ${error}`);\n }\n\n const usage = {\n promptTokens: response.usage?.prompt_tokens || 0,\n completionTokens: response.usage?.completion_tokens || 0,\n totalTokens: response.usage?.total_tokens || 0,\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(usage.promptTokens, usage.completionTokens);\n }\n\n console.log(\n `[StructuredOutputClient] Generated in ${latencyMs}ms, ${usage.totalTokens} tokens`,\n );\n\n let zodValidation: StructuredGenerationResult['zodValidation'];\n if (!options.skipValidation) {\n zodValidation = { success: true };\n }\n\n return {\n data,\n raw: content,\n usage,\n latencyMs,\n model,\n zodValidation,\n };\n }\n\n getModel(): string {\n return this.defaultModel;\n }\n\n getRateLimiterStatus() {\n return this.rateLimiter.getStatus();\n }\n\n getTokenUsage() {\n return this.tokenTracker?.getSummary() ?? null;\n }\n}\n\n// ============================================================================\n// Singleton Instance\n// ============================================================================\n\nlet sharedClient: StructuredOutputClient | null = null;\n\n/**\n * Get the singleton structured output client instance.\n *\n * Creates the instance on first call, returns cached instance thereafter.\n *\n * @param {StructuredOutputOptions} [options] - Client configuration options\n * @returns {StructuredOutputClient} The structured output client instance\n */\nexport function getStructuredOutputClient(\n options?: StructuredOutputOptions,\n): StructuredOutputClient {\n if (!sharedClient) {\n sharedClient = new StructuredOutputClient(options);\n }\n return sharedClient;\n}\n\nexport function resetStructuredOutputClient(): void {\n sharedClient = null;\n}\n\n// ============================================================================\n// Convenience Functions\n// ============================================================================\n\nexport function isStructuredOutputAvailable(): boolean {\n return 
!!process.env.OPENAI_API_KEY;\n}\n"],"mappings":";;;;;;;AAYA,OAAO,YAAY;AA8FZ,IAAM,2BAA2B;AAAA,EACtC,WAAW;AAAA,EACX,YAAY;AAAA,EACZ,OAAO;AAAA,EACP,kBAAkB;AACpB;AAMA,IAAM,wBAAwB;AAAA;AAAA;AAQvB,IAAM,yBAAN,MAA6B;AAAA,EAQlC,YAAY,UAAmC,CAAC,GAAG;AACjD,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,SAAK,SAAS,IAAI,OAAO,EAAE,OAAO,CAAC;AACnC,SAAK,eAAe,QAAQ,SAAS,yBAAyB;AAC9D,SAAK,qBAAqB,QAAQ,eAAe;AACjD,SAAK,mBAAmB,QAAQ,aAAa;AAE7C,SAAK,cACH,QAAQ,yBAAyB,QAC7B,qBAAqB,QAAQ,WAAW,IACxC,IAAI,YAAY,QAAQ,WAAW;AAEzC,SAAK,eACH,QAAQ,gBAAgB,QACpB,sBAAsB,KAAK,YAAY,IACvC;AAEN,YAAQ;AAAA,MACN,oDAAoD,KAAK,YAAY;AAAA,IACvE;AAAA,EACF;AAAA,EAEQ,wBAAwB,OAAwB;AACtD,UAAM,IAAI,MAAM,YAAY;AAC5B,WACE,EAAE,WAAW,IAAI,KACjB,EAAE,WAAW,OAAO,KACpB,EAAE,SAAS,KAAK,KAChB,EAAE,SAAS,IAAI;AAAA,EAEnB;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,SACJ,SACwC;AACxC,UAAM,QAAQ,QAAQ,SAAS,KAAK;AACpC,UAAM,cAAc,QAAQ,eAAe,KAAK;AAChD,UAAM,YAAY,QAAQ,aAAa,KAAK;AAC5C,UAAM,YAAY,KAAK,IAAI;AAE3B,UAAM,aAAyB,QAAQ,cAAc;AAAA,MACnD,MAAM;AAAA,MACN,YAAY,CAAC;AAAA,MACb,UAAU,CAAC;AAAA,MACX,sBAAsB;AAAA,IACxB;AAGA,QAAI;AACJ,QAAI,QAAQ,cAAc;AACxB,qBAAe,QAAQ;AAAA,IACzB,WAAW,QAAQ,mBAAmB;AACpC,qBAAe,QAAQ,kBAAkB;AAAA,IAC3C,OAAO;AACL,qBAAe;AAAA,IACjB;AAEA,QAAI,QAAQ,wBAAwB;AAClC,sBAAgB;AAAA;AAAA;AAAA,EAAmC,QAAQ,sBAAsB;AAAA,IACnF;AAGA,QAAI,aAAa,QAAQ;AACzB,QAAI,QAAQ,iBAAiB;AAC3B,oBAAc;AAAA;AAAA;AAAA;AAAA;AAAA,EAA4E,QAAQ,eAAe;AAAA;AAAA,IACnH;AAEA,UAAM,aAAa,QAAQ,cAAc;AAEzC,YAAQ;AAAA,MACN,4CAA4C,KAAK;AAAA,IACnD;AACA,YAAQ;AAAA,MACN,sCAAsC,QAAQ,YAAY,MAAM,GAAG,EAAE,CAAC;AAAA,IACxE;AAEA,UAAM,WAAW,MAAM,KAAK,YAAY,QAAQ,YAAY;AAC1D,YAAM,mBAAmB,KAAK,wBAAwB,KAAK;AAE3D,YAAM,aAAa,mBACf,EAAE,uBAAuB,UAAU,IACnC,EAAE,YAAY,UAAU;AAE5B,YAAM,YAAY,mBAAmB,CAAC,IAAI,EAAE,YAAY;AAExD,YAAM,SAAiD;AAAA,QACrD;AAAA,QACA,UAAU;AAAA,UACR,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,UACxC,EAAE,MAAM,QAAQ,SAAS,WAAW;AAAA,QACtC;AAAA,QACA,iBAAiB;AAAA,UACf,MAAM;AAAA,UACN,aAAa;AAAA,YACX,MAAM;AAAA,YACN,QAAQ;AAAA,YACR,QAAQ;AAAA,UACV;AAAA,QACF;AAAA,QACA,GAAG;AAAA,QACH,GAAG;AAAA,MACL;AAEA,aAAO,KAAK,OAAO,KAAK,YAAY,OAAO,MAAM;AAAA,IACnD,CAAC;AAED,UAAM,YAAY,KAAK,IAAI,IAAI;AAE/B,UAAM,UAAU,SAAS,QAAQ,CAAC,GAAG,SAAS;AAC9C,QAAI,CAAC,SAAS;AACZ,YAAM,IAAI,MAAM,+BAA+B;AAAA,IACjD;AAEA,QAAI;AACJ,QAAI;AACF,aAAO,KAAK,MAAM,OAAO;AAAA,IAC3B,SAAS,OAAO;AACd,YAAM,IAAI,MAAM,kCAAkC,KAAK,EAAE;AAAA,IAC3D;AAEA,UAAM,QAAQ;AAAA,MACZ,cAAc,SAAS,OAAO,iBAAiB;AAAA,MAC/C,kBAAkB,SAAS,OAAO,qBAAqB;AAAA,MACvD,aAAa,SAAS,OAAO,gBAAgB;AAAA,IAC/C;AAEA,QAAI,KAAK,cAAc;AACrB,WAAK,aAAa,SAAS,MAAM,cAAc,MAAM,gBAAgB;AAAA,IACvE;AAEA,YAAQ;AAAA,MACN,yCAAyC,SAAS,OAAO,MAAM,WAAW;AAAA,IAC5E;AAEA,QAAI;AACJ,QAAI,CAAC,QAAQ,gBAAgB;AAC3B,sBAAgB,EAAE,SAAS,KAAK;AAAA,IAClC;AAEA,WAAO;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAAA,EAEA,WAAmB;AACjB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,uBAAuB;AACrB,WAAO,KAAK,YAAY,UAAU;AAAA,EACpC;AAAA,EAEA,gBAAgB;AACd,WAAO,KAAK,cAAc,WAAW,KAAK;AAAA,EAC5C;AACF;AAMA,IAAI,eAA8C;AAU3C,SAAS,0BACd,SACwB;AACxB,MAAI,CAAC,cAAc;AACjB,mBAAe,IAAI,uBAAuB,OAAO;AAAA,EACnD;AACA,SAAO;AACT;AAEO,SAAS,8BAAoC;AAClD,iBAAe;AACjB;AAMO,SAAS,8BAAuC;AACrD,SAAO,CAAC,CAAC,QAAQ,IAAI;AACvB;","names":[]}
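Stripped of the remapped `mappings`, the only visible change to the embedded `src/structured-output.ts` in this hunk is a JSDoc block added above the singleton getter (copied verbatim from the new map's `sourcesContent`):

```ts
/**
 * Get the singleton structured output client instance.
 *
 * Creates the instance on first call, returns cached instance thereafter.
 *
 * @param {StructuredOutputOptions} [options] - Client configuration options
 * @returns {StructuredOutputClient} The structured output client instance
 */
export function getStructuredOutputClient(
  options?: StructuredOutputOptions,
): StructuredOutputClient {
  if (!sharedClient) {
    sharedClient = new StructuredOutputClient(options);
  }
  return sharedClient;
}
```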
@@ -1 +1 @@
-
{"version":3,"sources":["../src/client.ts"],"sourcesContent":["/**\n * Shared LLM Client\n *\n * Multi-provider LLM client with:\n * - OpenAI, DeepSeek, Anthropic, and Kimi support\n * - Anthropic prompt caching (CachingChatAnthropic)\n * - Rate limiting and retry logic\n * - Token tracking\n * - Structured output parsing with Zod\n *\n * @packageDocumentation\n */\n\nimport { ChatOpenAI } from '@langchain/openai';\nimport { ChatAnthropic } from '@langchain/anthropic';\nimport type { BaseMessageLike } from '@langchain/core/messages';\nimport Anthropic from '@anthropic-ai/sdk';\nimport { z } from 'zod';\nimport {\n RateLimiter,\n getGlobalRateLimiter,\n type RateLimiterOptions,\n} from './rate-limiter.js';\nimport { TokenTracker, getGlobalTokenTracker } from './token-tracker.js';\nimport { parseJsonResponse } from './json-parser.js';\n\n// ============================================================================\n// Anthropic Cache Control Helper\n// ============================================================================\n\nfunction addCacheControlToSystemMessages(\n messages: Array<{ role: string; content: string }>,\n): BaseMessageLike[] {\n return messages.map((msg) => {\n if (msg.role !== 'system') {\n return msg as BaseMessageLike;\n }\n\n return {\n role: msg.role,\n content: [\n {\n type: 'text' as const,\n text: msg.content,\n cache_control: { type: 'ephemeral' },\n },\n ],\n } as BaseMessageLike;\n });\n}\n\ntype ChatModel = ChatOpenAI | ChatAnthropic;\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi' | 'openrouter' | 'orbgen';\n\nexport interface ProviderConfig {\n apiKey: string;\n baseUrl?: string;\n defaultModel: string;\n}\n\nexport interface LLMClientOptions {\n provider?: LLMProvider;\n model?: string;\n temperature?: number;\n streaming?: boolean;\n rateLimiter?: RateLimiterOptions;\n useGlobalRateLimiter?: boolean;\n trackTokens?: boolean;\n}\n\nexport interface LLMCallOptions<T = unknown> {\n systemPrompt: string;\n userPrompt: string;\n schema?: z.ZodSchema<T>;\n maxRetries?: number;\n retryWithContext?: boolean;\n maxTokens?: number;\n skipSchemaValidation?: boolean;\n temperature?: number;\n}\n\nexport interface CacheableBlock {\n type: 'text';\n text: string;\n cache_control?: { type: 'ephemeral' };\n}\n\nexport interface CacheAwareLLMCallOptions<T = unknown>\n extends LLMCallOptions<T> {\n systemBlocks?: CacheableBlock[];\n userBlocks?: CacheableBlock[];\n rawText?: boolean;\n}\n\nexport interface LLMUsage {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n}\n\nexport type LLMFinishReason =\n | 'stop'\n | 'length'\n | 'content_filter'\n | 'tool_calls'\n | null;\n\nexport interface LLMResponse<T> {\n data: T;\n raw: string;\n finishReason: LLMFinishReason;\n usage: LLMUsage | null;\n}\n\nexport interface LLMStreamOptions {\n systemPrompt: string;\n messages: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>;\n maxTokens?: number;\n temperature?: number;\n}\n\nexport interface LLMStreamChunk {\n content: string;\n done: boolean;\n}\n\n// ============================================================================\n// Provider Configuration\n// ============================================================================\n\nconst PROVIDER_CONFIGS: Record<LLMProvider, () => ProviderConfig> = {\n openai: () => {\n const apiKey = 
process.env.OPENAI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPENAI_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return { apiKey, baseUrl: undefined, defaultModel: 'gpt-4o' };\n },\n deepseek: () => {\n const apiKey = process.env.DEEPSEEK_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'DEEPSEEK_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.deepseek.com/v1',\n defaultModel: 'deepseek-chat',\n };\n },\n anthropic: () => {\n const apiKey = process.env.ANTHROPIC_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'ANTHROPIC_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: undefined,\n defaultModel: 'claude-sonnet-4-5-20250929',\n };\n },\n kimi: () => {\n const apiKey = process.env.KIMI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'KIMI_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.moonshot.ai/v1',\n defaultModel: 'kimi-k2.5',\n };\n },\n openrouter: () => {\n const apiKey = process.env.OPEN_ROUTER_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPEN_ROUTER_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://openrouter.ai/api/v1',\n defaultModel: 'qwen/qwen-2.5-72b-instruct', // Default to Qwen 2.5\n };\n },\n orbgen: () => {\n const baseUrl = process.env.ORBGEN_URL;\n if (!baseUrl) {\n throw new Error(\n 'ORBGEN_URL environment variable is not set. ' +\n 'Set it to the OrbGen Cloud Run URL (e.g., https://orbgen-v2-xxx.run.app)',\n );\n }\n return {\n apiKey: 'not-needed',\n baseUrl: `${baseUrl}/v1`,\n defaultModel: 'orbgen-v2',\n };\n },\n};\n\nexport const DEEPSEEK_MODELS = {\n CHAT: 'deepseek-chat',\n CODER: 'deepseek-coder',\n REASONER: 'deepseek-reasoner',\n} as const;\n\nexport const OPENAI_MODELS = {\n GPT4O: 'gpt-4o',\n GPT4O_MINI: 'gpt-4o-mini',\n GPT4_TURBO: 'gpt-4-turbo',\n GPT35_TURBO: 'gpt-3.5-turbo',\n GPT_5_1: 'gpt-5.1',\n} as const;\n\nexport const ANTHROPIC_MODELS = {\n CLAUDE_SONNET_4_5: 'claude-sonnet-4-5-20250929',\n CLAUDE_SONNET_4: 'claude-sonnet-4-20250514',\n CLAUDE_OPUS_4_5: 'claude-opus-4-5-20250929',\n CLAUDE_3_5_HAIKU: 'claude-3-5-haiku-20241022',\n} as const;\n\nexport const KIMI_MODELS = {\n K2_5: 'kimi-k2.5',\n} as const;\n\nexport const OPENROUTER_MODELS = {\n // Qwen models - JSON/structured data specialists\n QWEN_2_5_72B: 'qwen/qwen-2.5-72b-instruct',\n QWEN_2_5_CODER_32B: 'qwen/qwen-2.5-coder-32b-instruct',\n QWEN_3_235B: 'qwen/qwen3-235b-a22b',\n\n // Gemma models - best small models for structured JSON output\n // Gemma 3 4B: 6/6 on complex decomposition, 100% behavior matching, free, fastest\n GEMMA_3_4B: 'google/gemma-3-4b-it',\n GEMMA_3_12B: 'google/gemma-3-12b-it',\n GEMMA_3_27B: 'google/gemma-3-27b-it',\n\n // Mistral models - strong structured output, function calling\n // Mistral Small 3.1: 6/6 on complex decomposition, picked std-kanban for tasks\n MISTRAL_SMALL_3_1: 'mistralai/mistral-small-3.1-24b-instruct',\n // Mistral Medium 3.1: next tier up from Small, stronger reasoning, tool calling\n MISTRAL_MEDIUM_3_1: 'mistralai/mistral-medium-3.1',\n\n // Llama models - agentic workhorses\n LLAMA_3_3_70B: 'meta-llama/llama-3.3-70b-instruct',\n LLAMA_3_1_405B: 
'meta-llama/llama-3.1-405b-instruct',\n LLAMA_4_MAVERICK: 'meta-llama/llama-4-maverick',\n LLAMA_4_SCOUT: 'meta-llama/llama-4-scout',\n\n // Kimi models - strong reasoning\n KIMI_K2: 'moonshotai/kimi-k2',\n\n // Zhipu GLM models - via OpenRouter\n GLM_4_7: 'z-ai/glm-4.7',\n} as const;\n\nconst DEFAULT_TEMPERATURE = 0.3;\n\n// ============================================================================\n// LLM Client\n// ============================================================================\n\nexport class LLMClient {\n private model: ChatModel;\n private rateLimiter: RateLimiter;\n private tokenTracker: TokenTracker | null;\n private modelName: string;\n private provider: LLMProvider;\n private providerConfig: ProviderConfig;\n private temperature: number;\n private streaming: boolean;\n\n constructor(options: LLMClientOptions = {}) {\n this.provider = options.provider || 'openai';\n // Kimi: 0.6 when thinking disabled (our default), 1.0 when thinking enabled\n this.temperature = options.temperature ?? \n (this.provider === 'kimi' ? 0.6 : DEFAULT_TEMPERATURE);\n this.streaming = options.streaming ?? false;\n\n this.providerConfig = PROVIDER_CONFIGS[this.provider]();\n this.modelName = options.model || this.providerConfig.defaultModel;\n\n const keyPreview = this.providerConfig.apiKey.slice(-4);\n console.log(\n `[LLMClient] Provider: ${this.provider}, Model: ${this.modelName}, Key: ****${keyPreview}`,\n );\n if (this.providerConfig.baseUrl) {\n console.log(\n `[LLMClient] Using custom base URL: ${this.providerConfig.baseUrl}`,\n );\n }\n\n this.model = this.createModel();\n\n this.rateLimiter =\n options.useGlobalRateLimiter !== false\n ? getGlobalRateLimiter(options.rateLimiter)\n : new RateLimiter(options.rateLimiter);\n\n this.tokenTracker =\n options.trackTokens !== false\n ? getGlobalTokenTracker(this.modelName)\n : null;\n }\n\n private usesMaxCompletionTokens(): boolean {\n const model = this.modelName.toLowerCase();\n return (\n model.startsWith('o1') ||\n model.startsWith('gpt-5') ||\n model.includes('o1-') ||\n model.includes('o3')\n );\n }\n\n private createModel(options?: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n const maxTokens = options?.maxTokens;\n const temperature = options?.temperature ?? this.temperature;\n\n if (this.provider === 'anthropic') {\n return new ChatAnthropic({\n apiKey: this.providerConfig.apiKey,\n model: this.modelName,\n temperature,\n streaming: this.streaming,\n maxTokens: maxTokens || 8192,\n callbacks: [\n {\n handleLLMEnd: (output) => {\n const generation = output.generations?.[0]?.[0];\n const usage = (\n generation as unknown as {\n message?: {\n usage_metadata?: {\n cache_creation_input_tokens?: number;\n cache_read_input_tokens?: number;\n input_tokens?: number;\n output_tokens?: number;\n };\n };\n }\n )?.message?.usage_metadata;\n\n if (usage) {\n const cacheCreated = usage.cache_creation_input_tokens ?? 0;\n const cacheRead = usage.cache_read_input_tokens ?? 0;\n const inputTokens = usage.input_tokens ?? 0;\n const outputTokens = usage.output_tokens ?? 
0;\n\n if (cacheCreated > 0) {\n console.log(\n `[LLMClient:Anthropic] Cache WRITE: ${cacheCreated} tokens cached`,\n );\n }\n if (cacheRead > 0) {\n const savingsPercent = Math.round(\n (cacheRead / (cacheRead + inputTokens)) * 100,\n );\n console.log(\n `[LLMClient:Anthropic] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,\n );\n }\n if (cacheCreated === 0 && cacheRead === 0 && inputTokens > 0) {\n if (inputTokens < 500) {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens (likely cached)`,\n );\n } else {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens`,\n );\n }\n }\n }\n },\n },\n ],\n });\n }\n\n const useCompletionTokens = this.usesMaxCompletionTokens();\n\n const tokenConfig = maxTokens\n ? useCompletionTokens\n ? { modelKwargs: { max_completion_tokens: maxTokens } }\n : { maxTokens }\n : {};\n\n const timeout = this.provider === 'deepseek' ? 600000 : undefined;\n\n // Kimi-k2.5: disable thinking to avoid reasoning_content issues with tool calls\n // When thinking is disabled, temperature must be 0.6 (not 1.0)\n const isKimi = this.provider === 'kimi';\n const effectiveTemp = isKimi ? 0.6 : temperature;\n\n // Build modelKwargs incrementally to avoid spread conflicts\n const modelKwargs: Record<string, unknown> = {};\n if (useCompletionTokens && maxTokens) {\n modelKwargs.max_completion_tokens = maxTokens;\n }\n if (isKimi) {\n modelKwargs.thinking = { type: 'disabled' };\n }\n // OpenRouter (Qwen): explicit tool_choice so the model doesn't ignore tool definitions\n if (this.provider === 'openrouter') {\n modelKwargs.tool_choice = 'auto';\n }\n\n return new ChatOpenAI({\n apiKey: this.providerConfig.apiKey,\n model: this.modelName,\n temperature: useCompletionTokens ? undefined : effectiveTemp,\n streaming: this.streaming,\n timeout,\n ...(Object.keys(modelKwargs).length > 0 ? { modelKwargs } : {}),\n ...(useCompletionTokens ? {} : maxTokens ? { maxTokens } : {}),\n configuration: {\n apiKey: this.providerConfig.apiKey,\n ...(this.providerConfig.baseUrl\n ? { baseURL: this.providerConfig.baseUrl }\n : {}),\n },\n });\n }\n\n private getModelWithOptions(options: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n return this.createModel(options);\n }\n\n /**\n * Check if this model is a Qwen3.5 thinking model.\n * These models burn all output tokens on internal reasoning\n * unless thinking is explicitly disabled via /no_think prefix.\n */\n private isQwenThinkingModel(): boolean {\n return this.modelName.includes('qwen3.5');\n }\n\n /**\n * Prepare user prompt with provider-specific adjustments.\n * Qwen3.5 models require /no_think to disable reasoning mode.\n */\n private prepareUserPrompt(prompt: string): string {\n if (this.isQwenThinkingModel()) {\n return `/no_think\\n${prompt}`;\n }\n return prompt;\n }\n\n getProvider(): LLMProvider {\n return this.provider;\n }\n\n getModelName(): string {\n return this.modelName;\n }\n\n getModel(): ChatModel {\n return this.model;\n }\n\n getRateLimiterStatus() {\n return this.rateLimiter.getStatus();\n }\n\n getTokenUsage() {\n return this.tokenTracker?.getSummary() ?? 
null;\n }\n\n async call<T>(options: LLMCallOptions<T>): Promise<T> {\n const response = await this.callWithMetadata(options);\n return response.data;\n }\n\n async callWithMetadata<T>(options: LLMCallOptions<T>): Promise<LLMResponse<T>> {\n const {\n systemPrompt,\n userPrompt,\n schema,\n maxRetries = 2,\n retryWithContext = true,\n maxTokens,\n skipSchemaValidation = false,\n temperature,\n } = options;\n\n let currentPrompt = userPrompt;\n let lastError: Error | null = null;\n\n console.log(\n `[LLMClient:call] Starting call to ${this.provider}/${this.modelName}`,\n );\n console.log(`[LLMClient:call] Prompt length: ${userPrompt.length} chars`);\n if (maxTokens) {\n console.log(`[LLMClient:call] Max tokens: ${maxTokens}`);\n }\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1}/${maxRetries + 1}...`,\n );\n const attemptStartTime = Date.now();\n\n const result = await this.rateLimiter.execute(async () => {\n console.log(`[LLMClient:call] Invoking model...`);\n const invokeStartTime = Date.now();\n\n const modelToUse =\n maxTokens || temperature !== undefined\n ? this.getModelWithOptions({ maxTokens, temperature })\n : this.model;\n\n const messages = [\n { role: 'system', content: systemPrompt },\n { role: 'user', content: this.prepareUserPrompt(currentPrompt) },\n ];\n const response = await modelToUse.invoke(\n this.provider === 'anthropic'\n ? addCacheControlToSystemMessages(messages)\n : messages,\n );\n\n console.log(\n `[LLMClient:call] Model responded in ${Date.now() - invokeStartTime}ms`,\n );\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) +\n (usageMeta.output_tokens || 0),\n };\n console.log(\n `[LLMClient:call] Tokens used: ${usage.promptTokens} in, ${usage.completionTokens} out`,\n );\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n if (finishReason === 'length') {\n console.warn(\n `[LLMClient:call] Response truncated (finish_reason=length)`,\n );\n }\n\n const content =\n typeof response.content === 'string'\n ? response.content\n : JSON.stringify(response.content);\n\n console.log(\n `[LLMClient:call] Response length: ${content.length} chars, finish_reason: ${finishReason}`,\n );\n\n return { content, finishReason, usage };\n });\n\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1} completed in ${Date.now() - attemptStartTime}ms, parsing response...`,\n );\n\n const parsed = skipSchemaValidation\n ? (parseJsonResponse(result.content, undefined) as T)\n : parseJsonResponse(result.content, schema);\n console.log(\n `[LLMClient:call] Response parsed successfully${skipSchemaValidation ? ' (schema validation skipped)' : ''}`,\n );\n\n return {\n data: parsed,\n raw: result.content,\n finishReason: result.finishReason,\n usage: result.usage,\n };\n } catch (error) {\n lastError = error instanceof Error ? 
error : new Error(String(error));\n console.error(\n `[LLMClient:call] Attempt ${attempt + 1} failed:`,\n lastError.message,\n );\n\n if (this.isRateLimitError(lastError)) {\n console.error(`[LLMClient:call] Rate limit error, not retrying`);\n throw lastError;\n }\n\n if (attempt < maxRetries && retryWithContext) {\n console.log(`[LLMClient:call] Will retry with error context`);\n currentPrompt =\n `${userPrompt}\\n\\n` +\n `[Previous attempt failed with: ${lastError.message}]\\n` +\n `Please output valid JSON that matches the expected schema.`;\n }\n }\n }\n\n console.error(`[LLMClient:call] All attempts exhausted, throwing error`);\n throw lastError;\n }\n\n private extractFinishReason(\n response: Awaited<ReturnType<ChatOpenAI['invoke']>>,\n ): LLMFinishReason {\n const metadata = response.response_metadata as\n | Record<string, unknown>\n | undefined;\n if (metadata?.finish_reason) {\n const reason = metadata.finish_reason as string;\n if (\n reason === 'stop' ||\n reason === 'length' ||\n reason === 'content_filter' ||\n reason === 'tool_calls'\n ) {\n return reason;\n }\n }\n return null;\n }\n\n async callRaw(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<string> {\n const response = await this.callRawWithMetadata(options);\n return response.raw;\n }\n\n async callRawWithMetadata(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<Omit<LLMResponse<string>, 'data'> & { raw: string }> {\n const { systemPrompt, userPrompt, maxTokens } = options;\n\n return this.rateLimiter.execute(async () => {\n const modelToUse = maxTokens\n ? this.getModelWithOptions({ maxTokens })\n : this.model;\n\n const messages = [\n { role: 'system', content: systemPrompt },\n { role: 'user', content: this.prepareUserPrompt(userPrompt) },\n ];\n const response = await modelToUse.invoke(\n this.provider === 'anthropic'\n ? addCacheControlToSystemMessages(messages)\n : messages,\n );\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) + (usageMeta.output_tokens || 0),\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n const content =\n typeof response.content === 'string'\n ? response.content\n : JSON.stringify(response.content);\n\n return { raw: content, finishReason, usage };\n });\n }\n\n /**\n * Stream a raw text response as an async iterator of content chunks.\n * Uses the underlying LangChain model's .stream() method.\n *\n * @param options - System prompt plus full message history\n * @yields LLMStreamChunk with content deltas and a done flag\n */\n async *streamRaw(options: LLMStreamOptions): AsyncGenerator<LLMStreamChunk> {\n const { messages, maxTokens, temperature } = options;\n\n const modelToUse = (maxTokens || temperature !== undefined)\n ? this.getModelWithOptions({ maxTokens, temperature })\n : this.model;\n\n const langchainMessages = this.provider === 'anthropic'\n ? addCacheControlToSystemMessages(messages)\n : messages;\n\n const stream = await modelToUse.stream(langchainMessages);\n\n for await (const chunk of stream) {\n const content = typeof chunk.content === 'string'\n ? 
chunk.content\n : Array.isArray(chunk.content)\n ? chunk.content\n .filter((c): c is { type: 'text'; text: string } => typeof c === 'object' && c !== null && 'text' in c)\n .map((c) => c.text)\n .join('')\n : '';\n\n if (content) {\n yield { content, done: false };\n }\n }\n\n yield { content: '', done: true };\n }\n\n private isRateLimitError(error: Error): boolean {\n const message = error.message.toLowerCase();\n return (\n message.includes('rate limit') ||\n message.includes('429') ||\n message.includes('quota exceeded')\n );\n }\n\n // ==========================================================================\n // Anthropic Cache Control Support\n // ==========================================================================\n\n async callWithCache<T>(\n options: CacheAwareLLMCallOptions<T>,\n ): Promise<LLMResponse<T>> {\n const {\n systemPrompt,\n userPrompt,\n systemBlocks,\n userBlocks,\n schema,\n maxRetries = 2,\n maxTokens,\n skipSchemaValidation = false,\n temperature,\n rawText = false,\n } = options;\n\n if (this.provider !== 'anthropic') {\n console.log(\n `[LLMClient:callWithCache] Provider ${this.provider} doesn't support caching, using regular call`,\n );\n return this.callWithMetadata(options);\n }\n\n const cacheableCount =\n (systemBlocks || []).filter((b) => b.cache_control).length +\n (userBlocks || []).filter((b) => b.cache_control).length;\n console.log(\n `[LLMClient:callWithCache] ${cacheableCount} cacheable block(s)`,\n );\n\n let lastError: Error | null = null;\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n console.log(\n `[LLMClient:callWithCache] Attempt ${attempt + 1}/${maxRetries + 1}...`,\n );\n\n const result = await this.rateLimiter.execute(async () => {\n const anthropic = new Anthropic();\n\n const systemContent =\n systemBlocks && systemBlocks.length > 0\n ? systemBlocks.map((b) => ({\n type: 'text' as const,\n text: b.text,\n ...(b.cache_control\n ? { cache_control: b.cache_control }\n : {}),\n }))\n : systemPrompt\n ? [{ type: 'text' as const, text: systemPrompt }]\n : [];\n\n const userContent =\n userBlocks && userBlocks.length > 0\n ? userBlocks.map((b) => ({\n type: 'text' as const,\n text: b.text,\n ...(b.cache_control\n ? { cache_control: b.cache_control }\n : {}),\n }))\n : userPrompt\n ? [{ type: 'text' as const, text: userPrompt }]\n : [];\n\n const response = await anthropic.messages.create({\n model: this.modelName,\n max_tokens: maxTokens || 8192,\n temperature: temperature ?? 0,\n system: systemContent,\n messages: [{ role: 'user', content: userContent }],\n });\n\n const textContent = response.content.find((c) => c.type === 'text');\n const content =\n textContent && 'text' in textContent ? 
textContent.text : '';\n\n const apiUsage = response.usage as {\n input_tokens: number;\n output_tokens: number;\n cache_creation_input_tokens?: number;\n cache_read_input_tokens?: number;\n };\n\n const cacheRead = apiUsage.cache_read_input_tokens || 0;\n const cacheCreation = apiUsage.cache_creation_input_tokens || 0;\n\n if (cacheCreation > 0) {\n console.log(\n `[LLMClient:callWithCache] Cache WRITE: ${cacheCreation} tokens`,\n );\n }\n if (cacheRead > 0) {\n const savingsPercent = Math.round(\n (cacheRead / (cacheRead + apiUsage.input_tokens)) * 100,\n );\n console.log(\n `[LLMClient:callWithCache] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,\n );\n }\n if (cacheCreation === 0 && cacheRead === 0) {\n console.log(\n `[LLMClient:callWithCache] No caching: ${apiUsage.input_tokens} input tokens`,\n );\n }\n\n const usage: LLMUsage = {\n promptTokens: apiUsage.input_tokens,\n completionTokens: apiUsage.output_tokens,\n totalTokens: apiUsage.input_tokens + apiUsage.output_tokens,\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n\n const finishReason =\n response.stop_reason === 'end_turn'\n ? 'stop'\n : response.stop_reason;\n\n return {\n content,\n finishReason: finishReason as LLMFinishReason,\n usage,\n };\n });\n\n let parsed: T;\n if (rawText) {\n parsed = result.content as unknown as T;\n } else if (skipSchemaValidation) {\n parsed = parseJsonResponse(result.content, undefined) as T;\n } else {\n parsed = parseJsonResponse(result.content, schema);\n }\n\n return {\n data: parsed,\n raw: result.content,\n finishReason: result.finishReason,\n usage: result.usage,\n };\n } catch (error) {\n lastError = error instanceof Error ? error : new Error(String(error));\n console.error(\n `[LLMClient:callWithCache] Attempt ${attempt + 1} failed:`,\n lastError.message,\n );\n\n if (this.isRateLimitError(lastError)) {\n throw lastError;\n }\n }\n }\n\n throw lastError;\n }\n\n static cacheableBlock(text: string, cache = true): CacheableBlock {\n return cache\n ? { type: 'text', text, cache_control: { type: 'ephemeral' } }\n : { type: 'text', text };\n }\n}\n\n// ============================================================================\n// Singleton Instances\n// ============================================================================\n\nconst sharedClients: Partial<Record<LLMProvider, LLMClient>> = {};\n\nexport function getSharedLLMClient(options?: LLMClientOptions): LLMClient {\n const provider = options?.provider || 'openai';\n if (!sharedClients[provider]) {\n sharedClients[provider] = new LLMClient(options);\n }\n return sharedClients[provider]!;\n}\n\nexport function resetSharedLLMClient(provider?: LLMProvider): void {\n if (provider) {\n delete sharedClients[provider];\n } else {\n for (const key of Object.keys(sharedClients) as LLMProvider[]) {\n delete sharedClients[key];\n }\n }\n}\n\n// ============================================================================\n// Provider Detection\n// ============================================================================\n\nexport function getAvailableProvider(): LLMProvider {\n if (process.env.ANTHROPIC_API_KEY) return 'anthropic';\n if (process.env.DEEPSEEK_API_KEY) return 'deepseek';\n if (process.env.KIMI_API_KEY) return 'kimi';\n if (process.env.OPENAI_API_KEY) return 'openai';\n throw new Error(\n 'No LLM API key found. 
Please set ANTHROPIC_API_KEY, OPENAI_API_KEY, DEEPSEEK_API_KEY, or KIMI_API_KEY.',\n );\n}\n\nexport function isProviderAvailable(provider: LLMProvider): boolean {\n switch (provider) {\n case 'openai':\n return !!process.env.OPENAI_API_KEY;\n case 'deepseek':\n return !!process.env.DEEPSEEK_API_KEY;\n case 'anthropic':\n return !!process.env.ANTHROPIC_API_KEY;\n case 'kimi':\n return !!process.env.KIMI_API_KEY;\n case 'openrouter':\n return !!process.env.OPEN_ROUTER_API_KEY;\n case 'orbgen':\n return !!process.env.ORBGEN_URL;\n default:\n return false;\n }\n}\n\n// ============================================================================\n// Convenience Functions\n// ============================================================================\n\nexport function createRequirementsClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek' ? DEEPSEEK_MODELS.CHAT : OPENAI_MODELS.GPT_5_1;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.3,\n ...options,\n });\n}\n\nexport function createCreativeClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek' ? DEEPSEEK_MODELS.REASONER : OPENAI_MODELS.GPT4O;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.7,\n ...options,\n });\n}\n\nexport function createFixClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek'\n ? DEEPSEEK_MODELS.CHAT\n : OPENAI_MODELS.GPT4O_MINI;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.2,\n ...options,\n });\n}\n\nexport function createDeepSeekClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'deepseek',\n model: DEEPSEEK_MODELS.CHAT,\n ...options,\n });\n}\n\nexport function createOpenAIClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openai',\n model: OPENAI_MODELS.GPT4O,\n ...options,\n });\n}\n\nexport function createAnthropicClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'anthropic',\n model: ANTHROPIC_MODELS.CLAUDE_SONNET_4_5,\n ...options,\n });\n}\n\nexport function createKimiClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'kimi',\n model: KIMI_MODELS.K2_5,\n ...options,\n });\n}\n\nexport function createOpenRouterClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openrouter',\n model: OPENROUTER_MODELS.QWEN_2_5_72B,\n ...options,\n });\n}\n\nexport function createZhipuClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openrouter',\n model: OPENROUTER_MODELS.GLM_4_7,\n ...options,\n 
});\n}\n"],"mappings":";;;;;;;;;;AAaA,SAAS,kBAAkB;AAC3B,SAAS,qBAAqB;AAE9B,OAAO,eAAe;AActB,SAAS,gCACP,UACmB;AACnB,SAAO,SAAS,IAAI,CAAC,QAAQ;AAC3B,QAAI,IAAI,SAAS,UAAU;AACzB,aAAO;AAAA,IACT;AAEA,WAAO;AAAA,MACL,MAAM,IAAI;AAAA,MACV,SAAS;AAAA,QACP;AAAA,UACE,MAAM;AAAA,UACN,MAAM,IAAI;AAAA,UACV,eAAe,EAAE,MAAM,YAAY;AAAA,QACrC;AAAA,MACF;AAAA,IACF;AAAA,EACF,CAAC;AACH;AAsFA,IAAM,mBAA8D;AAAA,EAClE,QAAQ,MAAM;AACZ,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO,EAAE,QAAQ,SAAS,QAAW,cAAc,SAAS;AAAA,EAC9D;AAAA,EACA,UAAU,MAAM;AACd,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,WAAW,MAAM;AACf,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,MAAM,MAAM;AACV,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,YAAY,MAAM;AAChB,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA;AAAA,IAChB;AAAA,EACF;AAAA,EACA,QAAQ,MAAM;AACZ,UAAM,UAAU,QAAQ,IAAI;AAC5B,QAAI,CAAC,SAAS;AACZ,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL,QAAQ;AAAA,MACR,SAAS,GAAG,OAAO;AAAA,MACnB,cAAc;AAAA,IAChB;AAAA,EACF;AACF;AAEO,IAAM,kBAAkB;AAAA,EAC7B,MAAM;AAAA,EACN,OAAO;AAAA,EACP,UAAU;AACZ;AAEO,IAAM,gBAAgB;AAAA,EAC3B,OAAO;AAAA,EACP,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,SAAS;AACX;AAEO,IAAM,mBAAmB;AAAA,EAC9B,mBAAmB;AAAA,EACnB,iBAAiB;AAAA,EACjB,iBAAiB;AAAA,EACjB,kBAAkB;AACpB;AAEO,IAAM,cAAc;AAAA,EACzB,MAAM;AACR;AAEO,IAAM,oBAAoB;AAAA;AAAA,EAE/B,cAAc;AAAA,EACd,oBAAoB;AAAA,EACpB,aAAa;AAAA;AAAA;AAAA,EAIb,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,aAAa;AAAA;AAAA;AAAA,EAIb,mBAAmB;AAAA;AAAA,EAEnB,oBAAoB;AAAA;AAAA,EAGpB,eAAe;AAAA,EACf,gBAAgB;AAAA,EAChB,kBAAkB;AAAA,EAClB,eAAe;AAAA;AAAA,EAGf,SAAS;AAAA;AAAA,EAGT,SAAS;AACX;AAEA,IAAM,sBAAsB;AAMrB,IAAM,YAAN,MAAgB;AAAA,EAUrB,YAAY,UAA4B,CAAC,GAAG;AAC1C,SAAK,WAAW,QAAQ,YAAY;AAEpC,SAAK,cAAc,QAAQ,gBACxB,KAAK,aAAa,SAAS,MAAM;AACpC,SAAK,YAAY,QAAQ,aAAa;AAEtC,SAAK,iBAAiB,iBAAiB,KAAK,QAAQ,EAAE;AACtD,SAAK,YAAY,QAAQ,SAAS,KAAK,eAAe;AAEtD,UAAM,aAAa,KAAK,eAAe,OAAO,MAAM,EAAE;AACtD,YAAQ;AAAA,MACN,yBAAyB,KAAK,QAAQ,YAAY,KAAK,SAAS,cAAc,UAAU;AAAA,IAC1F;AACA,QAAI,KAAK,eAAe,SAAS;AAC/B,cAAQ;AAAA,QACN,sCAAsC,KAAK,eAAe,OAAO;AAAA,MACnE;AAAA,IACF;AAEA,SAAK,QAAQ,KAAK,YAAY;AAE9B,SAAK,cACH,QAAQ,yBAAyB,QAC7B,qBAAqB,QAAQ,WAAW,IACxC,IAAI,YAAY,QAAQ,WAAW;AAEzC,SAAK,eACH,QAAQ,gBAAgB,QACpB,sBAAsB,KAAK,SAAS,IACpC;AAAA,EACR;AAAA,EAEQ,0BAAmC;AACzC,UAAM,QAAQ,KAAK,UAAU,YAAY;AACzC,WACE,MAAM,WAAW,IAAI,KACrB,MAAM,WAAW,OAAO,KACxB,MAAM,SAAS,KAAK,KACpB,MAAM,SAAS,IAAI;AAAA,EAEvB;AAAA,EAEQ,YAAY,SAGN;AACZ,UAAM,YAAY,SAAS;AAC3B,UAAM,cAAc,SAAS,eAAe,KAAK;AAEjD,QAAI,KAAK,aAAa,aAAa;AACjC,aAAO,IAAI,cAAc;AAAA,QACvB,QAAQ,KAAK,eAAe;AAAA,QAC5B,OAAO,KAAK;AAAA,QACZ;AAAA,QACA,WAAW,KAAK;AAAA,QAChB,WAAW,aAAa;AAAA,QACxB,WAAW;AAAA,UACT;AAAA,YACE,cAAc,CAAC,WAAW;AACxB,oBAAM,aAAa,OAAO,cAAc,CAAC,IAAI,CAAC;AAC9C,oBAAM,QACJ,YAUC,SAAS;AAEZ,kBAAI,OAAO;AACT,sBAAM,eAAe,MAAM,+BAA+B;AAC1D,sBAAM,YAAY,MAAM,2BAA2B;AACnD,sBAAM,cAAc,MAAM,gBAAgB;AAC1C,sBAAM,eAAe,MAAM,iBAAiB;AAE5C,oBAAI,eAAe,GAAG;AACpB,0BAAQ;AAAA,oBACN,sCAAsC,YAAY;AAAA,kBACpD;AAAA,gBACF;AACA,oBAAI,YAAY,GAAG;AACjB,wBAAM,iBAAiB,KAAK;AAAA,oBACzB,aAAa,YAAY,eAAgB;AAAA,kBAC5C;AACA,0BAAQ;AAAA,oBACN,oCAAoC,SAAS,aAAa,cAAc;AAAA,kBAC1E;AAAA,gBACF;AACA,oBAAI,iBAAiB,KAAK,c
AAc,KAAK,cAAc,GAAG;AAC5D,sBAAI,cAAc,KAAK;AACrB,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF,OAAO;AACL,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF;AAAA,gBACF;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAEA,UAAM,sBAAsB,KAAK,wBAAwB;AAEzD,UAAM,cAAc,YAChB,sBACE,EAAE,aAAa,EAAE,uBAAuB,UAAU,EAAE,IACpD,EAAE,UAAU,IACd,CAAC;AAEL,UAAM,UAAU,KAAK,aAAa,aAAa,MAAS;AAIxD,UAAM,SAAS,KAAK,aAAa;AACjC,UAAM,gBAAgB,SAAS,MAAM;AAGrC,UAAM,cAAuC,CAAC;AAC9C,QAAI,uBAAuB,WAAW;AACpC,kBAAY,wBAAwB;AAAA,IACtC;AACA,QAAI,QAAQ;AACV,kBAAY,WAAW,EAAE,MAAM,WAAW;AAAA,IAC5C;AAEA,QAAI,KAAK,aAAa,cAAc;AAClC,kBAAY,cAAc;AAAA,IAC5B;AAEA,WAAO,IAAI,WAAW;AAAA,MACpB,QAAQ,KAAK,eAAe;AAAA,MAC5B,OAAO,KAAK;AAAA,MACZ,aAAa,sBAAsB,SAAY;AAAA,MAC/C,WAAW,KAAK;AAAA,MAChB;AAAA,MACA,GAAI,OAAO,KAAK,WAAW,EAAE,SAAS,IAAI,EAAE,YAAY,IAAI,CAAC;AAAA,MAC7D,GAAI,sBAAsB,CAAC,IAAI,YAAY,EAAE,UAAU,IAAI,CAAC;AAAA,MAC5D,eAAe;AAAA,QACb,QAAQ,KAAK,eAAe;AAAA,QAC5B,GAAI,KAAK,eAAe,UACpB,EAAE,SAAS,KAAK,eAAe,QAAQ,IACvC,CAAC;AAAA,MACP;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEQ,oBAAoB,SAGd;AACZ,WAAO,KAAK,YAAY,OAAO;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,sBAA+B;AACrC,WAAO,KAAK,UAAU,SAAS,SAAS;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,kBAAkB,QAAwB;AAChD,QAAI,KAAK,oBAAoB,GAAG;AAC9B,aAAO;AAAA,EAAc,MAAM;AAAA,IAC7B;AACA,WAAO;AAAA,EACT;AAAA,EAEA,cAA2B;AACzB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,eAAuB;AACrB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,WAAsB;AACpB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,uBAAuB;AACrB,WAAO,KAAK,YAAY,UAAU;AAAA,EACpC;AAAA,EAEA,gBAAgB;AACd,WAAO,KAAK,cAAc,WAAW,KAAK;AAAA,EAC5C;AAAA,EAEA,MAAM,KAAQ,SAAwC;AACpD,UAAM,WAAW,MAAM,KAAK,iBAAiB,OAAO;AACpD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,iBAAoB,SAAqD;AAC7E,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb,mBAAmB;AAAA,MACnB;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,IACF,IAAI;AAEJ,QAAI,gBAAgB;AACpB,QAAI,YAA0B;AAE9B,YAAQ;AAAA,MACN,qCAAqC,KAAK,QAAQ,IAAI,KAAK,SAAS;AAAA,IACtE;AACA,YAAQ,IAAI,mCAAmC,WAAW,MAAM,QAAQ;AACxE,QAAI,WAAW;AACb,cAAQ,IAAI,gCAAgC,SAAS,EAAE;AAAA,IACzD;AAEA,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QAC3D;AACA,cAAM,mBAAmB,KAAK,IAAI;AAElC,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,kBAAQ,IAAI,oCAAoC;AAChD,gBAAM,kBAAkB,KAAK,IAAI;AAEjC,gBAAM,aACJ,aAAa,gBAAgB,SACzB,KAAK,oBAAoB,EAAE,WAAW,YAAY,CAAC,IACnD,KAAK;AAEX,gBAAM,WAAW;AAAA,YACf,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,YACxC,EAAE,MAAM,QAAQ,SAAS,KAAK,kBAAkB,aAAa,EAAE;AAAA,UACjE;AACA,gBAAM,WAAW,MAAM,WAAW;AAAA,YAChC,KAAK,aAAa,cACd,gCAAgC,QAAQ,IACxC;AAAA,UACN;AAEA,kBAAQ;AAAA,YACN,uCAAuC,KAAK,IAAI,IAAI,eAAe;AAAA,UACrE;AAEA,cAAI,QAAyB;AAC7B,cAAI,SAAS,gBAAgB;AAC3B,kBAAM,YAAY,SAAS;AAI3B,oBAAQ;AAAA,cACN,cAAc,UAAU,gBAAgB;AAAA,cACxC,kBAAkB,UAAU,iBAAiB;AAAA,cAC7C,cACG,UAAU,gBAAgB,MAC1B,UAAU,iBAAiB;AAAA,YAChC;AACA,oBAAQ;AAAA,cACN,iCAAiC,MAAM,YAAY,QAAQ,MAAM,gBAAgB;AAAA,YACnF;AAEA,gBAAI,KAAK,cAAc;AACrB,mBAAK,aAAa;AAAA,gBAChB,MAAM;AAAA,gBACN,MAAM;AAAA,cACR;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,cAAI,iBAAiB,UAAU;AAC7B,oBAAQ;AAAA,cACN;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,kBAAQ;AAAA,YACN,qCAAqC,QAAQ,MAAM,0BAA0B,YAAY;AAAA,UAC3F;AAEA,iBAAO,EAAE,SAAS,cAAc,MAAM;AAAA,QACxC,CAAC;AAED,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,iBAAiB,KAAK,IAAI,IAAI,gBAAgB;AAAA,QACvF;AAEA,cAAM,SAAS,uBACV,kBAAkB,OAAO,SAAS,MAAS,IAC5C,kBAAkB,OAAO,SAAS,MAAM;AAC5C,gBAAQ;AAAA,UACN,gDAAgD,uBAAuB,iCAAiC,EAAE;AAAA,QAC5G;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC
;AACpE,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC;AAAA,UACvC,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,kBAAQ,MAAM,iDAAiD;AAC/D,gBAAM;AAAA,QACR;AAEA,YAAI,UAAU,cAAc,kBAAkB;AAC5C,kBAAQ,IAAI,gDAAgD;AAC5D,0BACE,GAAG,UAAU;AAAA;AAAA,iCACqB,UAAU,OAAO;AAAA;AAAA,QAEvD;AAAA,MACF;AAAA,IACF;AAEA,YAAQ,MAAM,yDAAyD;AACvE,UAAM;AAAA,EACR;AAAA,EAEQ,oBACN,UACiB;AACjB,UAAM,WAAW,SAAS;AAG1B,QAAI,UAAU,eAAe;AAC3B,YAAM,SAAS,SAAS;AACxB,UACE,WAAW,UACX,WAAW,YACX,WAAW,oBACX,WAAW,cACX;AACA,eAAO;AAAA,MACT;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,QAAQ,SAIM;AAClB,UAAM,WAAW,MAAM,KAAK,oBAAoB,OAAO;AACvD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,oBAAoB,SAIuC;AAC/D,UAAM,EAAE,cAAc,YAAY,UAAU,IAAI;AAEhD,WAAO,KAAK,YAAY,QAAQ,YAAY;AAC1C,YAAM,aAAa,YACf,KAAK,oBAAoB,EAAE,UAAU,CAAC,IACtC,KAAK;AAET,YAAM,WAAW;AAAA,QACf,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,QACxC,EAAE,MAAM,QAAQ,SAAS,KAAK,kBAAkB,UAAU,EAAE;AAAA,MAC9D;AACA,YAAM,WAAW,MAAM,WAAW;AAAA,QAChC,KAAK,aAAa,cACd,gCAAgC,QAAQ,IACxC;AAAA,MACN;AAEA,UAAI,QAAyB;AAC7B,UAAI,SAAS,gBAAgB;AAC3B,cAAM,YAAY,SAAS;AAI3B,gBAAQ;AAAA,UACN,cAAc,UAAU,gBAAgB;AAAA,UACxC,kBAAkB,UAAU,iBAAiB;AAAA,UAC7C,cACG,UAAU,gBAAgB,MAAM,UAAU,iBAAiB;AAAA,QAChE;AAEA,YAAI,KAAK,cAAc;AACrB,eAAK,aAAa;AAAA,YAChB,MAAM;AAAA,YACN,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAEA,YAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,YAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,aAAO,EAAE,KAAK,SAAS,cAAc,MAAM;AAAA,IAC7C,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,UAAU,SAA2D;AAC1E,UAAM,EAAE,UAAU,WAAW,YAAY,IAAI;AAE7C,UAAM,aAAc,aAAa,gBAAgB,SAC7C,KAAK,oBAAoB,EAAE,WAAW,YAAY,CAAC,IACnD,KAAK;AAET,UAAM,oBAAoB,KAAK,aAAa,cACxC,gCAAgC,QAAQ,IACxC;AAEJ,UAAM,SAAS,MAAM,WAAW,OAAO,iBAAiB;AAExD,qBAAiB,SAAS,QAAQ;AAChC,YAAM,UAAU,OAAO,MAAM,YAAY,WACrC,MAAM,UACN,MAAM,QAAQ,MAAM,OAAO,IACzB,MAAM,QACH,OAAO,CAAC,MAA2C,OAAO,MAAM,YAAY,MAAM,QAAQ,UAAU,CAAC,EACrG,IAAI,CAAC,MAAM,EAAE,IAAI,EACjB,KAAK,EAAE,IACV;AAEN,UAAI,SAAS;AACX,cAAM,EAAE,SAAS,MAAM,MAAM;AAAA,MAC/B;AAAA,IACF;AAEA,UAAM,EAAE,SAAS,IAAI,MAAM,KAAK;AAAA,EAClC;AAAA,EAEQ,iBAAiB,OAAuB;AAC9C,UAAM,UAAU,MAAM,QAAQ,YAAY;AAC1C,WACE,QAAQ,SAAS,YAAY,KAC7B,QAAQ,SAAS,KAAK,KACtB,QAAQ,SAAS,gBAAgB;AAAA,EAErC;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,cACJ,SACyB;AACzB,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,MACA,UAAU;AAAA,IACZ,IAAI;AAEJ,QAAI,KAAK,aAAa,aAAa;AACjC,cAAQ;AAAA,QACN,sCAAsC,KAAK,QAAQ;AAAA,MACrD;AACA,aAAO,KAAK,iBAAiB,OAAO;AAAA,IACtC;AAEA,UAAM,kBACH,gBAAgB,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE,UACnD,cAAc,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE;AACpD,YAAQ;AAAA,MACN,6BAA6B,cAAc;AAAA,IAC7C;AAEA,QAAI,YAA0B;AAE9B,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QACpE;AAEA,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,gBAAM,YAAY,IAAI,UAAU;AAEhC,gBAAM,gBACJ,gBAAgB,aAAa,SAAS,IAClC,aAAa,IAAI,CAAC,OAAO;AAAA,YACvB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,eACE,CAAC,EAAE,MAAM,QAAiB,MAAM,aAAa,CAAC,IAC9C,CAAC;AAET,gBAAM,cACJ,cAAc,WAAW,SAAS,IAC9B,WAAW,IAAI,CAAC,OAAO;AAAA,YACrB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,aACE,CAAC,EAAE,MAAM,QAAiB,MAAM,WAAW,CAAC,IAC5C,CAAC;AAET,gBAAM,WAAW,MAAM,UAAU,SAAS,OAAO;AAAA,YAC/C,OAAO,KAAK;AAAA,YACZ,YAAY,aAAa;AAAA,YACzB,aAAa,eAAe;AAAA,YAC5B,QAAQ;AAAA,YACR,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,YAAY,CAAC;AAAA,UACnD,CAAC;AAED,gBAAM,cAAc,SAAS,QAAQ,KAAK,CAAC,MAAM,EAAE,SAAS,MAAM;AAClE,gBAAM,UACJ,eAAe,UAAU,cAAc,YAAY,OAAO;AAE5D,gBAAM,WAAW,SAAS;AAO1B,gBAAM,YAAY,SAAS,2BAA2B;AACtD,gBAAM,gBAAgB,SA
AS,+BAA+B;AAE9D,cAAI,gBAAgB,GAAG;AACrB,oBAAQ;AAAA,cACN,0CAA0C,aAAa;AAAA,YACzD;AAAA,UACF;AACA,cAAI,YAAY,GAAG;AACjB,kBAAM,iBAAiB,KAAK;AAAA,cACzB,aAAa,YAAY,SAAS,gBAAiB;AAAA,YACtD;AACA,oBAAQ;AAAA,cACN,wCAAwC,SAAS,aAAa,cAAc;AAAA,YAC9E;AAAA,UACF;AACA,cAAI,kBAAkB,KAAK,cAAc,GAAG;AAC1C,oBAAQ;AAAA,cACN,yCAAyC,SAAS,YAAY;AAAA,YAChE;AAAA,UACF;AAEA,gBAAM,QAAkB;AAAA,YACtB,cAAc,SAAS;AAAA,YACvB,kBAAkB,SAAS;AAAA,YAC3B,aAAa,SAAS,eAAe,SAAS;AAAA,UAChD;AAEA,cAAI,KAAK,cAAc;AACrB,iBAAK,aAAa;AAAA,cAChB,MAAM;AAAA,cACN,MAAM;AAAA,YACR;AAAA,UACF;AAEA,gBAAM,eACJ,SAAS,gBAAgB,aACrB,SACA,SAAS;AAEf,iBAAO;AAAA,YACL;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF,CAAC;AAED,YAAI;AACJ,YAAI,SAAS;AACX,mBAAS,OAAO;AAAA,QAClB,WAAW,sBAAsB;AAC/B,mBAAS,kBAAkB,OAAO,SAAS,MAAS;AAAA,QACtD,OAAO;AACL,mBAAS,kBAAkB,OAAO,SAAS,MAAM;AAAA,QACnD;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AACpE,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC;AAAA,UAChD,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,gBAAM;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,UAAM;AAAA,EACR;AAAA,EAEA,OAAO,eAAe,MAAc,QAAQ,MAAsB;AAChE,WAAO,QACH,EAAE,MAAM,QAAQ,MAAM,eAAe,EAAE,MAAM,YAAY,EAAE,IAC3D,EAAE,MAAM,QAAQ,KAAK;AAAA,EAC3B;AACF;AAMA,IAAM,gBAAyD,CAAC;AAEzD,SAAS,mBAAmB,SAAuC;AACxE,QAAM,WAAW,SAAS,YAAY;AACtC,MAAI,CAAC,cAAc,QAAQ,GAAG;AAC5B,kBAAc,QAAQ,IAAI,IAAI,UAAU,OAAO;AAAA,EACjD;AACA,SAAO,cAAc,QAAQ;AAC/B;AAEO,SAAS,qBAAqB,UAA8B;AACjE,MAAI,UAAU;AACZ,WAAO,cAAc,QAAQ;AAAA,EAC/B,OAAO;AACL,eAAW,OAAO,OAAO,KAAK,aAAa,GAAoB;AAC7D,aAAO,cAAc,GAAG;AAAA,IAC1B;AAAA,EACF;AACF;AAMO,SAAS,uBAAoC;AAClD,MAAI,QAAQ,IAAI,kBAAmB,QAAO;AAC1C,MAAI,QAAQ,IAAI,iBAAkB,QAAO;AACzC,MAAI,QAAQ,IAAI,aAAc,QAAO;AACrC,MAAI,QAAQ,IAAI,eAAgB,QAAO;AACvC,QAAM,IAAI;AAAA,IACR;AAAA,EACF;AACF;AAEO,SAAS,oBAAoB,UAAgC;AAClE,UAAQ,UAAU;AAAA,IAChB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB;AACE,aAAO;AAAA,EACX;AACF;AAMO,SAAS,yBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,OAAO,cAAc;AACjE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,qBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,WAAW,cAAc;AACrE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,gBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aACT,gBAAgB,OAChB,cAAc;AACpB,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,qBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,gBAAgB;AAAA,IACvB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,mBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,cAAc;AAAA,IACrB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,sBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,iBAAiB;AAAA,IACxB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,iBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,YAAY;AAAA,IACnB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,uBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,kBAAkB;AAAA,IACzB,GAAG;AAAA,EACL,CAAC;AACH;AAEO,SAAS,kBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,kBAAkB;AAAA,IACzB,GAAG;AAAA,EACL,CAAC;AACH;","names":[]}
+
{"version":3,"sources":["../src/client.ts"],"sourcesContent":["/**\n * Shared LLM Client\n *\n * Multi-provider LLM client with:\n * - OpenAI, DeepSeek, Anthropic, and Kimi support\n * - Anthropic prompt caching (CachingChatAnthropic)\n * - Rate limiting and retry logic\n * - Token tracking\n * - Structured output parsing with Zod\n *\n * @packageDocumentation\n */\n\nimport { ChatOpenAI } from '@langchain/openai';\nimport { ChatAnthropic } from '@langchain/anthropic';\nimport type { BaseMessageLike } from '@langchain/core/messages';\nimport Anthropic from '@anthropic-ai/sdk';\nimport { z } from 'zod';\nimport {\n RateLimiter,\n getGlobalRateLimiter,\n type RateLimiterOptions,\n} from './rate-limiter.js';\nimport { TokenTracker, getGlobalTokenTracker } from './token-tracker.js';\nimport { parseJsonResponse } from './json-parser.js';\n\n// ============================================================================\n// Anthropic Cache Control Helper\n// ============================================================================\n\nfunction addCacheControlToSystemMessages(\n messages: Array<{ role: string; content: string }>,\n): BaseMessageLike[] {\n return messages.map((msg) => {\n if (msg.role !== 'system') {\n return msg as BaseMessageLike;\n }\n\n return {\n role: msg.role,\n content: [\n {\n type: 'text' as const,\n text: msg.content,\n cache_control: { type: 'ephemeral' },\n },\n ],\n } as BaseMessageLike;\n });\n}\n\ntype ChatModel = ChatOpenAI | ChatAnthropic;\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport type LLMProvider = 'openai' | 'deepseek' | 'anthropic' | 'kimi' | 'openrouter' | 'orbgen';\n\nexport interface ProviderConfig {\n apiKey: string;\n baseUrl?: string;\n defaultModel: string;\n}\n\nexport interface LLMClientOptions {\n provider?: LLMProvider;\n model?: string;\n temperature?: number;\n streaming?: boolean;\n rateLimiter?: RateLimiterOptions;\n useGlobalRateLimiter?: boolean;\n trackTokens?: boolean;\n}\n\nexport interface LLMCallOptions<T = unknown> {\n systemPrompt: string;\n userPrompt: string;\n schema?: z.ZodSchema<T>;\n maxRetries?: number;\n retryWithContext?: boolean;\n maxTokens?: number;\n skipSchemaValidation?: boolean;\n temperature?: number;\n}\n\nexport interface CacheableBlock {\n type: 'text';\n text: string;\n cache_control?: { type: 'ephemeral' };\n}\n\nexport interface CacheAwareLLMCallOptions<T = unknown>\n extends LLMCallOptions<T> {\n systemBlocks?: CacheableBlock[];\n userBlocks?: CacheableBlock[];\n rawText?: boolean;\n}\n\nexport interface LLMUsage {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n}\n\nexport type LLMFinishReason =\n | 'stop'\n | 'length'\n | 'content_filter'\n | 'tool_calls'\n | null;\n\nexport interface LLMResponse<T> {\n data: T;\n raw: string;\n finishReason: LLMFinishReason;\n usage: LLMUsage | null;\n}\n\nexport interface LLMStreamOptions {\n systemPrompt: string;\n messages: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>;\n maxTokens?: number;\n temperature?: number;\n}\n\nexport interface LLMStreamChunk {\n content: string;\n done: boolean;\n}\n\n// ============================================================================\n// Provider Configuration\n// ============================================================================\n\nconst PROVIDER_CONFIGS: Record<LLMProvider, () => ProviderConfig> = {\n openai: () => {\n const apiKey = 
process.env.OPENAI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPENAI_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return { apiKey, baseUrl: undefined, defaultModel: 'gpt-4o' };\n },\n deepseek: () => {\n const apiKey = process.env.DEEPSEEK_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'DEEPSEEK_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.deepseek.com/v1',\n defaultModel: 'deepseek-chat',\n };\n },\n anthropic: () => {\n const apiKey = process.env.ANTHROPIC_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'ANTHROPIC_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: undefined,\n defaultModel: 'claude-sonnet-4-5-20250929',\n };\n },\n kimi: () => {\n const apiKey = process.env.KIMI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'KIMI_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://api.moonshot.ai/v1',\n defaultModel: 'kimi-k2.5',\n };\n },\n openrouter: () => {\n const apiKey = process.env.OPEN_ROUTER_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'OPEN_ROUTER_API_KEY environment variable is not set. ' +\n 'Please set it in your .env file or environment.',\n );\n }\n return {\n apiKey,\n baseUrl: 'https://openrouter.ai/api/v1',\n defaultModel: 'qwen/qwen-2.5-72b-instruct', // Default to Qwen 2.5\n };\n },\n orbgen: () => {\n const baseUrl = process.env.ORBGEN_URL;\n if (!baseUrl) {\n throw new Error(\n 'ORBGEN_URL environment variable is not set. ' +\n 'Set it to the OrbGen Cloud Run URL (e.g., https://orbgen-v2-xxx.run.app)',\n );\n }\n return {\n apiKey: 'not-needed',\n baseUrl: `${baseUrl}/v1`,\n defaultModel: 'orbgen-v2',\n };\n },\n};\n\nexport const DEEPSEEK_MODELS = {\n CHAT: 'deepseek-chat',\n CODER: 'deepseek-coder',\n REASONER: 'deepseek-reasoner',\n} as const;\n\nexport const OPENAI_MODELS = {\n GPT4O: 'gpt-4o',\n GPT4O_MINI: 'gpt-4o-mini',\n GPT4_TURBO: 'gpt-4-turbo',\n GPT35_TURBO: 'gpt-3.5-turbo',\n GPT_5_1: 'gpt-5.1',\n} as const;\n\nexport const ANTHROPIC_MODELS = {\n CLAUDE_SONNET_4_5: 'claude-sonnet-4-5-20250929',\n CLAUDE_SONNET_4: 'claude-sonnet-4-20250514',\n CLAUDE_OPUS_4_5: 'claude-opus-4-5-20250929',\n CLAUDE_3_5_HAIKU: 'claude-3-5-haiku-20241022',\n} as const;\n\nexport const KIMI_MODELS = {\n K2_5: 'kimi-k2.5',\n} as const;\n\nexport const OPENROUTER_MODELS = {\n // Qwen models - JSON/structured data specialists\n QWEN_2_5_72B: 'qwen/qwen-2.5-72b-instruct',\n QWEN_2_5_CODER_32B: 'qwen/qwen-2.5-coder-32b-instruct',\n QWEN_3_235B: 'qwen/qwen3-235b-a22b',\n\n // Gemma models - best small models for structured JSON output\n // Gemma 3 4B: 6/6 on complex decomposition, 100% behavior matching, free, fastest\n GEMMA_3_4B: 'google/gemma-3-4b-it',\n GEMMA_3_12B: 'google/gemma-3-12b-it',\n GEMMA_3_27B: 'google/gemma-3-27b-it',\n\n // Mistral models - strong structured output, function calling\n // Mistral Small 3.1: 6/6 on complex decomposition, picked std-kanban for tasks\n MISTRAL_SMALL_3_1: 'mistralai/mistral-small-3.1-24b-instruct',\n // Mistral Medium 3.1: next tier up from Small, stronger reasoning, tool calling\n MISTRAL_MEDIUM_3_1: 'mistralai/mistral-medium-3.1',\n\n // Llama models - agentic workhorses\n LLAMA_3_3_70B: 'meta-llama/llama-3.3-70b-instruct',\n LLAMA_3_1_405B: 
'meta-llama/llama-3.1-405b-instruct',\n LLAMA_4_MAVERICK: 'meta-llama/llama-4-maverick',\n LLAMA_4_SCOUT: 'meta-llama/llama-4-scout',\n\n // Kimi models - strong reasoning\n KIMI_K2: 'moonshotai/kimi-k2',\n\n // Zhipu GLM models - via OpenRouter\n GLM_4_7: 'z-ai/glm-4.7',\n} as const;\n\nconst DEFAULT_TEMPERATURE = 0.3;\n\n// ============================================================================\n// LLM Client\n// ============================================================================\n\nexport class LLMClient {\n private model: ChatModel;\n private rateLimiter: RateLimiter;\n private tokenTracker: TokenTracker | null;\n private modelName: string;\n private provider: LLMProvider;\n private providerConfig: ProviderConfig;\n private temperature: number;\n private streaming: boolean;\n\n constructor(options: LLMClientOptions = {}) {\n this.provider = options.provider || 'openai';\n // Kimi: 0.6 when thinking disabled (our default), 1.0 when thinking enabled\n this.temperature = options.temperature ?? \n (this.provider === 'kimi' ? 0.6 : DEFAULT_TEMPERATURE);\n this.streaming = options.streaming ?? false;\n\n this.providerConfig = PROVIDER_CONFIGS[this.provider]();\n this.modelName = options.model || this.providerConfig.defaultModel;\n\n const keyPreview = this.providerConfig.apiKey.slice(-4);\n console.log(\n `[LLMClient] Provider: ${this.provider}, Model: ${this.modelName}, Key: ****${keyPreview}`,\n );\n if (this.providerConfig.baseUrl) {\n console.log(\n `[LLMClient] Using custom base URL: ${this.providerConfig.baseUrl}`,\n );\n }\n\n this.model = this.createModel();\n\n this.rateLimiter =\n options.useGlobalRateLimiter !== false\n ? getGlobalRateLimiter(options.rateLimiter)\n : new RateLimiter(options.rateLimiter);\n\n this.tokenTracker =\n options.trackTokens !== false\n ? getGlobalTokenTracker(this.modelName)\n : null;\n }\n\n private usesMaxCompletionTokens(): boolean {\n const model = this.modelName.toLowerCase();\n return (\n model.startsWith('o1') ||\n model.startsWith('gpt-5') ||\n model.includes('o1-') ||\n model.includes('o3')\n );\n }\n\n private createModel(options?: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n const maxTokens = options?.maxTokens;\n const temperature = options?.temperature ?? this.temperature;\n\n if (this.provider === 'anthropic') {\n return new ChatAnthropic({\n apiKey: this.providerConfig.apiKey,\n model: this.modelName,\n temperature,\n streaming: this.streaming,\n maxTokens: maxTokens || 8192,\n callbacks: [\n {\n handleLLMEnd: (output) => {\n const generation = output.generations?.[0]?.[0];\n const usage = (\n generation as unknown as {\n message?: {\n usage_metadata?: {\n cache_creation_input_tokens?: number;\n cache_read_input_tokens?: number;\n input_tokens?: number;\n output_tokens?: number;\n };\n };\n }\n )?.message?.usage_metadata;\n\n if (usage) {\n const cacheCreated = usage.cache_creation_input_tokens ?? 0;\n const cacheRead = usage.cache_read_input_tokens ?? 0;\n const inputTokens = usage.input_tokens ?? 0;\n const outputTokens = usage.output_tokens ?? 
0;\n\n if (cacheCreated > 0) {\n console.log(\n `[LLMClient:Anthropic] Cache WRITE: ${cacheCreated} tokens cached`,\n );\n }\n if (cacheRead > 0) {\n const savingsPercent = Math.round(\n (cacheRead / (cacheRead + inputTokens)) * 100,\n );\n console.log(\n `[LLMClient:Anthropic] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,\n );\n }\n if (cacheCreated === 0 && cacheRead === 0 && inputTokens > 0) {\n if (inputTokens < 500) {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens (likely cached)`,\n );\n } else {\n console.log(\n `[LLMClient:Anthropic] ${inputTokens} input, ${outputTokens} output tokens`,\n );\n }\n }\n }\n },\n },\n ],\n });\n }\n\n const useCompletionTokens = this.usesMaxCompletionTokens();\n\n const tokenConfig = maxTokens\n ? useCompletionTokens\n ? { modelKwargs: { max_completion_tokens: maxTokens } }\n : { maxTokens }\n : {};\n\n const timeout = this.provider === 'deepseek' ? 600000 : undefined;\n\n // Kimi-k2.5: disable thinking to avoid reasoning_content issues with tool calls\n // When thinking is disabled, temperature must be 0.6 (not 1.0)\n const isKimi = this.provider === 'kimi';\n const effectiveTemp = isKimi ? 0.6 : temperature;\n\n // Build modelKwargs incrementally to avoid spread conflicts\n const modelKwargs: Record<string, unknown> = {};\n if (useCompletionTokens && maxTokens) {\n modelKwargs.max_completion_tokens = maxTokens;\n }\n if (isKimi) {\n modelKwargs.thinking = { type: 'disabled' };\n }\n // OpenRouter (Qwen): explicit tool_choice so the model doesn't ignore tool definitions\n if (this.provider === 'openrouter') {\n modelKwargs.tool_choice = 'auto';\n }\n\n return new ChatOpenAI({\n apiKey: this.providerConfig.apiKey,\n model: this.modelName,\n temperature: useCompletionTokens ? undefined : effectiveTemp,\n streaming: this.streaming,\n timeout,\n ...(Object.keys(modelKwargs).length > 0 ? { modelKwargs } : {}),\n ...(useCompletionTokens ? {} : maxTokens ? { maxTokens } : {}),\n configuration: {\n apiKey: this.providerConfig.apiKey,\n ...(this.providerConfig.baseUrl\n ? { baseURL: this.providerConfig.baseUrl }\n : {}),\n },\n });\n }\n\n private getModelWithOptions(options: {\n maxTokens?: number;\n temperature?: number;\n }): ChatModel {\n return this.createModel(options);\n }\n\n /**\n * Check if this model is a Qwen3.5 thinking model.\n * These models burn all output tokens on internal reasoning\n * unless thinking is explicitly disabled via /no_think prefix.\n */\n private isQwenThinkingModel(): boolean {\n return this.modelName.includes('qwen3.5');\n }\n\n /**\n * Prepare user prompt with provider-specific adjustments.\n * Qwen3.5 models require /no_think to disable reasoning mode.\n */\n private prepareUserPrompt(prompt: string): string {\n if (this.isQwenThinkingModel()) {\n return `/no_think\\n${prompt}`;\n }\n return prompt;\n }\n\n getProvider(): LLMProvider {\n return this.provider;\n }\n\n getModelName(): string {\n return this.modelName;\n }\n\n getModel(): ChatModel {\n return this.model;\n }\n\n getRateLimiterStatus() {\n return this.rateLimiter.getStatus();\n }\n\n getTokenUsage() {\n return this.tokenTracker?.getSummary() ?? 
null;\n }\n\n async call<T>(options: LLMCallOptions<T>): Promise<T> {\n const response = await this.callWithMetadata(options);\n return response.data;\n }\n\n async callWithMetadata<T>(options: LLMCallOptions<T>): Promise<LLMResponse<T>> {\n const {\n systemPrompt,\n userPrompt,\n schema,\n maxRetries = 2,\n retryWithContext = true,\n maxTokens,\n skipSchemaValidation = false,\n temperature,\n } = options;\n\n let currentPrompt = userPrompt;\n let lastError: Error | null = null;\n\n console.log(\n `[LLMClient:call] Starting call to ${this.provider}/${this.modelName}`,\n );\n console.log(`[LLMClient:call] Prompt length: ${userPrompt.length} chars`);\n if (maxTokens) {\n console.log(`[LLMClient:call] Max tokens: ${maxTokens}`);\n }\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1}/${maxRetries + 1}...`,\n );\n const attemptStartTime = Date.now();\n\n const result = await this.rateLimiter.execute(async () => {\n console.log(`[LLMClient:call] Invoking model...`);\n const invokeStartTime = Date.now();\n\n const modelToUse =\n maxTokens || temperature !== undefined\n ? this.getModelWithOptions({ maxTokens, temperature })\n : this.model;\n\n const messages = [\n { role: 'system', content: systemPrompt },\n { role: 'user', content: this.prepareUserPrompt(currentPrompt) },\n ];\n const response = await modelToUse.invoke(\n this.provider === 'anthropic'\n ? addCacheControlToSystemMessages(messages)\n : messages,\n );\n\n console.log(\n `[LLMClient:call] Model responded in ${Date.now() - invokeStartTime}ms`,\n );\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) +\n (usageMeta.output_tokens || 0),\n };\n console.log(\n `[LLMClient:call] Tokens used: ${usage.promptTokens} in, ${usage.completionTokens} out`,\n );\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n if (finishReason === 'length') {\n console.warn(\n `[LLMClient:call] Response truncated (finish_reason=length)`,\n );\n }\n\n const content =\n typeof response.content === 'string'\n ? response.content\n : JSON.stringify(response.content);\n\n console.log(\n `[LLMClient:call] Response length: ${content.length} chars, finish_reason: ${finishReason}`,\n );\n\n return { content, finishReason, usage };\n });\n\n console.log(\n `[LLMClient:call] Attempt ${attempt + 1} completed in ${Date.now() - attemptStartTime}ms, parsing response...`,\n );\n\n const parsed = skipSchemaValidation\n ? (parseJsonResponse(result.content, undefined) as T)\n : parseJsonResponse(result.content, schema);\n console.log(\n `[LLMClient:call] Response parsed successfully${skipSchemaValidation ? ' (schema validation skipped)' : ''}`,\n );\n\n return {\n data: parsed,\n raw: result.content,\n finishReason: result.finishReason,\n usage: result.usage,\n };\n } catch (error) {\n lastError = error instanceof Error ? 
error : new Error(String(error));\n console.error(\n `[LLMClient:call] Attempt ${attempt + 1} failed:`,\n lastError.message,\n );\n\n if (this.isRateLimitError(lastError)) {\n console.error(`[LLMClient:call] Rate limit error, not retrying`);\n throw lastError;\n }\n\n if (attempt < maxRetries && retryWithContext) {\n console.log(`[LLMClient:call] Will retry with error context`);\n currentPrompt =\n `${userPrompt}\\n\\n` +\n `[Previous attempt failed with: ${lastError.message}]\\n` +\n `Please output valid JSON that matches the expected schema.`;\n }\n }\n }\n\n console.error(`[LLMClient:call] All attempts exhausted, throwing error`);\n throw lastError;\n }\n\n private extractFinishReason(\n response: Awaited<ReturnType<ChatOpenAI['invoke']>>,\n ): LLMFinishReason {\n const metadata = response.response_metadata as\n | Record<string, unknown>\n | undefined;\n if (metadata?.finish_reason) {\n const reason = metadata.finish_reason as string;\n if (\n reason === 'stop' ||\n reason === 'length' ||\n reason === 'content_filter' ||\n reason === 'tool_calls'\n ) {\n return reason;\n }\n }\n return null;\n }\n\n async callRaw(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<string> {\n const response = await this.callRawWithMetadata(options);\n return response.raw;\n }\n\n async callRawWithMetadata(options: {\n systemPrompt: string;\n userPrompt: string;\n maxTokens?: number;\n }): Promise<Omit<LLMResponse<string>, 'data'> & { raw: string }> {\n const { systemPrompt, userPrompt, maxTokens } = options;\n\n return this.rateLimiter.execute(async () => {\n const modelToUse = maxTokens\n ? this.getModelWithOptions({ maxTokens })\n : this.model;\n\n const messages = [\n { role: 'system', content: systemPrompt },\n { role: 'user', content: this.prepareUserPrompt(userPrompt) },\n ];\n const response = await modelToUse.invoke(\n this.provider === 'anthropic'\n ? addCacheControlToSystemMessages(messages)\n : messages,\n );\n\n let usage: LLMUsage | null = null;\n if (response.usage_metadata) {\n const usageMeta = response.usage_metadata as {\n input_tokens?: number;\n output_tokens?: number;\n };\n usage = {\n promptTokens: usageMeta.input_tokens || 0,\n completionTokens: usageMeta.output_tokens || 0,\n totalTokens:\n (usageMeta.input_tokens || 0) + (usageMeta.output_tokens || 0),\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n }\n\n const finishReason = this.extractFinishReason(response);\n const content =\n typeof response.content === 'string'\n ? response.content\n : JSON.stringify(response.content);\n\n return { raw: content, finishReason, usage };\n });\n }\n\n /**\n * Stream a raw text response as an async iterator of content chunks.\n * Uses the underlying LangChain model's .stream() method.\n *\n * @param options - System prompt plus full message history\n * @yields LLMStreamChunk with content deltas and a done flag\n */\n async *streamRaw(options: LLMStreamOptions): AsyncGenerator<LLMStreamChunk> {\n const { messages, maxTokens, temperature } = options;\n\n const modelToUse = (maxTokens || temperature !== undefined)\n ? this.getModelWithOptions({ maxTokens, temperature })\n : this.model;\n\n const langchainMessages = this.provider === 'anthropic'\n ? addCacheControlToSystemMessages(messages)\n : messages;\n\n const stream = await modelToUse.stream(langchainMessages);\n\n for await (const chunk of stream) {\n const content = typeof chunk.content === 'string'\n ? 
chunk.content\n : Array.isArray(chunk.content)\n ? chunk.content\n .filter((c): c is { type: 'text'; text: string } => typeof c === 'object' && c !== null && 'text' in c)\n .map((c) => c.text)\n .join('')\n : '';\n\n if (content) {\n yield { content, done: false };\n }\n }\n\n yield { content: '', done: true };\n }\n\n private isRateLimitError(error: Error): boolean {\n const message = error.message.toLowerCase();\n return (\n message.includes('rate limit') ||\n message.includes('429') ||\n message.includes('quota exceeded')\n );\n }\n\n // ==========================================================================\n // Anthropic Cache Control Support\n // ==========================================================================\n\n async callWithCache<T>(\n options: CacheAwareLLMCallOptions<T>,\n ): Promise<LLMResponse<T>> {\n const {\n systemPrompt,\n userPrompt,\n systemBlocks,\n userBlocks,\n schema,\n maxRetries = 2,\n maxTokens,\n skipSchemaValidation = false,\n temperature,\n rawText = false,\n } = options;\n\n if (this.provider !== 'anthropic') {\n console.log(\n `[LLMClient:callWithCache] Provider ${this.provider} doesn't support caching, using regular call`,\n );\n return this.callWithMetadata(options);\n }\n\n const cacheableCount =\n (systemBlocks || []).filter((b) => b.cache_control).length +\n (userBlocks || []).filter((b) => b.cache_control).length;\n console.log(\n `[LLMClient:callWithCache] ${cacheableCount} cacheable block(s)`,\n );\n\n let lastError: Error | null = null;\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n console.log(\n `[LLMClient:callWithCache] Attempt ${attempt + 1}/${maxRetries + 1}...`,\n );\n\n const result = await this.rateLimiter.execute(async () => {\n const anthropic = new Anthropic();\n\n const systemContent =\n systemBlocks && systemBlocks.length > 0\n ? systemBlocks.map((b) => ({\n type: 'text' as const,\n text: b.text,\n ...(b.cache_control\n ? { cache_control: b.cache_control }\n : {}),\n }))\n : systemPrompt\n ? [{ type: 'text' as const, text: systemPrompt }]\n : [];\n\n const userContent =\n userBlocks && userBlocks.length > 0\n ? userBlocks.map((b) => ({\n type: 'text' as const,\n text: b.text,\n ...(b.cache_control\n ? { cache_control: b.cache_control }\n : {}),\n }))\n : userPrompt\n ? [{ type: 'text' as const, text: userPrompt }]\n : [];\n\n const response = await anthropic.messages.create({\n model: this.modelName,\n max_tokens: maxTokens || 8192,\n temperature: temperature ?? 0,\n system: systemContent,\n messages: [{ role: 'user', content: userContent }],\n });\n\n const textContent = response.content.find((c) => c.type === 'text');\n const content =\n textContent && 'text' in textContent ? 
textContent.text : '';\n\n const apiUsage = response.usage as {\n input_tokens: number;\n output_tokens: number;\n cache_creation_input_tokens?: number;\n cache_read_input_tokens?: number;\n };\n\n const cacheRead = apiUsage.cache_read_input_tokens || 0;\n const cacheCreation = apiUsage.cache_creation_input_tokens || 0;\n\n if (cacheCreation > 0) {\n console.log(\n `[LLMClient:callWithCache] Cache WRITE: ${cacheCreation} tokens`,\n );\n }\n if (cacheRead > 0) {\n const savingsPercent = Math.round(\n (cacheRead / (cacheRead + apiUsage.input_tokens)) * 100,\n );\n console.log(\n `[LLMClient:callWithCache] Cache HIT: ${cacheRead} tokens (~${savingsPercent}% of prompt)`,\n );\n }\n if (cacheCreation === 0 && cacheRead === 0) {\n console.log(\n `[LLMClient:callWithCache] No caching: ${apiUsage.input_tokens} input tokens`,\n );\n }\n\n const usage: LLMUsage = {\n promptTokens: apiUsage.input_tokens,\n completionTokens: apiUsage.output_tokens,\n totalTokens: apiUsage.input_tokens + apiUsage.output_tokens,\n };\n\n if (this.tokenTracker) {\n this.tokenTracker.addUsage(\n usage.promptTokens,\n usage.completionTokens,\n );\n }\n\n const finishReason =\n response.stop_reason === 'end_turn'\n ? 'stop'\n : response.stop_reason;\n\n return {\n content,\n finishReason: finishReason as LLMFinishReason,\n usage,\n };\n });\n\n let parsed: T;\n if (rawText) {\n parsed = result.content as unknown as T;\n } else if (skipSchemaValidation) {\n parsed = parseJsonResponse(result.content, undefined) as T;\n } else {\n parsed = parseJsonResponse(result.content, schema);\n }\n\n return {\n data: parsed,\n raw: result.content,\n finishReason: result.finishReason,\n usage: result.usage,\n };\n } catch (error) {\n lastError = error instanceof Error ? error : new Error(String(error));\n console.error(\n `[LLMClient:callWithCache] Attempt ${attempt + 1} failed:`,\n lastError.message,\n );\n\n if (this.isRateLimitError(lastError)) {\n throw lastError;\n }\n }\n }\n\n throw lastError;\n }\n\n static cacheableBlock(text: string, cache = true): CacheableBlock {\n return cache\n ? { type: 'text', text, cache_control: { type: 'ephemeral' } }\n : { type: 'text', text };\n }\n}\n\n// ============================================================================\n// Singleton Instances\n// ============================================================================\n\nconst sharedClients: Partial<Record<LLMProvider, LLMClient>> = {};\n\nexport function getSharedLLMClient(options?: LLMClientOptions): LLMClient {\n const provider = options?.provider || 'openai';\n if (!sharedClients[provider]) {\n sharedClients[provider] = new LLMClient(options);\n }\n return sharedClients[provider]!;\n}\n\nexport function resetSharedLLMClient(provider?: LLMProvider): void {\n if (provider) {\n delete sharedClients[provider];\n } else {\n for (const key of Object.keys(sharedClients) as LLMProvider[]) {\n delete sharedClients[key];\n }\n }\n}\n\n// ============================================================================\n// Provider Detection\n// ============================================================================\n\nexport function getAvailableProvider(): LLMProvider {\n if (process.env.ANTHROPIC_API_KEY) return 'anthropic';\n if (process.env.DEEPSEEK_API_KEY) return 'deepseek';\n if (process.env.KIMI_API_KEY) return 'kimi';\n if (process.env.OPENAI_API_KEY) return 'openai';\n throw new Error(\n 'No LLM API key found. 
Please set ANTHROPIC_API_KEY, OPENAI_API_KEY, DEEPSEEK_API_KEY, or KIMI_API_KEY.',\n );\n}\n\nexport function isProviderAvailable(provider: LLMProvider): boolean {\n switch (provider) {\n case 'openai':\n return !!process.env.OPENAI_API_KEY;\n case 'deepseek':\n return !!process.env.DEEPSEEK_API_KEY;\n case 'anthropic':\n return !!process.env.ANTHROPIC_API_KEY;\n case 'kimi':\n return !!process.env.KIMI_API_KEY;\n case 'openrouter':\n return !!process.env.OPEN_ROUTER_API_KEY;\n case 'orbgen':\n return !!process.env.ORBGEN_URL;\n default:\n return false;\n }\n}\n\n// ============================================================================\n// Convenience Functions\n// ============================================================================\n\n/**\n * Create an LLM client optimized for requirements analysis.\n *\n * Uses lower temperature (0.3) for more deterministic output.\n * Defaults to GPT-5.1 for OpenAI or DeepSeek Chat.\n *\n * @param {Partial<LLMClientOptions>} [options] - Optional client configuration\n * @returns {LLMClient} Configured LLM client\n */\nexport function createRequirementsClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek' ? DEEPSEEK_MODELS.CHAT : OPENAI_MODELS.GPT_5_1;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.3,\n ...options,\n });\n}\n\n/**\n * Create an LLM client optimized for creative tasks.\n *\n * Uses higher temperature (0.7) for more varied output.\n * Defaults to GPT-4o or DeepSeek Reasoner.\n *\n * @param {Partial<LLMClientOptions>} [options] - Optional client configuration\n * @returns {LLMClient} Configured LLM client\n */\nexport function createCreativeClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek' ? DEEPSEEK_MODELS.REASONER : OPENAI_MODELS.GPT4O;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.7,\n ...options,\n });\n}\n\n/**\n * Create an LLM client optimized for code fixing.\n *\n * Uses low temperature (0.2) for precise, deterministic fixes.\n * Defaults to GPT-4o Mini or DeepSeek Chat for cost efficiency.\n *\n * @param {Partial<LLMClientOptions>} [options] - Optional client configuration\n * @returns {LLMClient} Configured LLM client\n */\nexport function createFixClient(\n options?: Partial<LLMClientOptions>,\n): LLMClient {\n const provider = options?.provider || getAvailableProvider();\n const defaultModel =\n provider === 'deepseek'\n ? 
DEEPSEEK_MODELS.CHAT\n : OPENAI_MODELS.GPT4O_MINI;\n return new LLMClient({\n provider,\n model: defaultModel,\n temperature: 0.2,\n ...options,\n });\n}\n\n/**\n * Create a DeepSeek LLM client.\n *\n * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration\n * @returns {LLMClient} Configured DeepSeek client\n */\nexport function createDeepSeekClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'deepseek',\n model: DEEPSEEK_MODELS.CHAT,\n ...options,\n });\n}\n\n/**\n * Create an OpenAI LLM client.\n *\n * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration\n * @returns {LLMClient} Configured OpenAI client\n */\nexport function createOpenAIClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openai',\n model: OPENAI_MODELS.GPT4O,\n ...options,\n });\n}\n\n/**\n * Create an Anthropic LLM client.\n *\n * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration\n * @returns {LLMClient} Configured Anthropic client\n */\nexport function createAnthropicClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'anthropic',\n model: ANTHROPIC_MODELS.CLAUDE_SONNET_4_5,\n ...options,\n });\n}\n\n/**\n * Create a Kimi LLM client.\n *\n * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration\n * @returns {LLMClient} Configured Kimi client\n */\nexport function createKimiClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'kimi',\n model: KIMI_MODELS.K2_5,\n ...options,\n });\n}\n\n/**\n * Create an OpenRouter LLM client.\n *\n * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration\n * @returns {LLMClient} Configured OpenRouter client\n */\nexport function createOpenRouterClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openrouter',\n model: OPENROUTER_MODELS.QWEN_2_5_72B,\n ...options,\n });\n}\n\n/**\n * Create a Zhipu (GLM) LLM client via OpenRouter.\n *\n * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration\n * @returns {LLMClient} Configured Zhipu client\n */\nexport function createZhipuClient(\n options?: Partial<Omit<LLMClientOptions, 'provider'>>,\n): LLMClient {\n return new LLMClient({\n provider: 'openrouter',\n model: OPENROUTER_MODELS.GLM_4_7,\n ...options,\n 
});\n}\n"],"mappings":";;;;;;;;;;AAaA,SAAS,kBAAkB;AAC3B,SAAS,qBAAqB;AAE9B,OAAO,eAAe;AActB,SAAS,gCACP,UACmB;AACnB,SAAO,SAAS,IAAI,CAAC,QAAQ;AAC3B,QAAI,IAAI,SAAS,UAAU;AACzB,aAAO;AAAA,IACT;AAEA,WAAO;AAAA,MACL,MAAM,IAAI;AAAA,MACV,SAAS;AAAA,QACP;AAAA,UACE,MAAM;AAAA,UACN,MAAM,IAAI;AAAA,UACV,eAAe,EAAE,MAAM,YAAY;AAAA,QACrC;AAAA,MACF;AAAA,IACF;AAAA,EACF,CAAC;AACH;AAsFA,IAAM,mBAA8D;AAAA,EAClE,QAAQ,MAAM;AACZ,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO,EAAE,QAAQ,SAAS,QAAW,cAAc,SAAS;AAAA,EAC9D;AAAA,EACA,UAAU,MAAM;AACd,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,WAAW,MAAM;AACf,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,MAAM,MAAM;AACV,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA,IAChB;AAAA,EACF;AAAA,EACA,YAAY,MAAM;AAChB,UAAM,SAAS,QAAQ,IAAI;AAC3B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL;AAAA,MACA,SAAS;AAAA,MACT,cAAc;AAAA;AAAA,IAChB;AAAA,EACF;AAAA,EACA,QAAQ,MAAM;AACZ,UAAM,UAAU,QAAQ,IAAI;AAC5B,QAAI,CAAC,SAAS;AACZ,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AACA,WAAO;AAAA,MACL,QAAQ;AAAA,MACR,SAAS,GAAG,OAAO;AAAA,MACnB,cAAc;AAAA,IAChB;AAAA,EACF;AACF;AAEO,IAAM,kBAAkB;AAAA,EAC7B,MAAM;AAAA,EACN,OAAO;AAAA,EACP,UAAU;AACZ;AAEO,IAAM,gBAAgB;AAAA,EAC3B,OAAO;AAAA,EACP,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,SAAS;AACX;AAEO,IAAM,mBAAmB;AAAA,EAC9B,mBAAmB;AAAA,EACnB,iBAAiB;AAAA,EACjB,iBAAiB;AAAA,EACjB,kBAAkB;AACpB;AAEO,IAAM,cAAc;AAAA,EACzB,MAAM;AACR;AAEO,IAAM,oBAAoB;AAAA;AAAA,EAE/B,cAAc;AAAA,EACd,oBAAoB;AAAA,EACpB,aAAa;AAAA;AAAA;AAAA,EAIb,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,aAAa;AAAA;AAAA;AAAA,EAIb,mBAAmB;AAAA;AAAA,EAEnB,oBAAoB;AAAA;AAAA,EAGpB,eAAe;AAAA,EACf,gBAAgB;AAAA,EAChB,kBAAkB;AAAA,EAClB,eAAe;AAAA;AAAA,EAGf,SAAS;AAAA;AAAA,EAGT,SAAS;AACX;AAEA,IAAM,sBAAsB;AAMrB,IAAM,YAAN,MAAgB;AAAA,EAUrB,YAAY,UAA4B,CAAC,GAAG;AAC1C,SAAK,WAAW,QAAQ,YAAY;AAEpC,SAAK,cAAc,QAAQ,gBACxB,KAAK,aAAa,SAAS,MAAM;AACpC,SAAK,YAAY,QAAQ,aAAa;AAEtC,SAAK,iBAAiB,iBAAiB,KAAK,QAAQ,EAAE;AACtD,SAAK,YAAY,QAAQ,SAAS,KAAK,eAAe;AAEtD,UAAM,aAAa,KAAK,eAAe,OAAO,MAAM,EAAE;AACtD,YAAQ;AAAA,MACN,yBAAyB,KAAK,QAAQ,YAAY,KAAK,SAAS,cAAc,UAAU;AAAA,IAC1F;AACA,QAAI,KAAK,eAAe,SAAS;AAC/B,cAAQ;AAAA,QACN,sCAAsC,KAAK,eAAe,OAAO;AAAA,MACnE;AAAA,IACF;AAEA,SAAK,QAAQ,KAAK,YAAY;AAE9B,SAAK,cACH,QAAQ,yBAAyB,QAC7B,qBAAqB,QAAQ,WAAW,IACxC,IAAI,YAAY,QAAQ,WAAW;AAEzC,SAAK,eACH,QAAQ,gBAAgB,QACpB,sBAAsB,KAAK,SAAS,IACpC;AAAA,EACR;AAAA,EAEQ,0BAAmC;AACzC,UAAM,QAAQ,KAAK,UAAU,YAAY;AACzC,WACE,MAAM,WAAW,IAAI,KACrB,MAAM,WAAW,OAAO,KACxB,MAAM,SAAS,KAAK,KACpB,MAAM,SAAS,IAAI;AAAA,EAEvB;AAAA,EAEQ,YAAY,SAGN;AACZ,UAAM,YAAY,SAAS;AAC3B,UAAM,cAAc,SAAS,eAAe,KAAK;AAEjD,QAAI,KAAK,aAAa,aAAa;AACjC,aAAO,IAAI,cAAc;AAAA,QACvB,QAAQ,KAAK,eAAe;AAAA,QAC5B,OAAO,KAAK;AAAA,QACZ;AAAA,QACA,WAAW,KAAK;AAAA,QAChB,WAAW,aAAa;AAAA,QACxB,WAAW;AAAA,UACT;AAAA,YACE,cAAc,CAAC,WAAW;AACxB,oBAAM,aAAa,OAAO,cAAc,CAAC,IAAI,CAAC;AAC9C,oBAAM,QACJ,YAUC,SAAS;AAEZ,kBAAI,OAAO;AACT,sBAAM,eAAe,MAAM,+BAA+B;AAC1D,sBAAM,YAAY,MAAM,2BAA2B;AACnD,sBAAM,cAAc,MAAM,gBAAgB;AAC1C,sBAAM,eAAe,MAAM,iBAAiB;AAE5C,oBAAI,eAAe,GAAG;AACpB,0BAAQ;AAAA,oBACN,sCAAsC,YAAY;AAAA,kBACpD;AAAA,gBACF;AACA,oBAAI,YAAY,GAAG;AACjB,wBAAM,iBAAiB,KAAK;AAAA,oBACzB,aAAa,YAAY,eAAgB;AAAA,kBAC5C;AACA,0BAAQ;AAAA,oBACN,oCAAoC,SAAS,aAAa,cAAc;AAAA,kBAC1E;AAAA,gBACF;AACA,oBAAI,iBAAiB,KAAK,c
AAc,KAAK,cAAc,GAAG;AAC5D,sBAAI,cAAc,KAAK;AACrB,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF,OAAO;AACL,4BAAQ;AAAA,sBACN,yBAAyB,WAAW,WAAW,YAAY;AAAA,oBAC7D;AAAA,kBACF;AAAA,gBACF;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AAEA,UAAM,sBAAsB,KAAK,wBAAwB;AAEzD,UAAM,cAAc,YAChB,sBACE,EAAE,aAAa,EAAE,uBAAuB,UAAU,EAAE,IACpD,EAAE,UAAU,IACd,CAAC;AAEL,UAAM,UAAU,KAAK,aAAa,aAAa,MAAS;AAIxD,UAAM,SAAS,KAAK,aAAa;AACjC,UAAM,gBAAgB,SAAS,MAAM;AAGrC,UAAM,cAAuC,CAAC;AAC9C,QAAI,uBAAuB,WAAW;AACpC,kBAAY,wBAAwB;AAAA,IACtC;AACA,QAAI,QAAQ;AACV,kBAAY,WAAW,EAAE,MAAM,WAAW;AAAA,IAC5C;AAEA,QAAI,KAAK,aAAa,cAAc;AAClC,kBAAY,cAAc;AAAA,IAC5B;AAEA,WAAO,IAAI,WAAW;AAAA,MACpB,QAAQ,KAAK,eAAe;AAAA,MAC5B,OAAO,KAAK;AAAA,MACZ,aAAa,sBAAsB,SAAY;AAAA,MAC/C,WAAW,KAAK;AAAA,MAChB;AAAA,MACA,GAAI,OAAO,KAAK,WAAW,EAAE,SAAS,IAAI,EAAE,YAAY,IAAI,CAAC;AAAA,MAC7D,GAAI,sBAAsB,CAAC,IAAI,YAAY,EAAE,UAAU,IAAI,CAAC;AAAA,MAC5D,eAAe;AAAA,QACb,QAAQ,KAAK,eAAe;AAAA,QAC5B,GAAI,KAAK,eAAe,UACpB,EAAE,SAAS,KAAK,eAAe,QAAQ,IACvC,CAAC;AAAA,MACP;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEQ,oBAAoB,SAGd;AACZ,WAAO,KAAK,YAAY,OAAO;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,sBAA+B;AACrC,WAAO,KAAK,UAAU,SAAS,SAAS;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,kBAAkB,QAAwB;AAChD,QAAI,KAAK,oBAAoB,GAAG;AAC9B,aAAO;AAAA,EAAc,MAAM;AAAA,IAC7B;AACA,WAAO;AAAA,EACT;AAAA,EAEA,cAA2B;AACzB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,eAAuB;AACrB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,WAAsB;AACpB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,uBAAuB;AACrB,WAAO,KAAK,YAAY,UAAU;AAAA,EACpC;AAAA,EAEA,gBAAgB;AACd,WAAO,KAAK,cAAc,WAAW,KAAK;AAAA,EAC5C;AAAA,EAEA,MAAM,KAAQ,SAAwC;AACpD,UAAM,WAAW,MAAM,KAAK,iBAAiB,OAAO;AACpD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,iBAAoB,SAAqD;AAC7E,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb,mBAAmB;AAAA,MACnB;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,IACF,IAAI;AAEJ,QAAI,gBAAgB;AACpB,QAAI,YAA0B;AAE9B,YAAQ;AAAA,MACN,qCAAqC,KAAK,QAAQ,IAAI,KAAK,SAAS;AAAA,IACtE;AACA,YAAQ,IAAI,mCAAmC,WAAW,MAAM,QAAQ;AACxE,QAAI,WAAW;AACb,cAAQ,IAAI,gCAAgC,SAAS,EAAE;AAAA,IACzD;AAEA,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QAC3D;AACA,cAAM,mBAAmB,KAAK,IAAI;AAElC,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,kBAAQ,IAAI,oCAAoC;AAChD,gBAAM,kBAAkB,KAAK,IAAI;AAEjC,gBAAM,aACJ,aAAa,gBAAgB,SACzB,KAAK,oBAAoB,EAAE,WAAW,YAAY,CAAC,IACnD,KAAK;AAEX,gBAAM,WAAW;AAAA,YACf,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,YACxC,EAAE,MAAM,QAAQ,SAAS,KAAK,kBAAkB,aAAa,EAAE;AAAA,UACjE;AACA,gBAAM,WAAW,MAAM,WAAW;AAAA,YAChC,KAAK,aAAa,cACd,gCAAgC,QAAQ,IACxC;AAAA,UACN;AAEA,kBAAQ;AAAA,YACN,uCAAuC,KAAK,IAAI,IAAI,eAAe;AAAA,UACrE;AAEA,cAAI,QAAyB;AAC7B,cAAI,SAAS,gBAAgB;AAC3B,kBAAM,YAAY,SAAS;AAI3B,oBAAQ;AAAA,cACN,cAAc,UAAU,gBAAgB;AAAA,cACxC,kBAAkB,UAAU,iBAAiB;AAAA,cAC7C,cACG,UAAU,gBAAgB,MAC1B,UAAU,iBAAiB;AAAA,YAChC;AACA,oBAAQ;AAAA,cACN,iCAAiC,MAAM,YAAY,QAAQ,MAAM,gBAAgB;AAAA,YACnF;AAEA,gBAAI,KAAK,cAAc;AACrB,mBAAK,aAAa;AAAA,gBAChB,MAAM;AAAA,gBACN,MAAM;AAAA,cACR;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,cAAI,iBAAiB,UAAU;AAC7B,oBAAQ;AAAA,cACN;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,kBAAQ;AAAA,YACN,qCAAqC,QAAQ,MAAM,0BAA0B,YAAY;AAAA,UAC3F;AAEA,iBAAO,EAAE,SAAS,cAAc,MAAM;AAAA,QACxC,CAAC;AAED,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC,iBAAiB,KAAK,IAAI,IAAI,gBAAgB;AAAA,QACvF;AAEA,cAAM,SAAS,uBACV,kBAAkB,OAAO,SAAS,MAAS,IAC5C,kBAAkB,OAAO,SAAS,MAAM;AAC5C,gBAAQ;AAAA,UACN,gDAAgD,uBAAuB,iCAAiC,EAAE;AAAA,QAC5G;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC
;AACpE,gBAAQ;AAAA,UACN,4BAA4B,UAAU,CAAC;AAAA,UACvC,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,kBAAQ,MAAM,iDAAiD;AAC/D,gBAAM;AAAA,QACR;AAEA,YAAI,UAAU,cAAc,kBAAkB;AAC5C,kBAAQ,IAAI,gDAAgD;AAC5D,0BACE,GAAG,UAAU;AAAA;AAAA,iCACqB,UAAU,OAAO;AAAA;AAAA,QAEvD;AAAA,MACF;AAAA,IACF;AAEA,YAAQ,MAAM,yDAAyD;AACvE,UAAM;AAAA,EACR;AAAA,EAEQ,oBACN,UACiB;AACjB,UAAM,WAAW,SAAS;AAG1B,QAAI,UAAU,eAAe;AAC3B,YAAM,SAAS,SAAS;AACxB,UACE,WAAW,UACX,WAAW,YACX,WAAW,oBACX,WAAW,cACX;AACA,eAAO;AAAA,MACT;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,QAAQ,SAIM;AAClB,UAAM,WAAW,MAAM,KAAK,oBAAoB,OAAO;AACvD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,MAAM,oBAAoB,SAIuC;AAC/D,UAAM,EAAE,cAAc,YAAY,UAAU,IAAI;AAEhD,WAAO,KAAK,YAAY,QAAQ,YAAY;AAC1C,YAAM,aAAa,YACf,KAAK,oBAAoB,EAAE,UAAU,CAAC,IACtC,KAAK;AAET,YAAM,WAAW;AAAA,QACf,EAAE,MAAM,UAAU,SAAS,aAAa;AAAA,QACxC,EAAE,MAAM,QAAQ,SAAS,KAAK,kBAAkB,UAAU,EAAE;AAAA,MAC9D;AACA,YAAM,WAAW,MAAM,WAAW;AAAA,QAChC,KAAK,aAAa,cACd,gCAAgC,QAAQ,IACxC;AAAA,MACN;AAEA,UAAI,QAAyB;AAC7B,UAAI,SAAS,gBAAgB;AAC3B,cAAM,YAAY,SAAS;AAI3B,gBAAQ;AAAA,UACN,cAAc,UAAU,gBAAgB;AAAA,UACxC,kBAAkB,UAAU,iBAAiB;AAAA,UAC7C,cACG,UAAU,gBAAgB,MAAM,UAAU,iBAAiB;AAAA,QAChE;AAEA,YAAI,KAAK,cAAc;AACrB,eAAK,aAAa;AAAA,YAChB,MAAM;AAAA,YACN,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAEA,YAAM,eAAe,KAAK,oBAAoB,QAAQ;AACtD,YAAM,UACJ,OAAO,SAAS,YAAY,WACxB,SAAS,UACT,KAAK,UAAU,SAAS,OAAO;AAErC,aAAO,EAAE,KAAK,SAAS,cAAc,MAAM;AAAA,IAC7C,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,UAAU,SAA2D;AAC1E,UAAM,EAAE,UAAU,WAAW,YAAY,IAAI;AAE7C,UAAM,aAAc,aAAa,gBAAgB,SAC7C,KAAK,oBAAoB,EAAE,WAAW,YAAY,CAAC,IACnD,KAAK;AAET,UAAM,oBAAoB,KAAK,aAAa,cACxC,gCAAgC,QAAQ,IACxC;AAEJ,UAAM,SAAS,MAAM,WAAW,OAAO,iBAAiB;AAExD,qBAAiB,SAAS,QAAQ;AAChC,YAAM,UAAU,OAAO,MAAM,YAAY,WACrC,MAAM,UACN,MAAM,QAAQ,MAAM,OAAO,IACzB,MAAM,QACH,OAAO,CAAC,MAA2C,OAAO,MAAM,YAAY,MAAM,QAAQ,UAAU,CAAC,EACrG,IAAI,CAAC,MAAM,EAAE,IAAI,EACjB,KAAK,EAAE,IACV;AAEN,UAAI,SAAS;AACX,cAAM,EAAE,SAAS,MAAM,MAAM;AAAA,MAC/B;AAAA,IACF;AAEA,UAAM,EAAE,SAAS,IAAI,MAAM,KAAK;AAAA,EAClC;AAAA,EAEQ,iBAAiB,OAAuB;AAC9C,UAAM,UAAU,MAAM,QAAQ,YAAY;AAC1C,WACE,QAAQ,SAAS,YAAY,KAC7B,QAAQ,SAAS,KAAK,KACtB,QAAQ,SAAS,gBAAgB;AAAA,EAErC;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,cACJ,SACyB;AACzB,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb;AAAA,MACA,uBAAuB;AAAA,MACvB;AAAA,MACA,UAAU;AAAA,IACZ,IAAI;AAEJ,QAAI,KAAK,aAAa,aAAa;AACjC,cAAQ;AAAA,QACN,sCAAsC,KAAK,QAAQ;AAAA,MACrD;AACA,aAAO,KAAK,iBAAiB,OAAO;AAAA,IACtC;AAEA,UAAM,kBACH,gBAAgB,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE,UACnD,cAAc,CAAC,GAAG,OAAO,CAAC,MAAM,EAAE,aAAa,EAAE;AACpD,YAAQ;AAAA,MACN,6BAA6B,cAAc;AAAA,IAC7C;AAEA,QAAI,YAA0B;AAE9B,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC,IAAI,aAAa,CAAC;AAAA,QACpE;AAEA,cAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,YAAY;AACxD,gBAAM,YAAY,IAAI,UAAU;AAEhC,gBAAM,gBACJ,gBAAgB,aAAa,SAAS,IAClC,aAAa,IAAI,CAAC,OAAO;AAAA,YACvB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,eACE,CAAC,EAAE,MAAM,QAAiB,MAAM,aAAa,CAAC,IAC9C,CAAC;AAET,gBAAM,cACJ,cAAc,WAAW,SAAS,IAC9B,WAAW,IAAI,CAAC,OAAO;AAAA,YACrB,MAAM;AAAA,YACN,MAAM,EAAE;AAAA,YACR,GAAI,EAAE,gBACF,EAAE,eAAe,EAAE,cAAc,IACjC,CAAC;AAAA,UACP,EAAE,IACF,aACE,CAAC,EAAE,MAAM,QAAiB,MAAM,WAAW,CAAC,IAC5C,CAAC;AAET,gBAAM,WAAW,MAAM,UAAU,SAAS,OAAO;AAAA,YAC/C,OAAO,KAAK;AAAA,YACZ,YAAY,aAAa;AAAA,YACzB,aAAa,eAAe;AAAA,YAC5B,QAAQ;AAAA,YACR,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,YAAY,CAAC;AAAA,UACnD,CAAC;AAED,gBAAM,cAAc,SAAS,QAAQ,KAAK,CAAC,MAAM,EAAE,SAAS,MAAM;AAClE,gBAAM,UACJ,eAAe,UAAU,cAAc,YAAY,OAAO;AAE5D,gBAAM,WAAW,SAAS;AAO1B,gBAAM,YAAY,SAAS,2BAA2B;AACtD,gBAAM,gBAAgB,SA
AS,+BAA+B;AAE9D,cAAI,gBAAgB,GAAG;AACrB,oBAAQ;AAAA,cACN,0CAA0C,aAAa;AAAA,YACzD;AAAA,UACF;AACA,cAAI,YAAY,GAAG;AACjB,kBAAM,iBAAiB,KAAK;AAAA,cACzB,aAAa,YAAY,SAAS,gBAAiB;AAAA,YACtD;AACA,oBAAQ;AAAA,cACN,wCAAwC,SAAS,aAAa,cAAc;AAAA,YAC9E;AAAA,UACF;AACA,cAAI,kBAAkB,KAAK,cAAc,GAAG;AAC1C,oBAAQ;AAAA,cACN,yCAAyC,SAAS,YAAY;AAAA,YAChE;AAAA,UACF;AAEA,gBAAM,QAAkB;AAAA,YACtB,cAAc,SAAS;AAAA,YACvB,kBAAkB,SAAS;AAAA,YAC3B,aAAa,SAAS,eAAe,SAAS;AAAA,UAChD;AAEA,cAAI,KAAK,cAAc;AACrB,iBAAK,aAAa;AAAA,cAChB,MAAM;AAAA,cACN,MAAM;AAAA,YACR;AAAA,UACF;AAEA,gBAAM,eACJ,SAAS,gBAAgB,aACrB,SACA,SAAS;AAEf,iBAAO;AAAA,YACL;AAAA,YACA;AAAA,YACA;AAAA,UACF;AAAA,QACF,CAAC;AAED,YAAI;AACJ,YAAI,SAAS;AACX,mBAAS,OAAO;AAAA,QAClB,WAAW,sBAAsB;AAC/B,mBAAS,kBAAkB,OAAO,SAAS,MAAS;AAAA,QACtD,OAAO;AACL,mBAAS,kBAAkB,OAAO,SAAS,MAAM;AAAA,QACnD;AAEA,eAAO;AAAA,UACL,MAAM;AAAA,UACN,KAAK,OAAO;AAAA,UACZ,cAAc,OAAO;AAAA,UACrB,OAAO,OAAO;AAAA,QAChB;AAAA,MACF,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AACpE,gBAAQ;AAAA,UACN,qCAAqC,UAAU,CAAC;AAAA,UAChD,UAAU;AAAA,QACZ;AAEA,YAAI,KAAK,iBAAiB,SAAS,GAAG;AACpC,gBAAM;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,UAAM;AAAA,EACR;AAAA,EAEA,OAAO,eAAe,MAAc,QAAQ,MAAsB;AAChE,WAAO,QACH,EAAE,MAAM,QAAQ,MAAM,eAAe,EAAE,MAAM,YAAY,EAAE,IAC3D,EAAE,MAAM,QAAQ,KAAK;AAAA,EAC3B;AACF;AAMA,IAAM,gBAAyD,CAAC;AAEzD,SAAS,mBAAmB,SAAuC;AACxE,QAAM,WAAW,SAAS,YAAY;AACtC,MAAI,CAAC,cAAc,QAAQ,GAAG;AAC5B,kBAAc,QAAQ,IAAI,IAAI,UAAU,OAAO;AAAA,EACjD;AACA,SAAO,cAAc,QAAQ;AAC/B;AAEO,SAAS,qBAAqB,UAA8B;AACjE,MAAI,UAAU;AACZ,WAAO,cAAc,QAAQ;AAAA,EAC/B,OAAO;AACL,eAAW,OAAO,OAAO,KAAK,aAAa,GAAoB;AAC7D,aAAO,cAAc,GAAG;AAAA,IAC1B;AAAA,EACF;AACF;AAMO,SAAS,uBAAoC;AAClD,MAAI,QAAQ,IAAI,kBAAmB,QAAO;AAC1C,MAAI,QAAQ,IAAI,iBAAkB,QAAO;AACzC,MAAI,QAAQ,IAAI,aAAc,QAAO;AACrC,MAAI,QAAQ,IAAI,eAAgB,QAAO;AACvC,QAAM,IAAI;AAAA,IACR;AAAA,EACF;AACF;AAEO,SAAS,oBAAoB,UAAgC;AAClE,UAAQ,UAAU;AAAA,IAChB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB,KAAK;AACH,aAAO,CAAC,CAAC,QAAQ,IAAI;AAAA,IACvB;AACE,aAAO;AAAA,EACX;AACF;AAeO,SAAS,yBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,OAAO,cAAc;AACjE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAWO,SAAS,qBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aAAa,gBAAgB,WAAW,cAAc;AACrE,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAWO,SAAS,gBACd,SACW;AACX,QAAM,WAAW,SAAS,YAAY,qBAAqB;AAC3D,QAAM,eACJ,aAAa,aACT,gBAAgB,OAChB,cAAc;AACpB,SAAO,IAAI,UAAU;AAAA,IACnB;AAAA,IACA,OAAO;AAAA,IACP,aAAa;AAAA,IACb,GAAG;AAAA,EACL,CAAC;AACH;AAQO,SAAS,qBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,gBAAgB;AAAA,IACvB,GAAG;AAAA,EACL,CAAC;AACH;AAQO,SAAS,mBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,cAAc;AAAA,IACrB,GAAG;AAAA,EACL,CAAC;AACH;AAQO,SAAS,sBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,iBAAiB;AAAA,IACxB,GAAG;AAAA,EACL,CAAC;AACH;AAQO,SAAS,iBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,YAAY;AAAA,IACnB,GAAG;AAAA,EACL,CAAC;AACH;AAQO,SAAS,uBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,kBAAkB;AAAA,IACzB,GAAG;AAAA,EACL,CAAC;AACH;AAQO,SAAS,kBACd,SACW;AACX,SAAO,IAAI,UAAU;AAAA,IACnB,UAAU;AAAA,IACV,OAAO,kBAAkB;AAAA,IACzB,GAAG;AAAA,EACL,CAAC;AACH;","names":[]}
@@ -1 +1 @@
-
{"version":3,"sources":["../src/providers/masar.ts"],"sourcesContent":["/**\n * Masar Provider\n *\n * Thin HTTP client for the Masar neural pipeline server.\n * Exposes generate, GFlowNet generation, error prediction,\n * edit ranking, and health-check endpoints.\n *\n * Reads `MASAR_URL` from environment (default: http://localhost:8080).\n *\n * @packageDocumentation\n */\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport interface MasarGenerateOptions {\n /** Model override (server decides default if omitted). */\n model?: string;\n /** Sampling temperature. */\n temperature?: number;\n /** Maximum tokens to generate. */\n maxTokens?: number;\n}\n\nexport interface MasarGenerateResult {\n text: string;\n usage: {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n };\n}\n\nexport interface GoalSpec {\n /** Natural-language description of the desired application. */\n description: string;\n /** Target entities (e.g. [\"User\", \"Product\", \"Order\"]). */\n entities?: string[];\n /** Domain hint (e.g. \"e-commerce\", \"healthcare\"). */\n domain?: string;\n /** Additional constraints passed to the GFlowNet sampler. */\n constraints?: Record<string, unknown>;\n}\n\nexport interface GFlowNetResult {\n /** Generated .orb schema text. */\n schema: string;\n /** Log-probability of the sampled trajectory. */\n logProb: number;\n /** Number of sampling steps taken. */\n steps: number;\n}\n\nexport interface ErrorPrediction {\n /** Line number (1-based) where the error is predicted. */\n line: number;\n /** Predicted error category. */\n category: string;\n /** Human-readable description. */\n message: string;\n /** Confidence score in [0, 1]. */\n confidence: number;\n}\n\nexport interface PredictErrorsResult {\n errors: ErrorPrediction[];\n}\n\nexport interface RankedEdit {\n /** The proposed replacement text. */\n edit: string;\n /** Score assigned by the ranker (higher is better). */\n score: number;\n /** Which error this edit addresses. */\n targetError: string;\n}\n\nexport interface RankEditsResult {\n edits: RankedEdit[];\n}\n\nexport interface MasarHealthResult {\n status: string;\n version?: string;\n uptime?: number;\n}\n\nexport interface MasarProviderOptions {\n /** Base URL of the Masar server. Overrides MASAR_URL env var. */\n baseUrl?: string;\n /** Request timeout in milliseconds (default: 30 000). */\n timeoutMs?: number;\n}\n\n// ============================================================================\n// Error\n// ============================================================================\n\nexport class MasarError extends Error {\n constructor(\n message: string,\n public readonly statusCode: number,\n public readonly responseBody: string,\n ) {\n super(message);\n this.name = 'MasarError';\n }\n}\n\n// ============================================================================\n// Provider\n// ============================================================================\n\nconst DEFAULT_BASE_URL = 'https://masar-345008351456.europe-west4.run.app';\nconst DEFAULT_TIMEOUT_MS = 30_000;\n\nexport class MasarProvider {\n private readonly baseUrl: string;\n private readonly timeoutMs: number;\n\n constructor(options?: MasarProviderOptions) {\n this.baseUrl = (\n options?.baseUrl ??\n process.env.MASAR_URL ??\n DEFAULT_BASE_URL\n ).replace(/\\/+$/, '');\n this.timeoutMs = options?.timeoutMs ?? 
DEFAULT_TIMEOUT_MS;\n }\n\n // --------------------------------------------------------------------------\n // Public API\n // --------------------------------------------------------------------------\n\n /**\n * Generate text from a prompt.\n *\n * POST /generate\n */\n async generate(\n prompt: string,\n options?: MasarGenerateOptions,\n ): Promise<MasarGenerateResult> {\n return this.post<MasarGenerateResult>('/generate', {\n prompt,\n ...options,\n });\n }\n\n /**\n * Generate a .orb schema via GFlowNet sampling.\n *\n * POST /generate/gflownet\n */\n async generateGFlowNet(goal: GoalSpec): Promise<GFlowNetResult> {\n return this.post<GFlowNetResult>('/generate/gflownet', goal);\n }\n\n /**\n * Predict validation errors in a .orb schema before compilation.\n *\n * POST /predict-errors\n */\n async predictErrors(schema: string): Promise<PredictErrorsResult> {\n return this.post<PredictErrorsResult>('/predict-errors', { schema });\n }\n\n /**\n * Rank candidate edits for fixing errors in a .orb schema.\n *\n * POST /rank-edits\n */\n async rankEdits(\n schema: string,\n errors: string[],\n ): Promise<RankEditsResult> {\n return this.post<RankEditsResult>('/rank-edits', { schema, errors });\n }\n\n /**\n * Check server health.\n *\n * GET /health\n */\n async health(): Promise<MasarHealthResult> {\n return this.get<MasarHealthResult>('/health');\n }\n\n // --------------------------------------------------------------------------\n // Internal helpers\n // --------------------------------------------------------------------------\n\n private async post<T>(path: string, body: unknown): Promise<T> {\n return this.request<T>(path, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n });\n }\n\n private async get<T>(path: string): Promise<T> {\n return this.request<T>(path, { method: 'GET' });\n }\n\n private async request<T>(\n path: string,\n init: RequestInit,\n ): Promise<T> {\n const url = `${this.baseUrl}${path}`;\n const controller = new AbortController();\n const timer = setTimeout(() => controller.abort(), this.timeoutMs);\n\n try {\n const response = await fetch(url, {\n ...init,\n signal: controller.signal,\n });\n\n if (!response.ok) {\n const text = await response.text().catch(() => '');\n throw new MasarError(\n `Masar ${init.method} ${path} failed with status ${response.status}`,\n response.status,\n text,\n );\n }\n\n return (await response.json()) as T;\n } catch (error) {\n if (error instanceof MasarError) {\n throw error;\n }\n\n if (error instanceof DOMException && error.name === 'AbortError') {\n throw new MasarError(\n `Masar ${init.method} ${path} timed out after ${this.timeoutMs}ms`,\n 0,\n '',\n );\n }\n\n const message =\n error instanceof Error ? 
error.message : String(error);\n throw new MasarError(\n `Masar ${init.method} ${path} failed: ${message}`,\n 0,\n '',\n );\n } finally {\n clearTimeout(timer);\n }\n }\n}\n\n// ============================================================================\n// Singleton\n// ============================================================================\n\nlet sharedInstance: MasarProvider | null = null;\n\nexport function getMasarProvider(\n options?: MasarProviderOptions,\n): MasarProvider {\n if (!sharedInstance) {\n sharedInstance = new MasarProvider(options);\n }\n return sharedInstance;\n}\n\nexport function resetMasarProvider(): void {\n sharedInstance = null;\n}\n"],"mappings":";AAmGO,IAAM,aAAN,cAAyB,MAAM;AAAA,EACpC,YACE,SACgB,YACA,cAChB;AACA,UAAM,OAAO;AAHG;AACA;AAGhB,SAAK,OAAO;AAAA,EACd;AACF;AAMA,IAAM,mBAAmB;AACzB,IAAM,qBAAqB;AAEpB,IAAM,gBAAN,MAAoB;AAAA,EAIzB,YAAY,SAAgC;AAC1C,SAAK,WACH,SAAS,WACT,QAAQ,IAAI,aACZ,kBACA,QAAQ,QAAQ,EAAE;AACpB,SAAK,YAAY,SAAS,aAAa;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,SACJ,QACA,SAC8B;AAC9B,WAAO,KAAK,KAA0B,aAAa;AAAA,MACjD;AAAA,MACA,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,iBAAiB,MAAyC;AAC9D,WAAO,KAAK,KAAqB,sBAAsB,IAAI;AAAA,EAC7D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,cAAc,QAA8C;AAChE,WAAO,KAAK,KAA0B,mBAAmB,EAAE,OAAO,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,UACJ,QACA,QAC0B;AAC1B,WAAO,KAAK,KAAsB,eAAe,EAAE,QAAQ,OAAO,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,SAAqC;AACzC,WAAO,KAAK,IAAuB,SAAS;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA,EAMA,MAAc,KAAQ,MAAc,MAA2B;AAC7D,WAAO,KAAK,QAAW,MAAM;AAAA,MAC3B,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,IAC3B,CAAC;AAAA,EACH;AAAA,EAEA,MAAc,IAAO,MAA0B;AAC7C,WAAO,KAAK,QAAW,MAAM,EAAE,QAAQ,MAAM,CAAC;AAAA,EAChD;AAAA,EAEA,MAAc,QACZ,MACA,MACY;AACZ,UAAM,MAAM,GAAG,KAAK,OAAO,GAAG,IAAI;AAClC,UAAM,aAAa,IAAI,gBAAgB;AACvC,UAAM,QAAQ,WAAW,MAAM,WAAW,MAAM,GAAG,KAAK,SAAS;AAEjE,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,KAAK;AAAA,QAChC,GAAG;AAAA,QACH,QAAQ,WAAW;AAAA,MACrB,CAAC;AAED,UAAI,CAAC,SAAS,IAAI;AAChB,cAAM,OAAO,MAAM,SAAS,KAAK,EAAE,MAAM,MAAM,EAAE;AACjD,cAAM,IAAI;AAAA,UACR,SAAS,KAAK,MAAM,IAAI,IAAI,uBAAuB,SAAS,MAAM;AAAA,UAClE,SAAS;AAAA,UACT;AAAA,QACF;AAAA,MACF;AAEA,aAAQ,MAAM,SAAS,KAAK;AAAA,IAC9B,SAAS,OAAO;AACd,UAAI,iBAAiB,YAAY;AAC/B,cAAM;AAAA,MACR;AAEA,UAAI,iBAAiB,gBAAgB,MAAM,SAAS,cAAc;AAChE,cAAM,IAAI;AAAA,UACR,SAAS,KAAK,MAAM,IAAI,IAAI,oBAAoB,KAAK,SAAS;AAAA,UAC9D;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAEA,YAAM,UACJ,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACvD,YAAM,IAAI;AAAA,QACR,SAAS,KAAK,MAAM,IAAI,IAAI,YAAY,OAAO;AAAA,QAC/C;AAAA,QACA;AAAA,MACF;AAAA,IACF,UAAE;AACA,mBAAa,KAAK;AAAA,IACpB;AAAA,EACF;AACF;AAMA,IAAI,iBAAuC;
+
{"version":3,"sources":["../src/providers/masar.ts"],"sourcesContent":["/**\n * Masar Provider\n *\n * Thin HTTP client for the Masar neural pipeline server.\n * Exposes generate, GFlowNet generation, error prediction,\n * edit ranking, and health-check endpoints.\n *\n * Reads `MASAR_URL` from environment (default: http://localhost:8080).\n *\n * @packageDocumentation\n */\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport interface MasarGenerateOptions {\n /** Model override (server decides default if omitted). */\n model?: string;\n /** Sampling temperature. */\n temperature?: number;\n /** Maximum tokens to generate. */\n maxTokens?: number;\n}\n\nexport interface MasarGenerateResult {\n text: string;\n usage: {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n };\n}\n\nexport interface GoalSpec {\n /** Natural-language description of the desired application. */\n description: string;\n /** Target entities (e.g. [\"User\", \"Product\", \"Order\"]). */\n entities?: string[];\n /** Domain hint (e.g. \"e-commerce\", \"healthcare\"). */\n domain?: string;\n /** Additional constraints passed to the GFlowNet sampler. */\n constraints?: Record<string, unknown>;\n}\n\nexport interface GFlowNetResult {\n /** Generated .orb schema text. */\n schema: string;\n /** Log-probability of the sampled trajectory. */\n logProb: number;\n /** Number of sampling steps taken. */\n steps: number;\n}\n\nexport interface ErrorPrediction {\n /** Line number (1-based) where the error is predicted. */\n line: number;\n /** Predicted error category. */\n category: string;\n /** Human-readable description. */\n message: string;\n /** Confidence score in [0, 1]. */\n confidence: number;\n}\n\nexport interface PredictErrorsResult {\n errors: ErrorPrediction[];\n}\n\nexport interface RankedEdit {\n /** The proposed replacement text. */\n edit: string;\n /** Score assigned by the ranker (higher is better). */\n score: number;\n /** Which error this edit addresses. */\n targetError: string;\n}\n\nexport interface RankEditsResult {\n edits: RankedEdit[];\n}\n\nexport interface MasarHealthResult {\n status: string;\n version?: string;\n uptime?: number;\n}\n\nexport interface MasarProviderOptions {\n /** Base URL of the Masar server. Overrides MASAR_URL env var. */\n baseUrl?: string;\n /** Request timeout in milliseconds (default: 30 000). */\n timeoutMs?: number;\n}\n\n// ============================================================================\n// Error\n// ============================================================================\n\nexport class MasarError extends Error {\n constructor(\n message: string,\n public readonly statusCode: number,\n public readonly responseBody: string,\n ) {\n super(message);\n this.name = 'MasarError';\n }\n}\n\n// ============================================================================\n// Provider\n// ============================================================================\n\nconst DEFAULT_BASE_URL = 'https://masar-345008351456.europe-west4.run.app';\nconst DEFAULT_TIMEOUT_MS = 30_000;\n\nexport class MasarProvider {\n private readonly baseUrl: string;\n private readonly timeoutMs: number;\n\n constructor(options?: MasarProviderOptions) {\n this.baseUrl = (\n options?.baseUrl ??\n process.env.MASAR_URL ??\n DEFAULT_BASE_URL\n ).replace(/\\/+$/, '');\n this.timeoutMs = options?.timeoutMs ?? 
DEFAULT_TIMEOUT_MS;\n }\n\n // --------------------------------------------------------------------------\n // Public API\n // --------------------------------------------------------------------------\n\n /**\n * Generate text from a prompt.\n *\n * POST /generate\n */\n async generate(\n prompt: string,\n options?: MasarGenerateOptions,\n ): Promise<MasarGenerateResult> {\n return this.post<MasarGenerateResult>('/generate', {\n prompt,\n ...options,\n });\n }\n\n /**\n * Generate a .orb schema via GFlowNet sampling.\n *\n * POST /generate/gflownet\n */\n async generateGFlowNet(goal: GoalSpec): Promise<GFlowNetResult> {\n return this.post<GFlowNetResult>('/generate/gflownet', goal);\n }\n\n /**\n * Predict validation errors in a .orb schema before compilation.\n *\n * POST /predict-errors\n */\n async predictErrors(schema: string): Promise<PredictErrorsResult> {\n return this.post<PredictErrorsResult>('/predict-errors', { schema });\n }\n\n /**\n * Rank candidate edits for fixing errors in a .orb schema.\n *\n * POST /rank-edits\n */\n async rankEdits(\n schema: string,\n errors: string[],\n ): Promise<RankEditsResult> {\n return this.post<RankEditsResult>('/rank-edits', { schema, errors });\n }\n\n /**\n * Check server health.\n *\n * GET /health\n */\n async health(): Promise<MasarHealthResult> {\n return this.get<MasarHealthResult>('/health');\n }\n\n // --------------------------------------------------------------------------\n // Internal helpers\n // --------------------------------------------------------------------------\n\n private async post<T>(path: string, body: unknown): Promise<T> {\n return this.request<T>(path, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n });\n }\n\n private async get<T>(path: string): Promise<T> {\n return this.request<T>(path, { method: 'GET' });\n }\n\n private async request<T>(\n path: string,\n init: RequestInit,\n ): Promise<T> {\n const url = `${this.baseUrl}${path}`;\n const controller = new AbortController();\n const timer = setTimeout(() => controller.abort(), this.timeoutMs);\n\n try {\n const response = await fetch(url, {\n ...init,\n signal: controller.signal,\n });\n\n if (!response.ok) {\n const text = await response.text().catch(() => '');\n throw new MasarError(\n `Masar ${init.method} ${path} failed with status ${response.status}`,\n response.status,\n text,\n );\n }\n\n return (await response.json()) as T;\n } catch (error) {\n if (error instanceof MasarError) {\n throw error;\n }\n\n if (error instanceof DOMException && error.name === 'AbortError') {\n throw new MasarError(\n `Masar ${init.method} ${path} timed out after ${this.timeoutMs}ms`,\n 0,\n '',\n );\n }\n\n const message =\n error instanceof Error ? 
error.message : String(error);\n throw new MasarError(\n `Masar ${init.method} ${path} failed: ${message}`,\n 0,\n '',\n );\n } finally {\n clearTimeout(timer);\n }\n }\n}\n\n// ============================================================================\n// Singleton\n// ============================================================================\n\nlet sharedInstance: MasarProvider | null = null;\n\n/**\n * Get the singleton Masar provider instance.\n *\n * Creates the instance on first call, returns cached instance thereafter.\n *\n * @param {MasarProviderOptions} [options] - Provider configuration options\n * @returns {MasarProvider} The Masar provider instance\n */\nexport function getMasarProvider(\n options?: MasarProviderOptions,\n): MasarProvider {\n if (!sharedInstance) {\n sharedInstance = new MasarProvider(options);\n }\n return sharedInstance;\n}\n\nexport function resetMasarProvider(): void {\n sharedInstance = null;\n}\n"],"mappings":";AAmGO,IAAM,aAAN,cAAyB,MAAM;AAAA,EACpC,YACE,SACgB,YACA,cAChB;AACA,UAAM,OAAO;AAHG;AACA;AAGhB,SAAK,OAAO;AAAA,EACd;AACF;AAMA,IAAM,mBAAmB;AACzB,IAAM,qBAAqB;AAEpB,IAAM,gBAAN,MAAoB;AAAA,EAIzB,YAAY,SAAgC;AAC1C,SAAK,WACH,SAAS,WACT,QAAQ,IAAI,aACZ,kBACA,QAAQ,QAAQ,EAAE;AACpB,SAAK,YAAY,SAAS,aAAa;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,SACJ,QACA,SAC8B;AAC9B,WAAO,KAAK,KAA0B,aAAa;AAAA,MACjD;AAAA,MACA,GAAG;AAAA,IACL,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,iBAAiB,MAAyC;AAC9D,WAAO,KAAK,KAAqB,sBAAsB,IAAI;AAAA,EAC7D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,cAAc,QAA8C;AAChE,WAAO,KAAK,KAA0B,mBAAmB,EAAE,OAAO,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,UACJ,QACA,QAC0B;AAC1B,WAAO,KAAK,KAAsB,eAAe,EAAE,QAAQ,OAAO,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,SAAqC;AACzC,WAAO,KAAK,IAAuB,SAAS;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA,EAMA,MAAc,KAAQ,MAAc,MAA2B;AAC7D,WAAO,KAAK,QAAW,MAAM;AAAA,MAC3B,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,IAC3B,CAAC;AAAA,EACH;AAAA,EAEA,MAAc,IAAO,MAA0B;AAC7C,WAAO,KAAK,QAAW,MAAM,EAAE,QAAQ,MAAM,CAAC;AAAA,EAChD;AAAA,EAEA,MAAc,QACZ,MACA,MACY;AACZ,UAAM,MAAM,GAAG,KAAK,OAAO,GAAG,IAAI;AAClC,UAAM,aAAa,IAAI,gBAAgB;AACvC,UAAM,QAAQ,WAAW,MAAM,WAAW,MAAM,GAAG,KAAK,SAAS;AAEjE,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,KAAK;AAAA,QAChC,GAAG;AAAA,QACH,QAAQ,WAAW;AAAA,MACrB,CAAC;AAED,UAAI,CAAC,SAAS,IAAI;AAChB,cAAM,OAAO,MAAM,SAAS,KAAK,EAAE,MAAM,MAAM,EAAE;AACjD,cAAM,IAAI;AAAA,UACR,SAAS,KAAK,MAAM,IAAI,IAAI,uBAAuB,SAAS,MAAM;AAAA,UAClE,SAAS;AAAA,UACT;AAAA,QACF;AAAA,MACF;AAEA,aAAQ,MAAM,SAAS,KAAK;AAAA,IAC9B,SAAS,OAAO;AACd,UAAI,iBAAiB,YAAY;AAC/B,cAAM;AAAA,MACR;AAEA,UAAI,iBAAiB,gBAAgB,MAAM,SAAS,cAAc;AAChE,cAAM,IAAI;AAAA,UACR,SAAS,KAAK,MAAM,IAAI,IAAI,oBAAoB,KAAK,SAAS;AAAA,UAC9D;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAEA,YAAM,UACJ,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACvD,YAAM,IAAI;AAAA,QACR,SAAS,KAAK,MAAM,IAAI,IAAI,YAAY,OAAO;AAAA,QAC/C;AAAA,QACA;AAAA,MACF;AAAA,IACF,UAAE;AACA,mBAAa,KAAK;AAAA,IACpB;AAAA,EACF;AACF;AAMA,IAAI,iBAAuC;AAUpC,SAAS,iBACd,SACe;AACf,MAAI,CAAC,gBAAgB;AACnB,qBAAiB,IAAI,cAAc,OAAO;AAAA,EAC5C;AACA,SAAO;AACT;AAEO,SAAS,qBAA2B;AACzC,mBAAiB;AACnB;","names":[]}
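For orientation, a minimal usage sketch of the MasarProvider API captured in the source map above. This is not from the package docs: it assumes getMasarProvider and MasarError are re-exported from the package root (dist/index.js imports them from the provider chunk), that the code runs in an ESM module with top-level await, and the .orb snippet is a placeholder.

```ts
// Minimal sketch, assuming root re-exports and an ESM context.
import { getMasarProvider, MasarError } from '@almadar/llm';

// Singleton client; base URL comes from MASAR_URL or the built-in default.
const masar = getMasarProvider({ timeoutMs: 10_000 });

try {
  // Predict validation errors in a .orb schema before compiling it.
  // The schema text here is a placeholder, not real .orb syntax.
  const { errors } = await masar.predictErrors('entity User { name: string }');
  for (const e of errors) {
    console.log(`line ${e.line} [${e.category}] ${e.message} (confidence ${e.confidence})`);
  }
} catch (err) {
  // MasarError carries the HTTP status (0 for timeouts and network failures).
  if (err instanceof MasarError) {
    console.error(`Masar request failed (status ${err.statusCode}): ${err.message}`);
  } else {
    throw err;
  }
}
```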
package/dist/client.d.ts
CHANGED
@@ -169,14 +169,77 @@ declare function getSharedLLMClient(options?: LLMClientOptions): LLMClient;
 declare function resetSharedLLMClient(provider?: LLMProvider): void;
 declare function getAvailableProvider(): LLMProvider;
 declare function isProviderAvailable(provider: LLMProvider): boolean;
+/**
+ * Create an LLM client optimized for requirements analysis.
+ *
+ * Uses lower temperature (0.3) for more deterministic output.
+ * Defaults to GPT-5.1 for OpenAI or DeepSeek Chat.
+ *
+ * @param {Partial<LLMClientOptions>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured LLM client
+ */
 declare function createRequirementsClient(options?: Partial<LLMClientOptions>): LLMClient;
+/**
+ * Create an LLM client optimized for creative tasks.
+ *
+ * Uses higher temperature (0.7) for more varied output.
+ * Defaults to GPT-4o or DeepSeek Reasoner.
+ *
+ * @param {Partial<LLMClientOptions>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured LLM client
+ */
 declare function createCreativeClient(options?: Partial<LLMClientOptions>): LLMClient;
+/**
+ * Create an LLM client optimized for code fixing.
+ *
+ * Uses low temperature (0.2) for precise, deterministic fixes.
+ * Defaults to GPT-4o Mini or DeepSeek Chat for cost efficiency.
+ *
+ * @param {Partial<LLMClientOptions>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured LLM client
+ */
 declare function createFixClient(options?: Partial<LLMClientOptions>): LLMClient;
+/**
+ * Create a DeepSeek LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured DeepSeek client
+ */
 declare function createDeepSeekClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
+/**
+ * Create an OpenAI LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured OpenAI client
+ */
 declare function createOpenAIClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
+/**
+ * Create an Anthropic LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured Anthropic client
+ */
 declare function createAnthropicClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
+/**
+ * Create a Kimi LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured Kimi client
+ */
 declare function createKimiClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
+/**
+ * Create an OpenRouter LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured OpenRouter client
+ */
 declare function createOpenRouterClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;
+/**
+ * Create a Zhipu (GLM) LLM client via OpenRouter.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured Zhipu client
+ */
 declare function createZhipuClient(options?: Partial<Omit<LLMClientOptions, 'provider'>>): LLMClient;

 export { ANTHROPIC_MODELS, type CacheAwareLLMCallOptions, type CacheableBlock, DEEPSEEK_MODELS, KIMI_MODELS, type LLMCallOptions, LLMClient, type LLMClientOptions, type LLMFinishReason, type LLMProvider, type LLMResponse, type LLMStreamChunk, type LLMStreamOptions, type LLMUsage, OPENAI_MODELS, OPENROUTER_MODELS, type ProviderConfig, createAnthropicClient, createCreativeClient, createDeepSeekClient, createFixClient, createKimiClient, createOpenAIClient, createOpenRouterClient, createRequirementsClient, createZhipuClient, getAvailableProvider, getSharedLLMClient, isProviderAvailable, resetSharedLLMClient };
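Since the diff above only shows declarations, here is a brief sketch of how the newly documented factories are called. The callRawWithMetadata method and its argument and return shape are taken from the continuation source embedded in the maps below, not from formal docs, and root re-exports of the factories are assumed.

```ts
// Sketch under assumptions: factories re-exported from the package root;
// LLMClient exposes callRawWithMetadata as seen in src/continuation.ts.
import { createRequirementsClient, createFixClient } from '@almadar/llm';

// Low-temperature (0.3) client for deterministic requirements analysis.
const requirements = createRequirementsClient();

// Cheaper, low-temperature (0.2) client for precise code fixes.
const fixer = createFixClient();

const res = await fixer.callRawWithMetadata({
  systemPrompt: 'You fix TypeScript type errors. Reply with corrected code only.',
  userPrompt: 'const x: number = "1";',
  maxTokens: 512,
});
console.log(res.finishReason, res.raw);
```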
package/dist/client.js
CHANGED
package/dist/index.d.ts
CHANGED
@@ -27,6 +27,16 @@ interface TruncationResult {
   missingCloseBrackets?: number;
   missingCloseBraces?: number;
 }
+/**
+ * Detect if an LLM response has been truncated.
+ *
+ * Analyzes the response content and finish reason to determine if
+ * the output was cut off due to token limits or other issues.
+ *
+ * @param {string} response - The LLM response text
+ * @param {LLMFinishReason} finishReason - The finish reason from the LLM
+ * @returns {TruncationResult} Detection result with truncation details
+ */
 declare function detectTruncation(response: string, finishReason: LLMFinishReason): TruncationResult;
 declare function findLastCompleteElement(json: string): unknown | null;
 declare function isLikelyTruncated(content: string): boolean;
@@ -61,9 +71,41 @@ interface ContinuationResult<T> {
   warnings: string[];
   wasSalvaged: boolean;
 }
+/**
+ * Merge a previous partial response with a continuation.
+ *
+ * Handles JSON structure merging, removing markdown code blocks,
+ * and ensuring proper comma separation between merged parts.
+ *
+ * @param {string} previous - The previous partial response
+ * @param {string} continuation - The continuation to merge
+ * @returns {string} The merged response
+ */
 declare function mergeResponses(previous: string, continuation: string): string;
 declare function salvagePartialResponse<T>(rawResponse: string): T | null;
+/**
+ * Call an LLM with automatic continuation handling.
+ *
+ * Manages token limits by detecting truncation and making continuation
+ * calls until the complete response is received or max continuations reached.
+ *
+ * @template T - Expected response type
+ * @param {ContinuationOptions<T>} options - Continuation call options
+ * @returns {Promise<ContinuationResult<T>>} Result with data and continuation metadata
+ */
 declare function callWithContinuation<T>(options: ContinuationOptions<T>): Promise<ContinuationResult<T>>;
+/**
+ * Build a generic continuation prompt.
+ *
+ * Creates a prompt that instructs the LLM to continue from where
+ * a previous truncated response left off.
+ *
+ * @param {string} context - Original context/prompt
+ * @param {string} partialResponse - The partial response generated so far
+ * @param {number} attempt - Current continuation attempt number
+ * @param {number} [maxAttempts] - Maximum allowed continuation attempts
+ * @returns {string} The formatted continuation prompt
+ */
 declare function buildGenericContinuationPrompt(context: string, partialResponse: string, attempt: number, maxAttempts?: number): string;

 /**
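The continuation API above is easiest to read with a concrete call. The following sketch mirrors the ContinuationOptions<T> and buildGenericContinuationPrompt declarations exactly; the client choice, prompts, and token budget are purely illustrative.

```ts
// Illustrative sketch: field names come from ContinuationOptions<T> above;
// the DeepSeek client and the prompt text are placeholders.
import { z } from 'zod';
import {
  callWithContinuation,
  buildGenericContinuationPrompt,
  createDeepSeekClient,
} from '@almadar/llm';

const Schema = z.object({ items: z.array(z.string()) });
const context = 'List 100 common English nouns as JSON: {"items": string[]}.';

const result = await callWithContinuation({
  client: createDeepSeekClient(),
  systemPrompt: 'Respond with JSON only.',
  userPrompt: context,
  schema: Schema,
  maxTokens: 1024, // deliberately small to exercise continuation
  maxContinuations: 3,
  // Required: builds the follow-up prompt whenever output is truncated.
  buildContinuationPrompt: (partial, attempt) =>
    buildGenericContinuationPrompt(context, partial, attempt),
});

console.log(result.data.items.length, {
  continuations: result.continuationCount,
  salvaged: result.wasSalvaged,
  warnings: result.warnings,
});
```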
package/dist/index.js
CHANGED
@@ -18,7 +18,7 @@ import {
   getSharedLLMClient,
   isProviderAvailable,
   resetSharedLLMClient
-} from "./chunk-M4RXWVN7.js";
+} from "./chunk-FVMB5NND.js";
 import {
   autoCloseJson,
   extractJsonFromText,
@@ -32,7 +32,7 @@ import {
   getStructuredOutputClient,
   isStructuredOutputAvailable,
   resetStructuredOutputClient
-} from "./chunk-KH4JNOLT.js";
+} from "./chunk-3OVQNNPN.js";
 import {
   RateLimiter,
   TokenTracker,
@@ -46,7 +46,7 @@ import {
   MasarProvider,
   getMasarProvider,
   resetMasarProvider
-} from "./chunk-QOOSH67G.js";
+} from "./chunk-QHJ3T46X.js";

 // src/truncation-detector.ts
 function detectTruncation(response, finishReason) {
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../src/truncation-detector.ts","../src/continuation.ts"],"sourcesContent":["/**\n * Truncation Detector\n *\n * Utilities for detecting when LLM output has been truncated and\n * extracting usable content from partial responses.\n *\n * @packageDocumentation\n */\n\nimport type { LLMFinishReason } from './client.js';\nimport { autoCloseJson } from './json-parser.js';\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport type TruncationReason =\n | 'finish_reason'\n | 'json_incomplete'\n | 'bracket_mismatch'\n | 'none';\n\nexport interface TruncationResult {\n isTruncated: boolean;\n reason: TruncationReason;\n partialContent?: string;\n lastCompleteElement?: unknown;\n missingCloseBrackets?: number;\n missingCloseBraces?: number;\n}\n\n// ============================================================================\n// Main Detection Function\n// ============================================================================\n\nexport function detectTruncation(\n response: string,\n finishReason: LLMFinishReason,\n): TruncationResult {\n if (finishReason === 'length') {\n const bracketInfo = countBrackets(response);\n return {\n isTruncated: true,\n reason: 'finish_reason',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n try {\n JSON.parse(response);\n return { isTruncated: false, reason: 'none' };\n } catch {\n // JSON is invalid, check if due to truncation\n }\n\n if (finishReason === 'stop' || finishReason === null) {\n const trimmed = response.trim();\n\n const isMidContent =\n trimmed.endsWith(',') ||\n trimmed.endsWith(':') ||\n trimmed.endsWith('\": ') ||\n /:\\s*$/.test(trimmed) ||\n /,\\s*$/.test(trimmed);\n\n if (isMidContent) {\n const bracketInfo = countBrackets(response);\n return {\n isTruncated: true,\n reason: 'json_incomplete',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n try {\n const closed = autoCloseJson(trimmed);\n JSON.parse(closed);\n return { isTruncated: false, reason: 'none' };\n } catch {\n return { isTruncated: false, reason: 'none' };\n }\n }\n\n const bracketInfo = countBrackets(response);\n if (\n bracketInfo.missingCloseBrackets > 0 ||\n bracketInfo.missingCloseBraces > 0\n ) {\n return {\n isTruncated: true,\n reason: 'bracket_mismatch',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n return { isTruncated: false, reason: 'none' };\n}\n\n// ============================================================================\n// Helper Functions\n// ============================================================================\n\nfunction countBrackets(json: string): {\n openBrackets: number;\n closeBrackets: number;\n openBraces: number;\n closeBraces: number;\n missingCloseBrackets: number;\n missingCloseBraces: number;\n} {\n let inString = false;\n let escaped = false;\n let openBrackets = 0;\n let closeBrackets = 0;\n let openBraces = 0;\n let closeBraces = 0;\n\n for (const char of json) {\n if (escaped) {\n escaped = false;\n 
continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n switch (char) {\n case '[':\n openBrackets++;\n break;\n case ']':\n closeBrackets++;\n break;\n case '{':\n openBraces++;\n break;\n case '}':\n closeBraces++;\n break;\n }\n }\n\n return {\n openBrackets,\n closeBrackets,\n openBraces,\n closeBraces,\n missingCloseBrackets: Math.max(0, openBrackets - closeBrackets),\n missingCloseBraces: Math.max(0, openBraces - closeBraces),\n };\n}\n\nexport function findLastCompleteElement(json: string): unknown | null {\n const autoClosed = autoCloseJson(json);\n try {\n return JSON.parse(autoClosed);\n } catch {\n // Auto-close didn't work\n }\n\n const trimmed = json.trim();\n\n if (trimmed.startsWith('[')) {\n const lastCompleteIndex = findLastCompleteArrayElement(trimmed);\n if (lastCompleteIndex > 0) {\n const subset = trimmed.substring(0, lastCompleteIndex) + ']';\n try {\n return JSON.parse(subset);\n } catch {\n // Continue\n }\n }\n }\n\n if (trimmed.startsWith('{')) {\n const closed = autoCloseJson(trimmed);\n try {\n return JSON.parse(closed);\n } catch {\n const lastCompleteIndex = findLastCompleteObjectProperty(trimmed);\n if (lastCompleteIndex > 0) {\n const subset = trimmed.substring(0, lastCompleteIndex) + '}';\n try {\n return JSON.parse(subset);\n } catch {\n // Give up\n }\n }\n }\n }\n\n return null;\n}\n\nfunction findLastCompleteArrayElement(json: string): number {\n let depth = 0;\n let inString = false;\n let escaped = false;\n let lastCompleteElementEnd = -1;\n\n for (let i = 0; i < json.length; i++) {\n const char = json[i];\n\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n if (char === '[' || char === '{') {\n depth++;\n } else if (char === ']' || char === '}') {\n depth--;\n if (depth === 1) {\n lastCompleteElementEnd = i + 1;\n }\n } else if (char === ',' && depth === 1) {\n lastCompleteElementEnd = i;\n }\n }\n\n return lastCompleteElementEnd > 0 ? lastCompleteElementEnd : -1;\n}\n\nfunction findLastCompleteObjectProperty(json: string): number {\n let depth = 0;\n let inString = false;\n let escaped = false;\n let lastCommaIndex = -1;\n\n for (let i = 0; i < json.length; i++) {\n const char = json[i];\n\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n if (char === '[' || char === '{') {\n depth++;\n } else if (char === ']' || char === '}') {\n depth--;\n } else if (char === ',' && depth === 1) {\n lastCommaIndex = i;\n }\n }\n\n return lastCommaIndex > 0 ? 
lastCommaIndex : -1;\n}\n\nexport function isLikelyTruncated(content: string): boolean {\n const trimmed = content.trim();\n if (!trimmed) return false;\n\n const brackets = countBrackets(trimmed);\n if (\n brackets.missingCloseBrackets > 0 ||\n brackets.missingCloseBraces > 0\n ) {\n return true;\n }\n\n const abruptEndings = [\n /,\\s*$/,\n /:\\s*$/,\n /\"\\s*:\\s*$/,\n /\\[\\s*$/,\n /{\\s*$/,\n ];\n\n for (const pattern of abruptEndings) {\n if (pattern.test(trimmed)) return true;\n }\n\n return false;\n}\n","/**\n * LLM Continuation Utility\n *\n * Handles truncated LLM responses with automatic continuation.\n * - Detects truncation via finish_reason and JSON structure\n * - Automatically continues with full context\n * - Merges partial and continuation responses\n * - Salvages partial data if max continuations reached\n *\n * @packageDocumentation\n */\n\nimport { z } from 'zod';\nimport { LLMClient, type LLMFinishReason } from './client.js';\nimport { detectTruncation } from './truncation-detector.js';\nimport { extractJsonFromText, autoCloseJson, isValidJson } from './json-parser.js';\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport interface ContinuationOptions<T> {\n client: LLMClient;\n systemPrompt: string;\n userPrompt: string;\n schema?: z.ZodSchema<T>;\n maxTokens?: number;\n maxContinuations?: number;\n maxRetries?: number;\n buildContinuationPrompt: (\n partialResponse: string,\n attempt: number,\n ) => string;\n continuationSystemPrompt?: string;\n}\n\nexport interface ContinuationResult<T> {\n data: T;\n raw: string;\n continuationCount: number;\n warnings: string[];\n wasSalvaged: boolean;\n}\n\n// ============================================================================\n// Constants\n// ============================================================================\n\nconst DEFAULT_MAX_TOKENS = 8192;\nconst DEFAULT_MAX_CONTINUATIONS = 3;\n\n/**\n * Default continuation system prompt.\n * Used when no custom continuationSystemPrompt is provided.\n */\nconst DEFAULT_CONTINUATION_SYSTEM_PROMPT = `You are a JSON continuation assistant. Your ONLY job is to continue generating JSON from where the previous response was truncated.\n\nRules:\n1. Continue from EXACTLY where the previous output stopped\n2. Do NOT repeat any content already generated\n3. Complete the JSON structure properly with all closing brackets\n4. Do NOT wrap in markdown code blocks\n5. 
Output ONLY the continuation JSON, nothing else`;\n\n// ============================================================================\n// Helper Functions\n// ============================================================================\n\nexport function mergeResponses(\n previous: string,\n continuation: string,\n): string {\n const trimmedPrev = previous.trimEnd();\n const trimmedCont = continuation.trimStart();\n\n let cleanedCont = trimmedCont\n .replace(/^```json?\\s*/i, '')\n .replace(/```\\s*$/i, '')\n .trim();\n\n if (cleanedCont.startsWith('{')) {\n try {\n const contParsed = JSON.parse(autoCloseJson(cleanedCont));\n const keys = Object.keys(contParsed);\n if (keys.length === 1 && Array.isArray(contParsed[keys[0]])) {\n cleanedCont = contParsed[keys[0]]\n .map((item: unknown) => JSON.stringify(item))\n .join(',\\n');\n }\n } catch {\n // Continue with original cleaning\n }\n }\n\n if (cleanedCont.startsWith('}') || cleanedCont.startsWith(']')) {\n return trimmedPrev + cleanedCont;\n }\n\n const prevEndsWithValue = /[\\}\\]\\\"\\d]$/.test(trimmedPrev);\n const contStartsWithValue = /^[\\{\\[\\\"]/.test(cleanedCont);\n\n if (prevEndsWithValue && contStartsWithValue) {\n return trimmedPrev + ',\\n' + cleanedCont;\n }\n\n return trimmedPrev + cleanedCont;\n}\n\nexport function salvagePartialResponse<T>(rawResponse: string): T | null {\n console.warn('[Continuation] Attempting to salvage partial response');\n\n try {\n const cleanedResponse = extractJsonFromText(rawResponse) || rawResponse;\n const closed = autoCloseJson(cleanedResponse);\n const parsed = JSON.parse(closed) as T;\n console.log('[Continuation] Successfully salvaged partial response');\n return parsed;\n } catch (error) {\n console.error('[Continuation] Could not salvage response:', error);\n }\n\n return null;\n}\n\n// ============================================================================\n// Main Function\n// ============================================================================\n\nexport async function callWithContinuation<T>(\n options: ContinuationOptions<T>,\n): Promise<ContinuationResult<T>> {\n const {\n client,\n systemPrompt,\n userPrompt,\n schema,\n maxTokens = DEFAULT_MAX_TOKENS,\n maxContinuations = DEFAULT_MAX_CONTINUATIONS,\n buildContinuationPrompt,\n continuationSystemPrompt = DEFAULT_CONTINUATION_SYSTEM_PROMPT,\n } = options;\n\n let rawResponse = '';\n let continuationCount = 0;\n const warnings: string[] = [];\n let wasSalvaged = false;\n\n console.log('[Continuation] Starting LLM call with continuation support');\n console.log(\n `[Continuation] Max tokens: ${maxTokens}, Max continuations: ${maxContinuations}`,\n );\n\n try {\n const response = await client.callRawWithMetadata({\n systemPrompt,\n userPrompt,\n maxTokens,\n });\n\n rawResponse = extractJsonFromText(response.raw) || response.raw;\n\n console.log(\n `[Continuation] Initial response: ${rawResponse.length} chars, finish_reason: ${response.finishReason}`,\n );\n\n let truncation = detectTruncation(rawResponse, response.finishReason);\n\n while (truncation.isTruncated && continuationCount < maxContinuations) {\n continuationCount++;\n const warningMsg = `Response truncated (${truncation.reason}), continuing (attempt ${continuationCount}/${maxContinuations})`;\n console.log(`[Continuation] ${warningMsg}`);\n warnings.push(warningMsg);\n\n const contPrompt = buildContinuationPrompt(\n rawResponse,\n continuationCount,\n );\n\n const contResponse = await client.callRawWithMetadata({\n systemPrompt: continuationSystemPrompt,\n 
userPrompt: contPrompt,\n maxTokens,\n });\n\n console.log(\n `[Continuation] Continuation response: ${contResponse.raw.length} chars, finish_reason: ${contResponse.finishReason}`,\n );\n\n const cleanedContResponse =\n extractJsonFromText(contResponse.raw) || contResponse.raw;\n rawResponse = mergeResponses(rawResponse, cleanedContResponse);\n\n truncation = detectTruncation(rawResponse, contResponse.finishReason);\n }\n\n if (\n continuationCount >= maxContinuations &&\n truncation.isTruncated\n ) {\n console.warn(\n `[Continuation] Reached max continuations (${maxContinuations}), attempting to salvage...`,\n );\n warnings.push(\n `Reached max continuations - some content may be incomplete`,\n );\n wasSalvaged = true;\n }\n\n const cleanedResponse =\n extractJsonFromText(rawResponse) || rawResponse;\n let data: T;\n\n try {\n if (isValidJson(cleanedResponse)) {\n data = JSON.parse(cleanedResponse) as T;\n } else {\n const closed = autoCloseJson(cleanedResponse);\n data = JSON.parse(closed) as T;\n if (!wasSalvaged) {\n warnings.push('Response required auto-closing of JSON brackets');\n }\n }\n } catch (parseError) {\n const salvaged = salvagePartialResponse<T>(cleanedResponse);\n if (salvaged) {\n data = salvaged;\n wasSalvaged = true;\n warnings.push('Response was salvaged from partial data');\n } else {\n throw new Error(\n `Failed to parse response after ${continuationCount} continuations: ${parseError}`,\n );\n }\n }\n\n if (schema) {\n try {\n data = schema.parse(data);\n } catch (validationError) {\n console.warn(\n '[Continuation] Schema validation failed:',\n validationError,\n );\n warnings.push(`Schema validation issue: ${validationError}`);\n }\n }\n\n console.log(\n `[Continuation] Complete. Continuations: ${continuationCount}, Warnings: ${warnings.length}`,\n );\n\n return {\n data,\n raw: rawResponse,\n continuationCount,\n warnings,\n wasSalvaged,\n };\n } catch (error) {\n console.error('[Continuation] Error during LLM call:', error);\n throw error;\n }\n}\n\nexport function buildGenericContinuationPrompt(\n context: string,\n partialResponse: string,\n attempt: number,\n maxAttempts: number = DEFAULT_MAX_CONTINUATIONS,\n): string {\n return `## CONTINUATION REQUEST (Attempt ${attempt}/${maxAttempts})\n\nYour previous response was truncated. Continue generating from where you left off.\n\n### ORIGINAL CONTEXT\n${context}\n\n### WHAT YOU GENERATED SO FAR\n\\`\\`\\`json\n${partialResponse}\n\\`\\`\\`\n\n### INSTRUCTIONS\n1. Continue from EXACTLY where the response was cut off\n2. Do NOT repeat any content already generated\n3. Complete the JSON structure properly\n4. 
Do NOT wrap your response in markdown code blocks\n\nContinue generating now:`;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAmCO,SAAS,iBACd,UACA,cACkB;AAClB,MAAI,iBAAiB,UAAU;AAC7B,UAAMA,eAAc,cAAc,QAAQ;AAC1C,WAAO;AAAA,MACL,aAAa;AAAA,MACb,QAAQ;AAAA,MACR,gBAAgB;AAAA,MAChB,qBAAqB,wBAAwB,QAAQ;AAAA,MACrD,sBAAsBA,aAAY;AAAA,MAClC,oBAAoBA,aAAY;AAAA,IAClC;AAAA,EACF;AAEA,MAAI;AACF,SAAK,MAAM,QAAQ;AACnB,WAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,EAC9C,QAAQ;AAAA,EAER;AAEA,MAAI,iBAAiB,UAAU,iBAAiB,MAAM;AACpD,UAAM,UAAU,SAAS,KAAK;AAE9B,UAAM,eACJ,QAAQ,SAAS,GAAG,KACpB,QAAQ,SAAS,GAAG,KACpB,QAAQ,SAAS,KAAK,KACtB,QAAQ,KAAK,OAAO,KACpB,QAAQ,KAAK,OAAO;AAEtB,QAAI,cAAc;AAChB,YAAMA,eAAc,cAAc,QAAQ;AAC1C,aAAO;AAAA,QACL,aAAa;AAAA,QACb,QAAQ;AAAA,QACR,gBAAgB;AAAA,QAChB,qBAAqB,wBAAwB,QAAQ;AAAA,QACrD,sBAAsBA,aAAY;AAAA,QAClC,oBAAoBA,aAAY;AAAA,MAClC;AAAA,IACF;AAEA,QAAI;AACF,YAAM,SAAS,cAAc,OAAO;AACpC,WAAK,MAAM,MAAM;AACjB,aAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,IAC9C,QAAQ;AACN,aAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAAA,IAC9C;AAAA,EACF;AAEA,QAAM,cAAc,cAAc,QAAQ;AAC1C,MACE,YAAY,uBAAuB,KACnC,YAAY,qBAAqB,GACjC;AACA,WAAO;AAAA,MACL,aAAa;AAAA,MACb,QAAQ;AAAA,MACR,gBAAgB;AAAA,MAChB,qBAAqB,wBAAwB,QAAQ;AAAA,MACrD,sBAAsB,YAAY;AAAA,MAClC,oBAAoB,YAAY;AAAA,IAClC;AAAA,EACF;AAEA,SAAO,EAAE,aAAa,OAAO,QAAQ,OAAO;AAC9C;AAMA,SAAS,cAAc,MAOrB;AACA,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,eAAe;AACnB,MAAI,gBAAgB;AACpB,MAAI,aAAa;AACjB,MAAI,cAAc;AAElB,aAAW,QAAQ,MAAM;AACvB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,YAAQ,MAAM;AAAA,MACZ,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,MACF,KAAK;AACH;AACA;AAAA,IACJ;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,sBAAsB,KAAK,IAAI,GAAG,eAAe,aAAa;AAAA,IAC9D,oBAAoB,KAAK,IAAI,GAAG,aAAa,WAAW;AAAA,EAC1D;AACF;AAEO,SAAS,wBAAwB,MAA8B;AACpE,QAAM,aAAa,cAAc,IAAI;AACrC,MAAI;AACF,WAAO,KAAK,MAAM,UAAU;AAAA,EAC9B,QAAQ;AAAA,EAER;AAEA,QAAM,UAAU,KAAK,KAAK;AAE1B,MAAI,QAAQ,WAAW,GAAG,GAAG;AAC3B,UAAM,oBAAoB,6BAA6B,OAAO;AAC9D,QAAI,oBAAoB,GAAG;AACzB,YAAM,SAAS,QAAQ,UAAU,GAAG,iBAAiB,IAAI;AACzD,UAAI;AACF,eAAO,KAAK,MAAM,MAAM;AAAA,MAC1B,QAAQ;AAAA,MAER;AAAA,IACF;AAAA,EACF;AAEA,MAAI,QAAQ,WAAW,GAAG,GAAG;AAC3B,UAAM,SAAS,cAAc,OAAO;AACpC,QAAI;AACF,aAAO,KAAK,MAAM,MAAM;AAAA,IAC1B,QAAQ;AACN,YAAM,oBAAoB,+BAA+B,OAAO;AAChE,UAAI,oBAAoB,GAAG;AACzB,cAAM,SAAS,QAAQ,UAAU,GAAG,iBAAiB,IAAI;AACzD,YAAI;AACF,iBAAO,KAAK,MAAM,MAAM;AAAA,QAC1B,QAAQ;AAAA,QAER;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAEA,SAAS,6BAA6B,MAAsB;AAC1D,MAAI,QAAQ;AACZ,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,yBAAyB;AAE7B,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,OAAO,KAAK,CAAC;AAEnB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,QAAI,SAAS,OAAO,SAAS,KAAK;AAChC;AAAA,IACF,WAAW,SAAS,OAAO,SAAS,KAAK;AACvC;AACA,UAAI,UAAU,GAAG;AACf,iCAAyB,IAAI;AAAA,MAC/B;AAAA,IACF,WAAW,SAAS,OAAO,UAAU,GAAG;AACtC,+BAAyB;AAAA,IAC3B;AAAA,EACF;AAEA,SAAO,yBAAyB,IAAI,yBAAyB;AAC/D;AAEA,SAAS,+BAA+B,MAAsB;AAC5D,MAAI,QAAQ;AACZ,MAAI,WAAW;AACf,MAAI,UAAU;AACd,MAAI,iBAAiB;AAErB,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,UAAM,OAAO,KAAK,CAAC;AAEnB,QAAI,SAAS;AACX,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,QAAQ,UAAU;AAC7B,gBAAU;AACV;AAAA,IACF;AACA,QAAI,SAAS,KAAK;AAChB,iBAAW,CAAC;AACZ;AAAA,IACF;AACA,QAAI,SAAU;AAEd,QAAI,SAAS,OAAO,SAAS,KAAK;AAChC;AAAA,IACF,WAAW,SAAS,OAAO,SAAS,KAAK;AACvC;AAAA,IACF,WAAW,SAAS,OAAO,UAAU,GAAG;AACtC,uBAAiB;AAAA,IACnB;AAAA,EACF;AAEA,SAAO,iBAAiB,IAAI,iBAAiB;
AAC/C;AAEO,SAAS,kBAAkB,SAA0B;AAC1D,QAAM,UAAU,QAAQ,KAAK;AAC7B,MAAI,CAAC,QAAS,QAAO;AAErB,QAAM,WAAW,cAAc,OAAO;AACtC,MACE,SAAS,uBAAuB,KAChC,SAAS,qBAAqB,GAC9B;AACA,WAAO;AAAA,EACT;AAEA,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,aAAW,WAAW,eAAe;AACnC,QAAI,QAAQ,KAAK,OAAO,EAAG,QAAO;AAAA,EACpC;AAEA,SAAO;AACT;;;ACnQA,IAAM,qBAAqB;AAC3B,IAAM,4BAA4B;AAMlC,IAAM,qCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAapC,SAAS,eACd,UACA,cACQ;AACR,QAAM,cAAc,SAAS,QAAQ;AACrC,QAAM,cAAc,aAAa,UAAU;AAE3C,MAAI,cAAc,YACf,QAAQ,iBAAiB,EAAE,EAC3B,QAAQ,YAAY,EAAE,EACtB,KAAK;AAER,MAAI,YAAY,WAAW,GAAG,GAAG;AAC/B,QAAI;AACF,YAAM,aAAa,KAAK,MAAM,cAAc,WAAW,CAAC;AACxD,YAAM,OAAO,OAAO,KAAK,UAAU;AACnC,UAAI,KAAK,WAAW,KAAK,MAAM,QAAQ,WAAW,KAAK,CAAC,CAAC,CAAC,GAAG;AAC3D,sBAAc,WAAW,KAAK,CAAC,CAAC,EAC7B,IAAI,CAAC,SAAkB,KAAK,UAAU,IAAI,CAAC,EAC3C,KAAK,KAAK;AAAA,MACf;AAAA,IACF,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,MAAI,YAAY,WAAW,GAAG,KAAK,YAAY,WAAW,GAAG,GAAG;AAC9D,WAAO,cAAc;AAAA,EACvB;AAEA,QAAM,oBAAoB,cAAc,KAAK,WAAW;AACxD,QAAM,sBAAsB,YAAY,KAAK,WAAW;AAExD,MAAI,qBAAqB,qBAAqB;AAC5C,WAAO,cAAc,QAAQ;AAAA,EAC/B;AAEA,SAAO,cAAc;AACvB;AAEO,SAAS,uBAA0B,aAA+B;AACvE,UAAQ,KAAK,uDAAuD;AAEpE,MAAI;AACF,UAAM,kBAAkB,oBAAoB,WAAW,KAAK;AAC5D,UAAM,SAAS,cAAc,eAAe;AAC5C,UAAM,SAAS,KAAK,MAAM,MAAM;AAChC,YAAQ,IAAI,uDAAuD;AACnE,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ,MAAM,8CAA8C,KAAK;AAAA,EACnE;AAEA,SAAO;AACT;AAMA,eAAsB,qBACpB,SACgC;AAChC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,YAAY;AAAA,IACZ,mBAAmB;AAAA,IACnB;AAAA,IACA,2BAA2B;AAAA,EAC7B,IAAI;AAEJ,MAAI,cAAc;AAClB,MAAI,oBAAoB;AACxB,QAAM,WAAqB,CAAC;AAC5B,MAAI,cAAc;AAElB,UAAQ,IAAI,4DAA4D;AACxE,UAAQ;AAAA,IACN,8BAA8B,SAAS,wBAAwB,gBAAgB;AAAA,EACjF;AAEA,MAAI;AACF,UAAM,WAAW,MAAM,OAAO,oBAAoB;AAAA,MAChD;AAAA,MACA;AAAA,MACA;AAAA,IACF,CAAC;AAED,kBAAc,oBAAoB,SAAS,GAAG,KAAK,SAAS;AAE5D,YAAQ;AAAA,MACN,oCAAoC,YAAY,MAAM,0BAA0B,SAAS,YAAY;AAAA,IACvG;AAEA,QAAI,aAAa,iBAAiB,aAAa,SAAS,YAAY;AAEpE,WAAO,WAAW,eAAe,oBAAoB,kBAAkB;AACrE;AACA,YAAM,aAAa,uBAAuB,WAAW,MAAM,0BAA0B,iBAAiB,IAAI,gBAAgB;AAC1H,cAAQ,IAAI,kBAAkB,UAAU,EAAE;AAC1C,eAAS,KAAK,UAAU;AAExB,YAAM,aAAa;AAAA,QACjB;AAAA,QACA;AAAA,MACF;AAEA,YAAM,eAAe,MAAM,OAAO,oBAAoB;AAAA,QACpD,cAAc;AAAA,QACd,YAAY;AAAA,QACZ;AAAA,MACF,CAAC;AAED,cAAQ;AAAA,QACN,yCAAyC,aAAa,IAAI,MAAM,0BAA0B,aAAa,YAAY;AAAA,MACrH;AAEA,YAAM,sBACJ,oBAAoB,aAAa,GAAG,KAAK,aAAa;AACxD,oBAAc,eAAe,aAAa,mBAAmB;AAE7D,mBAAa,iBAAiB,aAAa,aAAa,YAAY;AAAA,IACtE;AAEA,QACE,qBAAqB,oBACrB,WAAW,aACX;AACA,cAAQ;AAAA,QACN,6CAA6C,gBAAgB;AAAA,MAC/D;AACA,eAAS;AAAA,QACP;AAAA,MACF;AACA,oBAAc;AAAA,IAChB;AAEA,UAAM,kBACJ,oBAAoB,WAAW,KAAK;AACtC,QAAI;AAEJ,QAAI;AACF,UAAI,YAAY,eAAe,GAAG;AAChC,eAAO,KAAK,MAAM,eAAe;AAAA,MACnC,OAAO;AACL,cAAM,SAAS,cAAc,eAAe;AAC5C,eAAO,KAAK,MAAM,MAAM;AACxB,YAAI,CAAC,aAAa;AAChB,mBAAS,KAAK,iDAAiD;AAAA,QACjE;AAAA,MACF;AAAA,IACF,SAAS,YAAY;AACnB,YAAM,WAAW,uBAA0B,eAAe;AAC1D,UAAI,UAAU;AACZ,eAAO;AACP,sBAAc;AACd,iBAAS,KAAK,yCAAyC;AAAA,MACzD,OAAO;AACL,cAAM,IAAI;AAAA,UACR,kCAAkC,iBAAiB,mBAAmB,UAAU;AAAA,QAClF;AAAA,MACF;AAAA,IACF;AAEA,QAAI,QAAQ;AACV,UAAI;AACF,eAAO,OAAO,MAAM,IAAI;AAAA,MAC1B,SAAS,iBAAiB;AACxB,gBAAQ;AAAA,UACN;AAAA,UACA;AAAA,QACF;AACA,iBAAS,KAAK,4BAA4B,eAAe,EAAE;AAAA,MAC7D;AAAA,IACF;AAEA,YAAQ;AAAA,MACN,2CAA2C,iBAAiB,eAAe,SAAS,MAAM;AAAA,IAC5F;AAEA,WAAO;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF,SAAS,OAAO;AACd,YAAQ,MAAM,yCAAyC,KAAK;AAC5D,UAAM;AAAA,EACR;AACF;AAEO,SAAS,+BACd,SACA,iBACA,SACA,cAAsB,2BACd;AACR,SAAO,oCAAoC,OAAO,IAAI,WAAW;AAAA;AAAA;AAAA;AAAA;AAAA,EAKjE,OAAO;AAAA;AAAA;AAAA;AAAA,EAIP,eAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAUjB;","names":["bracketInfo"]}
+
{"version":3,"sources":["../src/truncation-detector.ts","../src/continuation.ts"],"sourcesContent":["/**\n * Truncation Detector\n *\n * Utilities for detecting when LLM output has been truncated and\n * extracting usable content from partial responses.\n *\n * @packageDocumentation\n */\n\nimport type { LLMFinishReason } from './client.js';\nimport { autoCloseJson } from './json-parser.js';\n\n// ============================================================================\n// Types\n// ============================================================================\n\nexport type TruncationReason =\n | 'finish_reason'\n | 'json_incomplete'\n | 'bracket_mismatch'\n | 'none';\n\nexport interface TruncationResult {\n isTruncated: boolean;\n reason: TruncationReason;\n partialContent?: string;\n lastCompleteElement?: unknown;\n missingCloseBrackets?: number;\n missingCloseBraces?: number;\n}\n\n// ============================================================================\n// Main Detection Function\n// ============================================================================\n\n/**\n * Detect if an LLM response has been truncated.\n *\n * Analyzes the response content and finish reason to determine if\n * the output was cut off due to token limits or other issues.\n *\n * @param {string} response - The LLM response text\n * @param {LLMFinishReason} finishReason - The finish reason from the LLM\n * @returns {TruncationResult} Detection result with truncation details\n */\nexport function detectTruncation(\n response: string,\n finishReason: LLMFinishReason,\n): TruncationResult {\n if (finishReason === 'length') {\n const bracketInfo = countBrackets(response);\n return {\n isTruncated: true,\n reason: 'finish_reason',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n try {\n JSON.parse(response);\n return { isTruncated: false, reason: 'none' };\n } catch {\n // JSON is invalid, check if due to truncation\n }\n\n if (finishReason === 'stop' || finishReason === null) {\n const trimmed = response.trim();\n\n const isMidContent =\n trimmed.endsWith(',') ||\n trimmed.endsWith(':') ||\n trimmed.endsWith('\": ') ||\n /:\\s*$/.test(trimmed) ||\n /,\\s*$/.test(trimmed);\n\n if (isMidContent) {\n const bracketInfo = countBrackets(response);\n return {\n isTruncated: true,\n reason: 'json_incomplete',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n try {\n const closed = autoCloseJson(trimmed);\n JSON.parse(closed);\n return { isTruncated: false, reason: 'none' };\n } catch {\n return { isTruncated: false, reason: 'none' };\n }\n }\n\n const bracketInfo = countBrackets(response);\n if (\n bracketInfo.missingCloseBrackets > 0 ||\n bracketInfo.missingCloseBraces > 0\n ) {\n return {\n isTruncated: true,\n reason: 'bracket_mismatch',\n partialContent: response,\n lastCompleteElement: findLastCompleteElement(response),\n missingCloseBrackets: bracketInfo.missingCloseBrackets,\n missingCloseBraces: bracketInfo.missingCloseBraces,\n };\n }\n\n return { isTruncated: false, reason: 'none' };\n}\n\n// ============================================================================\n// Helper Functions\n// ============================================================================\n\nfunction 
countBrackets(json: string): {\n openBrackets: number;\n closeBrackets: number;\n openBraces: number;\n closeBraces: number;\n missingCloseBrackets: number;\n missingCloseBraces: number;\n} {\n let inString = false;\n let escaped = false;\n let openBrackets = 0;\n let closeBrackets = 0;\n let openBraces = 0;\n let closeBraces = 0;\n\n for (const char of json) {\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n switch (char) {\n case '[':\n openBrackets++;\n break;\n case ']':\n closeBrackets++;\n break;\n case '{':\n openBraces++;\n break;\n case '}':\n closeBraces++;\n break;\n }\n }\n\n return {\n openBrackets,\n closeBrackets,\n openBraces,\n closeBraces,\n missingCloseBrackets: Math.max(0, openBrackets - closeBrackets),\n missingCloseBraces: Math.max(0, openBraces - closeBraces),\n };\n}\n\nexport function findLastCompleteElement(json: string): unknown | null {\n const autoClosed = autoCloseJson(json);\n try {\n return JSON.parse(autoClosed);\n } catch {\n // Auto-close didn't work\n }\n\n const trimmed = json.trim();\n\n if (trimmed.startsWith('[')) {\n const lastCompleteIndex = findLastCompleteArrayElement(trimmed);\n if (lastCompleteIndex > 0) {\n const subset = trimmed.substring(0, lastCompleteIndex) + ']';\n try {\n return JSON.parse(subset);\n } catch {\n // Continue\n }\n }\n }\n\n if (trimmed.startsWith('{')) {\n const closed = autoCloseJson(trimmed);\n try {\n return JSON.parse(closed);\n } catch {\n const lastCompleteIndex = findLastCompleteObjectProperty(trimmed);\n if (lastCompleteIndex > 0) {\n const subset = trimmed.substring(0, lastCompleteIndex) + '}';\n try {\n return JSON.parse(subset);\n } catch {\n // Give up\n }\n }\n }\n }\n\n return null;\n}\n\nfunction findLastCompleteArrayElement(json: string): number {\n let depth = 0;\n let inString = false;\n let escaped = false;\n let lastCompleteElementEnd = -1;\n\n for (let i = 0; i < json.length; i++) {\n const char = json[i];\n\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n if (char === '[' || char === '{') {\n depth++;\n } else if (char === ']' || char === '}') {\n depth--;\n if (depth === 1) {\n lastCompleteElementEnd = i + 1;\n }\n } else if (char === ',' && depth === 1) {\n lastCompleteElementEnd = i;\n }\n }\n\n return lastCompleteElementEnd > 0 ? lastCompleteElementEnd : -1;\n}\n\nfunction findLastCompleteObjectProperty(json: string): number {\n let depth = 0;\n let inString = false;\n let escaped = false;\n let lastCommaIndex = -1;\n\n for (let i = 0; i < json.length; i++) {\n const char = json[i];\n\n if (escaped) {\n escaped = false;\n continue;\n }\n if (char === '\\\\' && inString) {\n escaped = true;\n continue;\n }\n if (char === '\"') {\n inString = !inString;\n continue;\n }\n if (inString) continue;\n\n if (char === '[' || char === '{') {\n depth++;\n } else if (char === ']' || char === '}') {\n depth--;\n } else if (char === ',' && depth === 1) {\n lastCommaIndex = i;\n }\n }\n\n return lastCommaIndex > 0 ? 
[source map body truncated: the remainder of this regenerated `.js.map` line is machine-generated `mappings` data plus `sourcesContent` copies of src/truncation-detector.ts and src/continuation.ts; the substantive changes to those files appear in the per-file diffs below]
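The `sourcesContent` embedded in that map carries the new truncation heuristics: `isLikelyTruncated` flags unbalanced braces/brackets and abrupt endings such as a trailing comma, colon, or opening bracket. A rough illustration of the behavior that source encodes — assuming the helper is re-exported from the package root, which this diff does not confirm:

```ts
import { isLikelyTruncated } from '@almadar/llm';

// Unbalanced braces/brackets, or an ending like `,`, `:`, `[`, or `{`,
// are treated as signs the model stopped mid-structure.
isLikelyTruncated('{"items": [1, 2,');  // true: missing closers + trailing comma
isLikelyTruncated('{"items": [1, 2]}'); // false: balanced, complete JSON
```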
package/dist/providers/index.d.ts
CHANGED
@@ -121,6 +121,14 @@ declare class MasarProvider {
     private get;
     private request;
 }
+/**
+ * Get the singleton Masar provider instance.
+ *
+ * Creates the instance on first call, returns cached instance thereafter.
+ *
+ * @param {MasarProviderOptions} [options] - Provider configuration options
+ * @returns {MasarProvider} The Masar provider instance
+ */
 declare function getMasarProvider(options?: MasarProviderOptions): MasarProvider;
 declare function resetMasarProvider(): void;
 
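These declarations describe a lazy singleton: the first `getMasarProvider` call constructs the provider and every later call returns the cached instance, so `options` only take effect on the first call. A usage sketch — the `@almadar/llm/providers` import path is an assumption based on the dist layout:

```ts
import { getMasarProvider, resetMasarProvider } from '@almadar/llm/providers';

const provider = getMasarProvider(); // created on first call
const again = getMasarProvider();    // same cached instance
console.log(provider === again);     // true

resetMasarProvider();                // drop the cache; next call re-creates
```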
package/dist/providers/index.js
CHANGED
package/dist/structured-output.d.ts
CHANGED
@@ -106,6 +106,14 @@ declare class StructuredOutputClient {
     };
     getTokenUsage(): TokenUsage | null;
 }
+/**
+ * Get the singleton structured output client instance.
+ *
+ * Creates the instance on first call, returns cached instance thereafter.
+ *
+ * @param {StructuredOutputOptions} [options] - Client configuration options
+ * @returns {StructuredOutputClient} The structured output client instance
+ */
 declare function getStructuredOutputClient(options?: StructuredOutputOptions): StructuredOutputClient;
 declare function resetStructuredOutputClient(): void;
 declare function isStructuredOutputAvailable(): boolean;
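The structured-output singleton follows the same first-call-wins pattern, with an availability guard alongside it. A sketch of the intended flow — the `model` value passed below is illustrative:

```ts
import {
  getStructuredOutputClient,
  isStructuredOutputAvailable,
} from '@almadar/llm';

if (isStructuredOutputAvailable()) {
  const client = getStructuredOutputClient({ model: 'gpt-4o-mini' });
  // Returns null until at least one generation has been tracked.
  console.log(client.getTokenUsage());
}
```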
package/package.json
CHANGED
package/src/client.ts
CHANGED
@@ -1001,6 +1001,15 @@ export function isProviderAvailable(provider: LLMProvider): boolean {
 // Convenience Functions
 // ============================================================================
 
+/**
+ * Create an LLM client optimized for requirements analysis.
+ *
+ * Uses lower temperature (0.3) for more deterministic output.
+ * Defaults to GPT-5.1 for OpenAI or DeepSeek Chat.
+ *
+ * @param {Partial<LLMClientOptions>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured LLM client
+ */
 export function createRequirementsClient(
   options?: Partial<LLMClientOptions>,
 ): LLMClient {
@@ -1015,6 +1024,15 @@ export function createRequirementsClient(
   });
 }
 
+/**
+ * Create an LLM client optimized for creative tasks.
+ *
+ * Uses higher temperature (0.7) for more varied output.
+ * Defaults to GPT-4o or DeepSeek Reasoner.
+ *
+ * @param {Partial<LLMClientOptions>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured LLM client
+ */
 export function createCreativeClient(
   options?: Partial<LLMClientOptions>,
 ): LLMClient {
@@ -1029,6 +1047,15 @@ export function createCreativeClient(
   });
 }
 
+/**
+ * Create an LLM client optimized for code fixing.
+ *
+ * Uses low temperature (0.2) for precise, deterministic fixes.
+ * Defaults to GPT-4o Mini or DeepSeek Chat for cost efficiency.
+ *
+ * @param {Partial<LLMClientOptions>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured LLM client
+ */
 export function createFixClient(
   options?: Partial<LLMClientOptions>,
 ): LLMClient {
@@ -1045,6 +1072,12 @@ export function createFixClient(
   });
 }
 
+/**
+ * Create a DeepSeek LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured DeepSeek client
+ */
 export function createDeepSeekClient(
   options?: Partial<Omit<LLMClientOptions, 'provider'>>,
 ): LLMClient {
@@ -1055,6 +1088,12 @@ export function createDeepSeekClient(
   });
 }
 
+/**
+ * Create an OpenAI LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured OpenAI client
+ */
 export function createOpenAIClient(
   options?: Partial<Omit<LLMClientOptions, 'provider'>>,
 ): LLMClient {
@@ -1065,6 +1104,12 @@ export function createOpenAIClient(
   });
 }
 
+/**
+ * Create an Anthropic LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured Anthropic client
+ */
 export function createAnthropicClient(
   options?: Partial<Omit<LLMClientOptions, 'provider'>>,
 ): LLMClient {
@@ -1075,6 +1120,12 @@ export function createAnthropicClient(
   });
 }
 
+/**
+ * Create a Kimi LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured Kimi client
+ */
 export function createKimiClient(
   options?: Partial<Omit<LLMClientOptions, 'provider'>>,
 ): LLMClient {
@@ -1085,6 +1136,12 @@ export function createKimiClient(
   });
 }
 
+/**
+ * Create an OpenRouter LLM client.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured OpenRouter client
+ */
 export function createOpenRouterClient(
   options?: Partial<Omit<LLMClientOptions, 'provider'>>,
 ): LLMClient {
@@ -1095,6 +1152,12 @@ export function createOpenRouterClient(
   });
 }
 
+/**
+ * Create a Zhipu (GLM) LLM client via OpenRouter.
+ *
+ * @param {Partial<Omit<LLMClientOptions, 'provider'>>} [options] - Optional client configuration
+ * @returns {LLMClient} Configured Zhipu client
+ */
 export function createZhipuClient(
   options?: Partial<Omit<LLMClientOptions, 'provider'>>,
 ): LLMClient {
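Taken together, these nine factories are thin presets over the same `LLMClient` constructor: three task-tuned (temperature 0.3 / 0.7 / 0.2 per the JSDoc) and six provider-pinned. A usage sketch — the option fields passed below are assumptions about `LLMClientOptions`, not confirmed by this diff:

```ts
import {
  createRequirementsClient,
  createCreativeClient,
  createFixClient,
  createDeepSeekClient,
} from '@almadar/llm';

const analyst = createRequirementsClient(); // temperature 0.3 preset
const writer = createCreativeClient();      // temperature 0.7 preset
const fixer = createFixClient();            // temperature 0.2 preset

// Provider is pinned; 'model' is assumed to be a valid LLMClientOptions field.
const deepseek = createDeepSeekClient({ model: 'deepseek-chat' });
```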
package/src/continuation.ts
CHANGED
@@ -66,6 +66,16 @@ Rules:
 // Helper Functions
 // ============================================================================
 
+/**
+ * Merge a previous partial response with a continuation.
+ *
+ * Handles JSON structure merging, removing markdown code blocks,
+ * and ensuring proper comma separation between merged parts.
+ *
+ * @param {string} previous - The previous partial response
+ * @param {string} continuation - The continuation to merge
+ * @returns {string} The merged response
+ */
 export function mergeResponses(
   previous: string,
   continuation: string,
@@ -126,6 +136,16 @@ export function salvagePartialResponse<T>(rawResponse: string): T | null {
 // Main Function
 // ============================================================================
 
+/**
+ * Call an LLM with automatic continuation handling.
+ *
+ * Manages token limits by detecting truncation and making continuation
+ * calls until the complete response is received or max continuations reached.
+ *
+ * @template T - Expected response type
+ * @param {ContinuationOptions<T>} options - Continuation call options
+ * @returns {Promise<ContinuationResult<T>>} Result with data and continuation metadata
+ */
 export async function callWithContinuation<T>(
   options: ContinuationOptions<T>,
 ): Promise<ContinuationResult<T>> {
@@ -262,6 +282,18 @@ export async function callWithContinuation<T>(
   }
 }
 
+/**
+ * Build a generic continuation prompt.
+ *
+ * Creates a prompt that instructs the LLM to continue from where
+ * a previous truncated response left off.
+ *
+ * @param {string} context - Original context/prompt
+ * @param {string} partialResponse - The partial response generated so far
+ * @param {number} attempt - Current continuation attempt number
+ * @param {number} [maxAttempts] - Maximum allowed continuation attempts
+ * @returns {string} The formatted continuation prompt
+ */
 export function buildGenericContinuationPrompt(
   context: string,
   partialResponse: string,
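The option names below come straight from `ContinuationOptions<T>` in the embedded source; only the root import path and prompt text are illustrative. A minimal end-to-end sketch pairing `callWithContinuation` with the generic prompt builder:

```ts
import { z } from 'zod';
import {
  callWithContinuation,
  buildGenericContinuationPrompt,
  createOpenAIClient,
} from '@almadar/llm';

const userPrompt = 'List 200 common English nouns as {"items": [...]}.';

const result = await callWithContinuation({
  client: createOpenAIClient(),
  systemPrompt: 'Respond with a single JSON object.',
  userPrompt,
  schema: z.object({ items: z.array(z.string()) }),
  maxTokens: 2048,
  maxContinuations: 3,
  // Feed the original prompt back as context on each continuation attempt.
  buildContinuationPrompt: (partial, attempt) =>
    buildGenericContinuationPrompt(userPrompt, partial, attempt),
});

console.log(result.data.items.length, result.continuationCount, result.wasSalvaged);
```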
package/src/providers/masar.ts
CHANGED
@@ -258,6 +258,14 @@ export class MasarProvider {
 
 let sharedInstance: MasarProvider | null = null;
 
+/**
+ * Get the singleton Masar provider instance.
+ *
+ * Creates the instance on first call, returns cached instance thereafter.
+ *
+ * @param {MasarProviderOptions} [options] - Provider configuration options
+ * @returns {MasarProvider} The Masar provider instance
+ */
 export function getMasarProvider(
   options?: MasarProviderOptions,
 ): MasarProvider {
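Given the `sharedInstance` module variable in the context lines, the accessor body is presumably the standard lazy-singleton pattern. A sketch of what it likely looks like — not the package's verbatim implementation, and the `MasarProvider` constructor signature is assumed:

```ts
export function getMasarProvider(
  options?: MasarProviderOptions,
): MasarProvider {
  // First call constructs and caches; later calls ignore `options`.
  if (!sharedInstance) {
    sharedInstance = new MasarProvider(options);
  }
  return sharedInstance;
}

export function resetMasarProvider(): void {
  sharedInstance = null; // next getMasarProvider() re-creates the provider
}
```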
package/src/structured-output.ts
CHANGED
@@ -308,6 +308,14 @@ export class StructuredOutputClient {
 
 let sharedClient: StructuredOutputClient | null = null;
 
+/**
+ * Get the singleton structured output client instance.
+ *
+ * Creates the instance on first call, returns cached instance thereafter.
+ *
+ * @param {StructuredOutputOptions} [options] - Client configuration options
+ * @returns {StructuredOutputClient} The structured output client instance
+ */
 export function getStructuredOutputClient(
   options?: StructuredOutputOptions,
 ): StructuredOutputClient {
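Because the cached client ignores options after the first call, tests that need different configurations should reset between cases. A hedged sketch using vitest/jest-style globals:

```ts
import {
  getStructuredOutputClient,
  resetStructuredOutputClient,
} from '@almadar/llm';

afterEach(() => {
  resetStructuredOutputClient(); // ensure each test configures its own client
});

test('low-temperature client', () => {
  const client = getStructuredOutputClient({ temperature: 0 });
  // ...
});
```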
package/src/truncation-detector.ts
CHANGED
@@ -33,6 +33,16 @@ export interface TruncationResult {
 // Main Detection Function
 // ============================================================================
 
+/**
+ * Detect if an LLM response has been truncated.
+ *
+ * Analyzes the response content and finish reason to determine if
+ * the output was cut off due to token limits or other issues.
+ *
+ * @param {string} response - The LLM response text
+ * @param {LLMFinishReason} finishReason - The finish reason from the LLM
+ * @returns {TruncationResult} Detection result with truncation details
+ */
 export function detectTruncation(
   response: string,
   finishReason: LLMFinishReason,