@nextsparkjs/plugin-langchain 0.1.0-beta.1

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Files changed (67)
  1. package/.env.example +41 -0
  2. package/api/observability/metrics/route.ts +110 -0
  3. package/api/observability/traces/[traceId]/route.ts +398 -0
  4. package/api/observability/traces/route.ts +205 -0
  5. package/api/sessions/route.ts +332 -0
  6. package/components/observability/CollapsibleJson.tsx +71 -0
  7. package/components/observability/CompactTimeline.tsx +75 -0
  8. package/components/observability/ConversationFlow.tsx +271 -0
  9. package/components/observability/DisabledMessage.tsx +21 -0
  10. package/components/observability/FiltersPanel.tsx +82 -0
  11. package/components/observability/ObservabilityDashboard.tsx +230 -0
  12. package/components/observability/SpansList.tsx +210 -0
  13. package/components/observability/TraceDetail.tsx +335 -0
  14. package/components/observability/TraceStatusBadge.tsx +39 -0
  15. package/components/observability/TracesTable.tsx +97 -0
  16. package/components/observability/index.ts +7 -0
  17. package/docs/01-getting-started/01-overview.md +196 -0
  18. package/docs/01-getting-started/02-installation.md +368 -0
  19. package/docs/01-getting-started/03-configuration.md +794 -0
  20. package/docs/02-core-concepts/01-architecture.md +566 -0
  21. package/docs/02-core-concepts/02-agents.md +597 -0
  22. package/docs/02-core-concepts/03-tools.md +689 -0
  23. package/docs/03-orchestration/01-graph-orchestrator.md +809 -0
  24. package/docs/03-orchestration/02-legacy-react.md +650 -0
  25. package/docs/04-advanced/01-observability.md +645 -0
  26. package/docs/04-advanced/02-token-tracking.md +469 -0
  27. package/docs/04-advanced/03-streaming.md +476 -0
  28. package/docs/04-advanced/04-guardrails.md +597 -0
  29. package/docs/05-reference/01-api-reference.md +1403 -0
  30. package/docs/05-reference/02-customization.md +646 -0
  31. package/docs/05-reference/03-examples.md +881 -0
  32. package/docs/index.md +85 -0
  33. package/hooks/observability/useMetrics.ts +31 -0
  34. package/hooks/observability/useTraceDetail.ts +48 -0
  35. package/hooks/observability/useTraces.ts +59 -0
  36. package/lib/agent-factory.ts +354 -0
  37. package/lib/agent-helpers.ts +201 -0
  38. package/lib/db-memory-store.ts +417 -0
  39. package/lib/graph/index.ts +58 -0
  40. package/lib/graph/nodes/combiner.ts +399 -0
  41. package/lib/graph/nodes/router.ts +440 -0
  42. package/lib/graph/orchestrator-graph.ts +386 -0
  43. package/lib/graph/prompts/combiner.md +131 -0
  44. package/lib/graph/prompts/router.md +193 -0
  45. package/lib/graph/types.ts +365 -0
  46. package/lib/guardrails.ts +230 -0
  47. package/lib/index.ts +44 -0
  48. package/lib/logger.ts +70 -0
  49. package/lib/memory-store.ts +168 -0
  50. package/lib/message-serializer.ts +110 -0
  51. package/lib/prompt-renderer.ts +94 -0
  52. package/lib/providers.ts +226 -0
  53. package/lib/streaming.ts +232 -0
  54. package/lib/token-tracker.ts +298 -0
  55. package/lib/tools-builder.ts +192 -0
  56. package/lib/tracer-callbacks.ts +342 -0
  57. package/lib/tracer.ts +350 -0
  58. package/migrations/001_langchain_memory.sql +83 -0
  59. package/migrations/002_token_usage.sql +127 -0
  60. package/migrations/003_observability.sql +257 -0
  61. package/package.json +28 -0
  62. package/plugin.config.ts +170 -0
  63. package/presets/lib/langchain.config.ts.preset +142 -0
  64. package/presets/templates/sector7/ai-observability/[traceId]/page.tsx +91 -0
  65. package/presets/templates/sector7/ai-observability/page.tsx +54 -0
  66. package/types/langchain.types.ts +274 -0
  67. package/types/observability.types.ts +270 -0
package/lib/memory-store.ts
@@ -0,0 +1,168 @@
+ /**
+  * Memory Store for LangChain
+  *
+  * This module provides the primary interface for conversation memory.
+  * Uses database persistence via dbMemoryStore.
+  *
+  * For new code, use dbMemoryStore directly with explicit context.
+  */
+
+ import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages'
+ import {
+   dbMemoryStore,
+   CONVERSATION_LIMITS,
+   generateSessionId,
+   type ConversationInfo,
+ } from './db-memory-store'
+ import type { AgentContext, SessionConfig } from '../types/langchain.types'
+
+ export {
+   dbMemoryStore,
+   CONVERSATION_LIMITS,
+   generateSessionId,
+   type AgentContext,
+   type SessionConfig,
+   type ConversationInfo,
+ }
+
+ /**
+  * Memory store interface with context support
+  *
+  * All methods are async and require context (userId, teamId) for multi-tenancy.
+  */
+ export const memoryStore = {
+   /**
+    * Get messages for a session
+    */
+   getMessages: async (
+     sessionId: string,
+     context: AgentContext
+   ): Promise<BaseMessage[]> => {
+     return dbMemoryStore.getMessages(sessionId, context)
+   },
+
+   /**
+    * Add messages to a session
+    */
+   addMessages: async (
+     sessionId: string,
+     messages: BaseMessage[],
+     context: AgentContext,
+     config?: SessionConfig
+   ): Promise<void> => {
+     return dbMemoryStore.addMessages(sessionId, messages, context, config)
+   },
+
+   /**
+    * Clear a session
+    */
+   clearSession: async (
+     sessionId: string,
+     context: AgentContext
+   ): Promise<void> => {
+     return dbMemoryStore.clearSession(sessionId, context)
+   },
+
+   /**
+    * Clean up expired sessions
+    */
+   cleanup: async (): Promise<number> => {
+     return dbMemoryStore.cleanup()
+   },
+
+   /**
+    * List all sessions for a user in a team
+    */
+   listSessions: async (
+     context: AgentContext
+   ): Promise<ConversationInfo[]> => {
+     return dbMemoryStore.listSessions(context)
+   },
+
+   /**
+    * Get full session info
+    */
+   getSession: async (
+     sessionId: string,
+     context: AgentContext
+   ): Promise<ConversationInfo | null> => {
+     return dbMemoryStore.getSession(sessionId, context)
+   },
+
+   /**
+    * Create a new session
+    */
+   createSession: async (
+     context: AgentContext,
+     name?: string
+   ): Promise<{ sessionId: string; createdAt: Date }> => {
+     return dbMemoryStore.createSession(context, name)
+   },
+
+   /**
+    * Rename a session
+    */
+   renameSession: async (
+     sessionId: string,
+     name: string,
+     context: AgentContext
+   ): Promise<void> => {
+     return dbMemoryStore.renameSession(sessionId, name, context)
+   },
+
+   /**
+    * Toggle pin status
+    */
+   togglePinSession: async (
+     sessionId: string,
+     isPinned: boolean,
+     context: AgentContext
+   ): Promise<void> => {
+     return dbMemoryStore.togglePinSession(sessionId, isPinned, context)
+   },
+
+   /**
+    * Count sessions for limit enforcement
+    */
+   countSessions: async (
+     context: AgentContext
+   ): Promise<number> => {
+     return dbMemoryStore.countSessions(context)
+   },
+
+   /**
+    * Extend session TTL (deprecated)
+    * @deprecated Sessions now have no expiration by default
+    */
+   extendSession: async (
+     sessionId: string,
+     context: AgentContext,
+     ttlHours?: number
+   ): Promise<void> => {
+     return dbMemoryStore.extendSession(sessionId, context, ttlHours)
+   },
+
+   /**
+    * Default configuration values
+    */
+   defaults: {
+     maxMessages: CONVERSATION_LIMITS.MAX_MESSAGES_PER_CONVERSATION,
+     maxConversations: CONVERSATION_LIMITS.MAX_CONVERSATIONS,
+   },
+ }
+
+ /**
+  * Helper function to create a HumanMessage
+  * Re-exported for use in theme code without direct @langchain imports
+  */
+ export function createHumanMessage(content: string): HumanMessage {
+   return new HumanMessage(content)
+ }
+
+ /**
+  * Helper function to create an AIMessage
+  * Re-exported for use in theme code without direct @langchain imports
+  */
+ export function createAIMessage(content: string): AIMessage {
+   return new AIMessage(content)
+ }
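
A minimal usage sketch for the store above (not part of the diff): the import paths are illustrative, and the context literal assumes only the `userId`/`teamId` fields the module's own docs mention, so it is cast to `AgentContext`.

```typescript
import { memoryStore, createHumanMessage, createAIMessage } from './lib/memory-store'
import type { AgentContext } from './types/langchain.types'

async function demo() {
  // Hypothetical multi-tenant context; fields inferred from the interface docs
  const context = { userId: 'user_123', teamId: 'team_456' } as AgentContext

  // Create a session, append one user/assistant exchange, read it back
  const { sessionId } = await memoryStore.createSession(context, 'Support chat')

  await memoryStore.addMessages(
    sessionId,
    [createHumanMessage('Hello!'), createAIMessage('Hi! How can I help?')],
    context
  )

  const history = await memoryStore.getMessages(sessionId, context)
  console.log(history.length) // => 2
}
```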
package/lib/message-serializer.ts
@@ -0,0 +1,110 @@
+ /**
+  * Message Serializer for LangChain
+  *
+  * Handles serialization/deserialization of LangChain BaseMessage objects
+  * to/from JSON for database storage.
+  */
+
+ import {
+   BaseMessage,
+   HumanMessage,
+   AIMessage,
+   SystemMessage,
+   ToolMessage,
+ } from '@langchain/core/messages'
+
+ /**
+  * Serialized message format for database storage
+  */
+ export interface SerializedMessage {
+   type: 'human' | 'ai' | 'system' | 'tool'
+   content: string
+   name?: string
+   additional_kwargs?: Record<string, unknown>
+   response_metadata?: Record<string, unknown>
+   tool_call_id?: string
+ }
+
+ /**
+  * Serialize LangChain messages to JSON-compatible format
+  */
+ export function serializeMessages(messages: BaseMessage[]): SerializedMessage[] {
+   return messages.map((msg) => {
+     // Convert content to a string (it may be an array of complex content parts)
+     const content = typeof msg.content === 'string'
+       ? msg.content
+       : JSON.stringify(msg.content)
+
+     const serialized: SerializedMessage = {
+       type: msg._getType() as SerializedMessage['type'],
+       content,
+     }
+
+     if (msg.name) {
+       serialized.name = msg.name
+     }
+
+     if (msg.additional_kwargs && Object.keys(msg.additional_kwargs).length > 0) {
+       serialized.additional_kwargs = msg.additional_kwargs
+     }
+
+     // AIMessage has response_metadata
+     if (msg._getType() === 'ai' && (msg as AIMessage).response_metadata) {
+       serialized.response_metadata = (msg as AIMessage).response_metadata
+     }
+
+     // ToolMessage has tool_call_id
+     if (msg._getType() === 'tool' && (msg as ToolMessage).tool_call_id) {
+       serialized.tool_call_id = (msg as ToolMessage).tool_call_id
+     }
+
+     return serialized
+   })
+ }
+
+ /**
+  * Deserialize JSON messages back to LangChain BaseMessage objects
+  */
+ export function deserializeMessages(serialized: SerializedMessage[]): BaseMessage[] {
+   return serialized.map((msg) => {
+     switch (msg.type) {
+       case 'human':
+         return new HumanMessage({
+           content: msg.content,
+           name: msg.name,
+           additional_kwargs: msg.additional_kwargs,
+         })
+
+       case 'ai':
+         return new AIMessage({
+           content: msg.content,
+           name: msg.name,
+           additional_kwargs: msg.additional_kwargs,
+           response_metadata: msg.response_metadata,
+         })
+
+       case 'system':
+         return new SystemMessage({
+           content: msg.content,
+           name: msg.name,
+           additional_kwargs: msg.additional_kwargs,
+         })
+
+       case 'tool':
+         return new ToolMessage({
+           content: msg.content,
+           tool_call_id: msg.tool_call_id || '',
+           name: msg.name,
+           additional_kwargs: msg.additional_kwargs,
+         })
+
+       default:
+         // Fallback to HumanMessage for unknown types
+         return new HumanMessage({
+           content: msg.content,
+           name: msg.name,
+           additional_kwargs: msg.additional_kwargs,
+         })
+     }
+   })
+ }
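
A round-trip sketch for the serializer above (not part of the diff; the local import path is illustrative):

```typescript
import { HumanMessage, AIMessage } from '@langchain/core/messages'
import { serializeMessages, deserializeMessages } from './lib/message-serializer'

const messages = [
  new HumanMessage('What is 2 + 2?'),
  new AIMessage('2 + 2 = 4'),
]

// Serialize for storage (e.g. a JSON/JSONB column), then restore
const stored = JSON.stringify(serializeMessages(messages))
const restored = deserializeMessages(JSON.parse(stored))

console.log(restored[0]._getType()) // => 'human'
console.log(restored[1].content)    // => '2 + 2 = 4'
```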
package/lib/prompt-renderer.ts
@@ -0,0 +1,94 @@
+ /**
+  * Prompt Renderer for LangChain
+  *
+  * Renders system prompts using Handlebars templates.
+  * Allows themes to inject dynamic context data into agent prompts.
+  *
+  * @example
+  * // Template with variables
+  * const template = 'Hello {{user.name}} from {{company.name}}!'
+  * const context = { user: { name: 'John' }, company: { name: 'Acme' } }
+  * const result = renderPrompt(template, context)
+  * // => 'Hello John from Acme!'
+  *
+  * @example
+  * // Template with conditionals
+  * const template = `
+  * {{#if isAdmin}}
+  * You have admin privileges.
+  * {{else}}
+  * You are a regular user.
+  * {{/if}}
+  * `
+  *
+  * @example
+  * // Template with loops
+  * const template = `
+  * Recent items:
+  * {{#each items}}
+  * - {{this.name}}: {{this.status}}
+  * {{/each}}
+  * `
+  */
+
+ import Handlebars from 'handlebars'
+ import type { AgentContext } from '../types/langchain.types'
+ import { config } from '../plugin.config'
+
+ /**
+  * Check if a template contains Handlebars syntax
+  */
+ export function hasTemplateVariables(template: string): boolean {
+   // Match {{ }} patterns (Handlebars syntax)
+   return /\{\{[^}]+\}\}/.test(template)
+ }
+
+ /**
+  * Render a prompt template with context data
+  *
+  * Uses Handlebars for template rendering, supporting:
+  * - Variables: {{key}}, {{nested.key}}
+  * - Conditionals: {{#if condition}}...{{/if}}
+  * - Loops: {{#each items}}...{{/each}}
+  * - Built-in helpers: {{#unless}}, {{#with}}, etc.
+  *
+  * @param template - The prompt template (may contain Handlebars syntax)
+  * @param context - The context data to inject into the template
+  * @returns Rendered prompt string
+  */
+ export function renderPrompt(template: string, context: AgentContext): string {
+   // If no template variables, return as-is (optimization)
+   if (!hasTemplateVariables(template)) {
+     return template
+   }
+
+   try {
+     const compiled = Handlebars.compile(template, {
+       // Non-strict mode: missing variables render as empty strings instead of throwing
+       strict: false,
+       // Don't escape HTML (prompts are not rendered in a browser)
+       noEscape: true,
+     })
+
+     return compiled(context)
+   } catch (error) {
+     // Log the error and return the original template if rendering fails
+     if (config.debug) {
+       console.error('[PromptRenderer] Failed to render template:', error)
+     }
+     return template
+   }
+ }
+
+ /**
+  * Pre-compile a template for performance (optional)
+  * Use when rendering the same template multiple times
+  */
+ export function compilePrompt(template: string): (context: AgentContext) => string {
+   const compiled = Handlebars.compile(template, {
+     strict: false,
+     noEscape: true,
+   })
+
+   return (context: AgentContext) => compiled(context)
+ }
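
Since `renderPrompt` recompiles the template on every call, `compilePrompt` is the better fit for hot paths. A sketch (not part of the diff; the template and context values are illustrative, and the cast stands in for a full `AgentContext`):

```typescript
import { compilePrompt } from './lib/prompt-renderer'
import type { AgentContext } from './types/langchain.types'

// Compile once at module load, render once per request
const renderSupportPrompt = compilePrompt(
  'You are a support agent for {{company.name}}. The user is {{user.name}}.'
)

// Hypothetical context values; real code passes the actual AgentContext
const context = { company: { name: 'Acme' }, user: { name: 'John' } } as unknown as AgentContext
console.log(renderSupportPrompt(context))
// => 'You are a support agent for Acme. The user is John.'
```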
package/lib/providers.ts
@@ -0,0 +1,226 @@
+ import { ChatOllama } from '@langchain/ollama'
+ import { ChatOpenAI } from '@langchain/openai'
+ import { ChatAnthropic } from '@langchain/anthropic'
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models'
+ import { config, validateProviderConfig, isProviderAvailable, getAvailableProviders } from '../plugin.config'
+ import type { ModelConfig, LLMProvider } from '../types/langchain.types'
+
+ /**
+  * Create an OpenAI model instance
+  *
+  * @param modelConfig - Optional configuration overrides
+  * @returns ChatOpenAI instance
+  *
+  * @example
+  * // Use defaults from env
+  * const model = createOpenAIModel()
+  *
+  * // Use specific model
+  * const model = createOpenAIModel({ model: 'gpt-4o' })
+  *
+  * // Use with temperature
+  * const model = createOpenAIModel({ model: 'gpt-4o', temperature: 0.7 })
+  *
+  * // Use with LM Studio (local OpenAI-compatible server)
+  * const model = createOpenAIModel({
+  *   model: 'local-model',
+  *   options: {
+  *     baseUrl: 'http://localhost:1234/v1',
+  *     apiKey: 'lm-studio' // LM Studio doesn't need a real key
+  *   }
+  * })
+  */
+ export function createOpenAIModel(modelConfig?: Partial<ModelConfig>): ChatOpenAI {
+   // Determine baseUrl:
+   // - If options.baseUrl is explicitly set to undefined, force the real OpenAI API
+   // - If options.baseUrl is set to a string, use that
+   // - Otherwise, fall back to the env var
+   const explicitlyUndefined = modelConfig?.options && 'baseUrl' in modelConfig.options && modelConfig.options.baseUrl === undefined
+   const baseUrl = explicitlyUndefined ? undefined : (modelConfig?.options?.baseUrl || config.providers.openai.baseUrl)
+
+   // Skip API key validation if using a custom baseUrl (e.g., LM Studio)
+   const isLocalServer = !!baseUrl
+   if (!isLocalServer) {
+     validateProviderConfig('openai')
+   }
+
+   const chatOpenAI = new ChatOpenAI({
+     openAIApiKey: modelConfig?.options?.apiKey || config.providers.openai.apiKey || 'lm-studio',
+     modelName: modelConfig?.model || config.providers.openai.model,
+     temperature: modelConfig?.temperature ?? config.providers.openai.temperature,
+     maxTokens: modelConfig?.maxTokens,
+     configuration: baseUrl ? { baseURL: baseUrl } : undefined,
+     // LM Studio compatibility: disable strict tool calling, which requires a specific JSON Schema format
+     supportsStrictToolCalling: false,
+     // Enable verbose mode in debug to see what's sent to the API
+     verbose: config.debug,
+   })
+
+   return chatOpenAI
+ }
+
+ /**
+  * Create an Anthropic model instance
+  *
+  * @param modelConfig - Optional configuration overrides
+  * @returns ChatAnthropic instance
+  *
+  * @example
+  * // Use defaults from env
+  * const model = createAnthropicModel()
+  *
+  * // Use specific model
+  * const model = createAnthropicModel({ model: 'claude-3-opus-20240229' })
+  */
+ export function createAnthropicModel(modelConfig?: Partial<ModelConfig>): ChatAnthropic {
+   validateProviderConfig('anthropic')
+
+   return new ChatAnthropic({
+     anthropicApiKey: modelConfig?.options?.apiKey || config.providers.anthropic.apiKey,
+     modelName: modelConfig?.model || config.providers.anthropic.model,
+     temperature: modelConfig?.temperature ?? config.providers.anthropic.temperature,
+     maxTokens: modelConfig?.maxTokens,
+   })
+ }
+
+ /**
+  * Create an Ollama model instance (local)
+  *
+  * @param modelConfig - Optional configuration overrides
+  * @returns ChatOllama instance
+  *
+  * @example
+  * // Use defaults from env
+  * const model = createOllamaModel()
+  *
+  * // Use specific model
+  * const model = createOllamaModel({ model: 'llama3.2:3b' })
+  *
+  * // Use custom Ollama server
+  * const model = createOllamaModel({
+  *   model: 'qwen2.5:7b',
+  *   options: { baseUrl: 'http://192.168.1.100:11434' }
+  * })
+  */
+ export function createOllamaModel(modelConfig?: Partial<ModelConfig>): ChatOllama {
+   return new ChatOllama({
+     baseUrl: modelConfig?.options?.baseUrl || config.providers.ollama.baseUrl,
+     model: modelConfig?.model || config.providers.ollama.model,
+     temperature: modelConfig?.temperature ?? config.providers.ollama.temperature,
+   })
+ }
+
+ /**
+  * Provider factory map
+  */
+ const providerFactories: Record<LLMProvider, (cfg?: Partial<ModelConfig>) => BaseChatModel> = {
+   openai: createOpenAIModel,
+   anthropic: createAnthropicModel,
+   ollama: createOllamaModel,
+ }
+
+ /**
+  * Create a model instance based on configuration
+  *
+  * This is the main factory function for creating LLM instances.
+  * It selects the appropriate provider and applies configuration.
+  *
+  * @param modelConfig - Optional model configuration. If not provided, uses plugin defaults.
+  * @returns A LangChain chat model instance
+  *
+  * @example
+  * // Use plugin defaults (from env vars)
+  * const model = getModel()
+  *
+  * // Use specific provider with default model
+  * const model = getModel({ provider: 'openai' })
+  *
+  * // Use specific provider and model
+  * const model = getModel({ provider: 'anthropic', model: 'claude-3-opus-20240229' })
+  *
+  * // Use with temperature override
+  * const model = getModel({ provider: 'openai', model: 'gpt-4o', temperature: 0.7 })
+  *
+  * // Use Ollama with custom server
+  * const model = getModel({
+  *   provider: 'ollama',
+  *   model: 'qwen2.5:7b',
+  *   options: { baseUrl: 'http://192.168.1.100:11434' }
+  * })
+  */
+ export function getModel(modelConfig?: Partial<ModelConfig>): BaseChatModel {
+   const provider = modelConfig?.provider || config.defaultProvider
+
+   const factory = providerFactories[provider]
+   if (!factory) {
+     throw new Error(
+       `Unsupported provider: ${provider}. ` +
+       `Supported providers: ${Object.keys(providerFactories).join(', ')}`
+     )
+   }
+
+   if (config.debug) {
+     const model = modelConfig?.model || config.providers[provider]?.model || 'default'
+     console.log(`[LangChain] Creating model - Provider: ${provider}, Model: ${model}`)
+   }
+
+   return factory(modelConfig)
+ }
+
+ /**
+  * Structured output method types supported by LangChain
+  */
+ export type StructuredOutputMethod = 'functionCalling' | 'jsonMode' | 'jsonSchema'
+
+ /**
+  * Determine the best structured output method for a given provider configuration
+  *
+  * Different providers/servers have different capabilities:
+  * - OpenAI API: Supports all methods (functionCalling is best)
+  * - Anthropic: Uses tool use (functionCalling)
+  * - Ollama: Supports functionCalling for most models
+  * - LM Studio (OpenAI-compatible): Only supports jsonSchema
+  *
+  * @param modelConfig - The model configuration being used
+  * @returns The recommended structured output method
+  *
+  * @example
+  * const method = getStructuredOutputMethod({ provider: 'openai' })
+  * const structuredModel = model.withStructuredOutput(schema, { method })
+  */
+ export function getStructuredOutputMethod(modelConfig?: Partial<ModelConfig>): StructuredOutputMethod {
+   const provider = modelConfig?.provider || config.defaultProvider
+
+   // Check if using the OpenAI provider with a custom baseUrl (LM Studio, LocalAI, etc.)
+   if (provider === 'openai') {
+     // Check if baseUrl is explicitly set to undefined (force real OpenAI API)
+     const explicitlyUndefined = modelConfig?.options && 'baseUrl' in modelConfig.options && modelConfig.options.baseUrl === undefined
+     const baseUrl = explicitlyUndefined ? undefined : (modelConfig?.options?.baseUrl || config.providers.openai.baseUrl)
+
+     if (baseUrl) {
+       // Local OpenAI-compatible servers (LM Studio) use jsonSchema
+       if (config.debug) {
+         console.log('[LangChain] Using jsonSchema method for local OpenAI-compatible server')
+       }
+       return 'jsonSchema'
+     }
+     // Real OpenAI API - use functionCalling (most reliable)
+     return 'functionCalling'
+   }
+
+   // Anthropic - uses tool use, which maps to functionCalling
+   if (provider === 'anthropic') {
+     return 'functionCalling'
+   }
+
+   // Ollama - most models support function calling
+   if (provider === 'ollama') {
+     return 'functionCalling'
+   }
+
+   // Default fallback
+   return 'functionCalling'
+ }
+
+ // Re-export utility functions from plugin.config for convenience
+ export { isProviderAvailable, getAvailableProviders }
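
A sketch tying `getModel` and `getStructuredOutputMethod` together, following the pattern in the JSDoc above (not part of the diff; the zod schema and prompt are illustrative, while `withStructuredOutput` is the standard LangChain chat-model API):

```typescript
import { z } from 'zod'
import { getModel, getStructuredOutputMethod } from './lib/providers'

// Illustrative schema for a routing decision
const RouteSchema = z.object({
  agent: z.enum(['billing', 'support']),
  confidence: z.number().min(0).max(1),
})

async function route(userMessage: string) {
  const modelConfig = { provider: 'openai' as const, model: 'gpt-4o' }

  // Pick the structured-output method that matches the provider/server,
  // then bind the schema to the model
  const model = getModel(modelConfig)
  const method = getStructuredOutputMethod(modelConfig)
  const structured = model.withStructuredOutput(RouteSchema, { method })

  // Returns an object matching RouteSchema, e.g. { agent: 'billing', confidence: 0.9 }
  return structured.invoke(`Route this message: "${userMessage}"`)
}
```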