@layer-ai/core 0.2.1 → 0.2.2

package/dist/services/providers/anthropic-adapter.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"anthropic-adapter.d.ts","sourceRoot":"","sources":["../../../src/services/providers/anthropic-adapter.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,mBAAmB,EAAmB,MAAM,mBAAmB,CAAC;AACzE,OAAO,EACL,YAAY,EACZ,aAAa,EACb,IAAI,EACJ,YAAY,EACZ,UAAU,EACX,MAAM,eAAe,CAAC;AAavB,qBAAa,gBAAiB,SAAQ,mBAAmB;IACvD,SAAS,CAAC,QAAQ,SAAe;IAEjC,SAAS,CAAC,YAAY,EAAE,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,CAQ1C;IAEF,SAAS,CAAC,kBAAkB,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,CAAC,CAI3D;IAEF,SAAS,CAAC,oBAAoB,EAAE,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAM1D;IAEF,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,GAAG,MAAM,GAAG,SAAS;IAalE,IAAI,CAAC,OAAO,EAAE,YAAY,GAAG,OAAO,CAAC,aAAa,CAAC;YAiB3C,UAAU;CA2JzB"}
+ {"version":3,"file":"anthropic-adapter.d.ts","sourceRoot":"","sources":["../../../src/services/providers/anthropic-adapter.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,mBAAmB,EAAmB,MAAM,mBAAmB,CAAC;AACzE,OAAO,EACL,YAAY,EACZ,aAAa,EACb,IAAI,EACJ,YAAY,EACZ,UAAU,EACX,MAAM,eAAe,CAAC;AAavB,qBAAa,gBAAiB,SAAQ,mBAAmB;IACvD,SAAS,CAAC,QAAQ,SAAe;IAEjC,SAAS,CAAC,YAAY,EAAE,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,CAQ1C;IAEF,SAAS,CAAC,kBAAkB,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,CAAC,CAI3D;IAEF,SAAS,CAAC,oBAAoB,EAAE,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAM1D;IAEF,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,GAAG,MAAM,GAAG,SAAS;IAalE,IAAI,CAAC,OAAO,EAAE,YAAY,GAAG,OAAO,CAAC,aAAa,CAAC;YAiB3C,UAAU;CA4JzB"}
package/dist/services/providers/anthropic-adapter.js CHANGED
@@ -51,13 +51,13 @@ export class AnthropicAdapter extends BaseProviderAdapter {
  case 'chat':
  return this.handleChat(request);
  case 'image':
- throw new Error('image generation not yet supported by LayerAI');
+ throw new Error('Image generation not yet supported by Anthropic');
  case 'embeddings':
- throw new Error('embeddings not yet supported by LayerAI');
+ throw new Error('Embeddings not yet supported by Anthropic');
  case 'tts':
- throw new Error('tts generation not yet supported by LayerAI');
+ throw new Error('TTS generation not yet supported by Anthropic');
  case 'video':
- throw new Error('Video generation not yet supported by LayerAI');
+ throw new Error('Video generation not yet supported by Anthropic');
  default:
  throw new Error(`Unknown modality: ${request.type}`);
  }
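The change above is purely attributional: unsupported-modality errors now name the upstream provider rather than LayerAI itself, and the messages get consistent capitalization. A hedged sketch of how a caller observes the difference; the root import and zero-argument construction are assumptions, not shown in this diff:

```ts
// Hypothetical caller. Whether AnthropicAdapter is exported from the package
// root and is constructible with no arguments is assumed, not shown in the diff.
import { AnthropicAdapter } from '@layer-ai/core';

async function main() {
  const adapter = new AnthropicAdapter();
  try {
    await adapter.call({ gate: 'test-gate', model: 'claude-sonnet-4-5-20250929', type: 'video', data: {} });
  } catch (err) {
    // 0.2.1: 'Video generation not yet supported by LayerAI'
    // 0.2.2: 'Video generation not yet supported by Anthropic'
    console.error((err as Error).message);
  }
}

main();
```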
@@ -76,59 +76,64 @@ export class AnthropicAdapter extends BaseProviderAdapter {
  if (msg.role === 'system')
  continue;
  const role = this.mapRole(msg.role);
- if (msg.images && msg.images.length > 0) {
+ // Handle tool responses (mutually exclusive with other content types)
+ if (msg.toolCallId) {
+ messages.push({
+ role: 'user',
+ content: [{
+ type: 'tool_result',
+ tool_use_id: msg.toolCallId,
+ content: msg.content || '',
+ }],
+ });
+ }
+ // Handle messages with images and/or tool calls
+ else if (msg.images?.length || msg.toolCalls?.length) {
  const content = [];
+ // Add text content if present
  if (msg.content) {
  content.push({ type: 'text', text: msg.content });
  }
- for (const image of msg.images) {
- if (image.url) {
- content.push({
- type: 'image',
- source: {
- type: 'url',
- url: image.url,
- }
- });
+ // Add images if present
+ if (msg.images) {
+ for (const image of msg.images) {
+ if (image.url) {
+ content.push({
+ type: 'image',
+ source: {
+ type: 'url',
+ url: image.url,
+ }
+ });
+ }
+ else if (image.base64) {
+ content.push({
+ type: 'image',
+ source: {
+ type: 'base64',
+ media_type: image.mimeType || 'image/jpeg',
+ data: image.base64
+ }
+ });
+ }
  }
- else if (image.base64) {
+ }
+ // Add tool calls if present
+ if (msg.toolCalls) {
+ for (const toolCall of msg.toolCalls) {
  content.push({
- type: 'image',
- source: {
- type: 'base64',
- media_type: image.mimeType || 'image/jpeg',
- data: image.base64
- }
+ type: 'tool_use',
+ id: toolCall.id,
+ name: toolCall.function.name,
+ input: JSON.parse(toolCall.function.arguments),
  });
  }
  }
- messages.push({ role: role, content });
- }
- else if (msg.toolCalls) {
- const content = [];
- if (msg.content) {
- content.push({ type: 'text', text: msg.content });
- }
- for (const toolCall of msg.toolCalls) {
- content.push({
- type: 'tool_use',
- id: toolCall.id,
- name: toolCall.function.name,
- input: JSON.parse(toolCall.function.arguments),
- });
- }
- messages.push({ role: 'assistant', content });
- }
- else if (msg.toolCallId) {
- messages.push({
- role: 'user',
- content: [{
- type: 'tool_result',
- tool_use_id: msg.toolCallId,
- content: msg.content || '',
- }],
- });
+ // Determine role based on content
+ const messageRole = msg.images?.length ? 'user' : (msg.toolCalls?.length ? 'assistant' : role);
+ messages.push({ role: messageRole, content });
  }
+ // Handle regular text messages
  else {
  messages.push({
  role: role,
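The rewrite collapses three mutually exclusive branches into one content-block builder: tool results are matched first (they must travel as tool_result blocks inside a user turn), then any mix of text, images, and tool calls is accumulated into a single content array, with the final role chosen by the ternary at the end. This matches Anthropic's Messages API, where tool_use blocks live inside assistant content rather than in a separate field. A sketch of what the new branch should emit for one tool round-trip (block shapes are taken from the hunk above; ids and values are illustrative):

```ts
// Expected output shape of the rewritten conversion for a tool round-trip.
// Block shapes mirror the hunk above; ids and values are illustrative.
const anthropicMessages = [
  { role: 'user', content: 'What is the weather in San Francisco?' },
  {
    // msg.toolCalls?.length → role 'assistant'
    role: 'assistant',
    content: [
      { type: 'text', text: 'Let me check that.' },
      { type: 'tool_use', id: 'toolu_01', name: 'get_weather', input: { location: 'San Francisco, CA' } },
    ],
  },
  {
    // msg.toolCallId → tool_result always goes back in a user turn
    role: 'user',
    content: [{ type: 'tool_result', tool_use_id: 'toolu_01', content: '{"temperature":72}' }],
  },
];
```

Note that the role ternary prefers 'user' whenever images are present, 'assistant' whenever tool calls are present, and falls back to the mapped role otherwise.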
package/dist/services/providers/openai-adapter.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"openai-adapter.d.ts","sourceRoot":"","sources":["../../../src/services/providers/openai-adapter.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,mBAAmB,EAAE,MAAM,mBAAmB,CAAC;AACxD,OAAO,EACL,YAAY,EACZ,aAAa,EACb,IAAI,EACJ,WAAW,EACX,SAAS,EACT,YAAY,EACZ,UAAU,EACV,SAAS,EACT,WAAW,EACX,YAAY,EACb,MAAM,eAAe,CAAC;AAavB,qBAAa,aAAc,SAAQ,mBAAmB;IACpD,SAAS,CAAC,QAAQ,SAAY;IAE9B,SAAS,CAAC,YAAY,EAAE,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,CAQ1C;IAEF,SAAS,CAAC,mBAAmB,EAAE,MAAM,CAAC,WAAW,EAAE,MAAM,CAAC,CAIxD;IAEF,SAAS,CAAC,oBAAoB,EAAE,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAK1D;IAEF,SAAS,CAAC,iBAAiB,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAQpD;IAEF,SAAS,CAAC,oBAAoB,EAAE,MAAM,CAAC,YAAY,EAAE,MAAM,CAAC,CAG1D;IAEF,SAAS,CAAC,kBAAkB,EAAE,MAAM,CAAC,UAAU,EAAE,MAAM,CAAC,CAGtD;IAEF,SAAS,CAAC,iBAAiB,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAKpD;IAEF,SAAS,CAAC,mBAAmB,EAAE,MAAM,CAAC,WAAW,EAAE,MAAM,CAAC,CAOxD;IAEI,IAAI,CAAC,OAAO,EAAE,YAAY,GAAG,OAAO,CAAC,aAAa,CAAC;YAiB3C,UAAU;YAoGV,qBAAqB;YA6BrB,gBAAgB;YAiChB,kBAAkB;CA8BjC"}
+ {"version":3,"file":"openai-adapter.d.ts","sourceRoot":"","sources":["../../../src/services/providers/openai-adapter.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,mBAAmB,EAAE,MAAM,mBAAmB,CAAC;AACxD,OAAO,EACL,YAAY,EACZ,aAAa,EACb,IAAI,EACJ,WAAW,EACX,SAAS,EACT,YAAY,EACZ,UAAU,EACV,SAAS,EACT,WAAW,EACX,YAAY,EACb,MAAM,eAAe,CAAC;AAavB,qBAAa,aAAc,SAAQ,mBAAmB;IACpD,SAAS,CAAC,QAAQ,SAAY;IAE9B,SAAS,CAAC,YAAY,EAAE,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,CAQ1C;IAEF,SAAS,CAAC,mBAAmB,EAAE,MAAM,CAAC,WAAW,EAAE,MAAM,CAAC,CAIxD;IAEF,SAAS,CAAC,oBAAoB,EAAE,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAK1D;IAEF,SAAS,CAAC,iBAAiB,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAQpD;IAEF,SAAS,CAAC,oBAAoB,EAAE,MAAM,CAAC,YAAY,EAAE,MAAM,CAAC,CAG1D;IAEF,SAAS,CAAC,kBAAkB,EAAE,MAAM,CAAC,UAAU,EAAE,MAAM,CAAC,CAGtD;IAEF,SAAS,CAAC,iBAAiB,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAKpD;IAEF,SAAS,CAAC,mBAAmB,EAAE,MAAM,CAAC,WAAW,EAAE,MAAM,CAAC,CAOxD;IAEI,IAAI,CAAC,OAAO,EAAE,YAAY,GAAG,OAAO,CAAC,aAAa,CAAC;YAiB3C,UAAU;YA2GV,qBAAqB;YA6BrB,gBAAgB;YAiChB,kBAAkB;CA8BjC"}
package/dist/services/providers/openai-adapter.js CHANGED
@@ -76,7 +76,7 @@ export class OpenAIAdapter extends BaseProviderAdapter {
  case 'tts':
  return this.handleTextToSpeech(request);
  case 'video':
- throw new Error('Video generation not yet supported by LayerAI');
+ throw new Error('Video generation not yet supported by OpenAI');
  default:
  throw new Error(`Unknown modality: ${request.type}`);
  }
@@ -94,6 +94,7 @@ export class OpenAIAdapter extends BaseProviderAdapter {
  }
  for (const msg of chat.messages) {
  const role = this.mapRole(msg.role);
+ // Handle vision messages (content + images)
  if (msg.images && msg.images.length > 0) {
  const content = [];
  if (msg.content) {
@@ -111,13 +112,7 @@ export class OpenAIAdapter extends BaseProviderAdapter {
  }
  messages.push({ role: role, content });
  }
- else if (msg.toolCalls) {
- messages.push({
- role: 'assistant',
- content: msg.content || null,
- tool_calls: msg.toolCalls,
- });
- }
+ // Handle tool responses (mutually exclusive)
  else if (msg.toolCallId) {
  messages.push({
  role: 'tool',
@@ -125,6 +120,15 @@ export class OpenAIAdapter extends BaseProviderAdapter {
  tool_call_id: msg.toolCallId,
  });
  }
+ // Handle assistant messages with tool calls (can have content + tool_calls)
+ else if (msg.toolCalls) {
+ messages.push({
+ role: 'assistant',
+ content: msg.content || null,
+ tool_calls: msg.toolCalls,
+ });
+ }
+ // Handle regular text messages
  else {
  messages.push({
  role,
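On the OpenAI side the same three-way split exists, but the wire format is flat: tool_calls sits next to content on an assistant message instead of inside a content array, and a tool result is its own role: 'tool' message keyed by tool_call_id. Checking toolCallId before toolCalls mirrors the Anthropic adapter's ordering and guarantees a tool-result message is never misread as an assistant turn. The target shapes (standard Chat Completions format; ids and values illustrative):

```ts
// Chat Completions message shapes this branch ordering maps onto.
// Standard OpenAI format; ids and values are illustrative.
const openaiMessages = [
  { role: 'user', content: 'What is the weather in San Francisco?' },
  {
    role: 'assistant',
    content: null, // may be null when the model only calls tools
    tool_calls: [{
      id: 'call_abc123',
      type: 'function',
      function: { name: 'get_weather', arguments: '{"location":"San Francisco, CA"}' },
    }],
  },
  { role: 'tool', tool_call_id: 'call_abc123', content: '{"temperature":72}' },
];
```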
@@ -48,6 +48,7 @@ async function testChatWithVision() {
  }
  async function testToolCalls() {
  console.log('Testing tool calls...');
+ // Step 1: Initial request with tool available
  const request = {
  gate: 'test-gate',
  model: 'claude-sonnet-4-5-20250929',
@@ -75,19 +76,46 @@ async function testToolCalls() {
  },
  },
  ],
- maxTokens: 100,
+ maxTokens: 200,
  }
  };
  const response = await adapter.call(request);
- console.log('Response:', response.content);
+ console.log('Response content:', response.content);
  console.log('Tool calls:', response.toolCalls);
  console.log('Finish reason:', response.finishReason);
- if (response.toolCalls && response.toolCalls.length > 0) {
- console.log('✅ Tool calls test passed\n');
- }
- else {
+ if (!response.toolCalls || response.toolCalls.length === 0) {
  throw new Error('Expected tool calls but got none');
  }
+ const toolCall = response.toolCalls[0];
+ console.log('Function called:', toolCall.function.name);
+ console.log('Function arguments:', toolCall.function.arguments);
+ // Step 2: Send tool response back
+ const toolResponseRequest = {
+ gate: 'test-gate',
+ model: 'claude-sonnet-4-5-20250929',
+ type: 'chat',
+ data: {
+ messages: [
+ { role: 'user', content: 'What is the weather in San Francisco?' },
+ {
+ role: 'assistant',
+ content: response.content,
+ toolCalls: response.toolCalls,
+ },
+ {
+ role: 'tool',
+ toolCallId: toolCall.id,
+ name: toolCall.function.name,
+ content: JSON.stringify({ temperature: 72, condition: 'sunny', humidity: 65 }),
+ },
+ ],
+ tools: request.data.tools,
+ maxTokens: 200,
+ }
+ };
+ const finalResponse = await adapter.call(toolResponseRequest);
+ console.log('Final response:', finalResponse.content);
+ console.log('✅ Tool calls test passed\n');
  }
  async function runTests() {
  try {
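The test stubs the tool output inline; in a real integration the named function would be executed between the two adapter.call steps. A minimal dispatcher sketch under that assumption (the handler table and runTool helper are hypothetical, not part of the package):

```ts
// Hypothetical local dispatcher for the round-trip above: look up the tool the
// model asked for, run it, and stringify the result for the 'tool' message.
const toolHandlers: Record<string, (args: Record<string, unknown>) => unknown> = {
  get_weather: ({ location }) => ({ location, temperature: 72, condition: 'sunny', humidity: 65 }),
};

function runTool(name: string, argsJson: string): string {
  const handler = toolHandlers[name];
  if (!handler) throw new Error(`No handler for tool: ${name}`);
  return JSON.stringify(handler(JSON.parse(argsJson)));
}

// Usage: produces the content for the role: 'tool' message in step 2.
// const content = runTool(toolCall.function.name, toolCall.function.arguments);
```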
@@ -100,11 +100,124 @@ async function testTextToSpeech() {
  console.log('Audio base64 length:', response.audio?.base64?.length);
  console.log('✅ Text-to-speech test passed\n');
  }
+ async function testToolCalling() {
+ console.log('Testing tool calling...');
+ // Step 1: Initial request with tool available
+ const request = {
+ gate: 'test-gate',
+ model: 'gpt-4o-mini',
+ type: 'chat',
+ data: {
+ messages: [
+ { role: 'user', content: 'What is the weather in San Francisco?' }
+ ],
+ tools: [
+ {
+ type: 'function',
+ function: {
+ name: 'get_weather',
+ description: 'Get the current weather for a location',
+ parameters: {
+ type: 'object',
+ properties: {
+ location: {
+ type: 'string',
+ description: 'The city and state, e.g. San Francisco, CA',
+ },
+ },
+ required: ['location'],
+ },
+ },
+ },
+ ],
+ maxTokens: 100,
+ }
+ };
+ const response = await adapter.call(request);
+ console.log('Response content:', response.content);
+ console.log('Tool calls:', response.toolCalls);
+ console.log('Finish reason:', response.finishReason);
+ if (!response.toolCalls || response.toolCalls.length === 0) {
+ throw new Error('Expected tool calls but got none');
+ }
+ const toolCall = response.toolCalls[0];
+ console.log('Function called:', toolCall.function.name);
+ console.log('Function arguments:', toolCall.function.arguments);
+ // Step 2: Send tool response back
+ const toolResponseRequest = {
+ gate: 'test-gate',
+ model: 'gpt-4o-mini',
+ type: 'chat',
+ data: {
+ messages: [
+ { role: 'user', content: 'What is the weather in San Francisco?' },
+ {
+ role: 'assistant',
+ content: response.content,
+ toolCalls: response.toolCalls,
+ },
+ {
+ role: 'tool',
+ toolCallId: toolCall.id,
+ content: JSON.stringify({ temperature: 72, condition: 'sunny' }),
+ },
+ ],
+ tools: request.data.tools,
+ }
+ };
+ const finalResponse = await adapter.call(toolResponseRequest);
+ console.log('Final response:', finalResponse.content);
+ console.log('✅ Tool calling test passed\n');
+ }
+ async function testContentAndToolCalls() {
+ console.log('Testing content + tool calls in same message...');
+ // This tests the fix we made - assistant messages can have BOTH content and toolCalls
+ const request = {
+ gate: 'test-gate',
+ model: 'gpt-4o-mini',
+ type: 'chat',
+ data: {
+ messages: [
+ { role: 'user', content: 'Calculate 5 + 3 and explain why' },
+ {
+ role: 'assistant',
+ content: 'Let me calculate that for you.',
+ toolCalls: [
+ {
+ id: 'call_test_123',
+ type: 'function',
+ function: {
+ name: 'calculate',
+ arguments: JSON.stringify({ operation: 'add', a: 5, b: 3 }),
+ },
+ },
+ ],
+ },
+ {
+ role: 'tool',
+ toolCallId: 'call_test_123',
+ content: JSON.stringify({ result: 8 }),
+ },
+ ],
+ maxTokens: 100,
+ }
+ };
+ const response = await adapter.call(request);
+ console.log('Response:', response.content);
+ if (!response.content) {
+ throw new Error('Expected content in response');
+ }
+ console.log('✅ Content + tool calls test passed\n');
+ }
  async function runTests() {
  try {
  await testChatCompletion();
  console.log('Testing vision...');
  await testChatWithVision();
+ console.log('Testing tool calling...');
+ await testToolCalling();
+ console.log('Testing content + tool calls...');
+ await testContentAndToolCalls();
  await testImageGeneration();
  await testEmbeddings();
  await testTextToSpeech();
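testContentAndToolCalls seeds a fabricated assistant turn rather than waiting for the model to produce one; the only invariant is that the id inside toolCalls matches the toolCallId on the following tool message. A small helper that builds such a synthetic round-trip under the same assumption (hypothetical, not part of the package):

```ts
// Hypothetical test helper: build a synthetic assistant tool turn plus its
// matching tool-result turn. The id only has to agree between the two messages.
function syntheticToolTurn(name: string, args: object, result: object, text: string | null = null) {
  const id = `call_test_${Math.random().toString(36).slice(2)}`;
  return [
    {
      role: 'assistant',
      content: text,
      toolCalls: [{ id, type: 'function', function: { name, arguments: JSON.stringify(args) } }],
    },
    { role: 'tool', toolCallId: id, content: JSON.stringify(result) },
  ];
}

// Usage mirroring testContentAndToolCalls:
const turns = syntheticToolTurn(
  'calculate',
  { operation: 'add', a: 5, b: 3 },
  { result: 8 },
  'Let me calculate that for you.',
);
console.log(turns);
```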
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@layer-ai/core",
- "version": "0.2.1",
+ "version": "0.2.2",
  "description": "Core API routes and services for Layer AI",
  "type": "module",
  "main": "./dist/index.js",