@kernl-sdk/ai 0.2.5 → 0.2.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/.turbo/turbo-build.log +5 -4
  2. package/.turbo/turbo-check-types.log +1 -1
  3. package/CHANGELOG.md +16 -0
  4. package/dist/__tests__/embed-integration.test.d.ts +2 -0
  5. package/dist/__tests__/embed-integration.test.d.ts.map +1 -0
  6. package/dist/__tests__/embed-integration.test.js +70 -0
  7. package/dist/__tests__/integration.test.js +1 -1
  8. package/dist/__tests__/language-model.test.js +1 -1
  9. package/dist/convert/__tests__/message.test.js +1 -1
  10. package/dist/convert/__tests__/response.test.js +1 -1
  11. package/dist/convert/__tests__/settings.test.js +1 -1
  12. package/dist/convert/__tests__/stream.test.js +1 -1
  13. package/dist/convert/__tests__/tools.test.js +1 -1
  14. package/dist/convert/__tests__/ui-message.test.js +1 -1
  15. package/dist/convert/__tests__/ui-stream.test.js +1 -1
  16. package/dist/convert/embedding.d.ts +12 -0
  17. package/dist/convert/embedding.d.ts.map +1 -0
  18. package/dist/convert/embedding.js +16 -0
  19. package/dist/convert/settings.js +1 -1
  20. package/dist/convert/stream.js +1 -1
  21. package/dist/embedding-model.d.ts +16 -0
  22. package/dist/embedding-model.d.ts.map +1 -0
  23. package/dist/embedding-model.js +39 -0
  24. package/dist/index.d.ts +10 -8
  25. package/dist/index.d.ts.map +1 -1
  26. package/dist/index.js +10 -8
  27. package/dist/language-model.js +5 -5
  28. package/dist/providers/anthropic.d.ts +1 -1
  29. package/dist/providers/anthropic.js +2 -1
  30. package/dist/providers/google.d.ts +1 -1
  31. package/dist/providers/google.d.ts.map +1 -1
  32. package/dist/providers/google.js +5 -1
  33. package/dist/providers/openai.d.ts +1 -1
  34. package/dist/providers/openai.d.ts.map +1 -1
  35. package/dist/providers/openai.js +5 -1
  36. package/package.json +5 -4
  37. package/src/__tests__/embed-integration.test.ts +86 -0
  38. package/src/convert/embedding.ts +33 -0
  39. package/src/embedding-model.ts +54 -0
  40. package/src/index.ts +2 -0
  41. package/src/providers/anthropic.ts +2 -0
  42. package/src/providers/google.ts +7 -0
  43. package/src/providers/openai.ts +7 -0
@@ -1,4 +1,5 @@
1
-
2
- > @kernl-sdk/ai@0.2.4 build /Users/andjones/Documents/projects/kernl/packages/providers/ai
3
- > tsc && tsc-alias
4
-
1
+
2
+ 
3
+ > @kernl-sdk/ai@0.2.6 build /Users/andjones/Documents/projects/kernl/packages/providers/ai
4
+ > tsc && tsc-alias --resolve-full-paths
5
+
@@ -1,4 +1,4 @@
1
1
 
2
- > @kernl-sdk/ai@0.1.4 check-types /Users/andjones/Documents/projects/kernl/packages/_ai
2
+ > @kernl-sdk/ai@0.2.6 check-types /Users/andjones/Documents/projects/kernl/packages/providers/ai
3
3
  > tsc --noEmit
4
4
 
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
1
1
  # @kernl/ai
2
2
 
3
+ ## 0.2.7
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [a7d6138]
8
+ - @kernl-sdk/retrieval@0.1.0
9
+
10
+ ## 0.2.6
11
+
12
+ ### Patch Changes
13
+
14
+ - Fix ESM compatibility by adding --resolve-full-paths to tsc-alias build
15
+ - Updated dependencies
16
+ - @kernl-sdk/shared@0.1.6
17
+ - @kernl-sdk/protocol@0.2.5
18
+
3
19
  ## 0.2.5
4
20
 
5
21
  ### Patch Changes
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=embed-integration.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"embed-integration.test.d.ts","sourceRoot":"","sources":["../../src/__tests__/embed-integration.test.ts"],"names":[],"mappings":""}
@@ -0,0 +1,70 @@
1
+ import { describe, it, expect } from "vitest";
2
+ import { embed, embedMany } from "@kernl-sdk/retrieval";
3
+ import { openai } from "../providers/openai.js";
4
+ import { google } from "../providers/google.js";
5
+ // Force module evaluation by referencing exports
6
+ void openai;
7
+ void google;
8
+ // Integration tests for embedding functions with real APIs.
9
+ // Skip these in CI if API keys are not available.
10
+ describe.skipIf(!process.env.OPENAI_API_KEY)("embed integration (OpenAI)", () => {
11
+ it("should embed single text with OpenAI", async () => {
12
+ const result = await embed({
13
+ model: "openai/text-embedding-3-small",
14
+ text: "The quick brown fox jumps over the lazy dog",
15
+ });
16
+ expect(result.embedding).toBeDefined();
17
+ expect(Array.isArray(result.embedding)).toBe(true);
18
+ expect(result.embedding.length).toBe(1536); // text-embedding-3-small dimensions
19
+ expect(result.embedding[0]).toBeTypeOf("number");
20
+ });
21
+ it("should embed multiple texts with OpenAI", async () => {
22
+ const result = await embedMany({
23
+ model: "openai/text-embedding-3-small",
24
+ texts: [
25
+ "Hello world",
26
+ "Machine learning is fascinating",
27
+ "TypeScript is great",
28
+ ],
29
+ });
30
+ expect(result.embeddings).toBeDefined();
31
+ expect(result.embeddings.length).toBe(3);
32
+ expect(result.embeddings[0].length).toBe(1536);
33
+ expect(result.embeddings[1].length).toBe(1536);
34
+ expect(result.embeddings[2].length).toBe(1536);
35
+ });
36
+ it("should handle concurrency with OpenAI", async () => {
37
+ const texts = Array.from({ length: 10 }, (_, i) => `Text number ${i}`);
38
+ const result = await embedMany({
39
+ model: "openai/text-embedding-3-small",
40
+ texts,
41
+ concurrency: 3,
42
+ });
43
+ expect(result.embeddings.length).toBe(10);
44
+ result.embeddings.forEach((embedding) => {
45
+ expect(embedding.length).toBe(1536);
46
+ });
47
+ });
48
+ it("should retry on failure", async () => {
49
+ // This test might be flaky, but demonstrates retry behavior
50
+ const result = await embed({
51
+ model: "openai/text-embedding-3-small",
52
+ text: "Test retry behavior",
53
+ retries: 2,
54
+ });
55
+ expect(result.embedding).toBeDefined();
56
+ expect(result.embedding.length).toBe(1536);
57
+ });
58
+ });
59
+ describe.skipIf(!process.env.GOOGLE_GENERATIVE_AI_API_KEY)("embed integration (Google)", () => {
60
+ it("should embed single text with Google", async () => {
61
+ const result = await embed({
62
+ model: "google/text-embedding-004",
63
+ text: "The quick brown fox jumps over the lazy dog",
64
+ });
65
+ expect(result.embedding).toBeDefined();
66
+ expect(Array.isArray(result.embedding)).toBe(true);
67
+ expect(result.embedding.length).toBeGreaterThan(0);
68
+ expect(result.embedding[0]).toBeTypeOf("number");
69
+ });
70
+ });
@@ -2,7 +2,7 @@ import { describe, it, expect, beforeAll } from "vitest";
2
2
  import { openai } from "@ai-sdk/openai";
3
3
  import { anthropic } from "@ai-sdk/anthropic";
4
4
  import { IN_PROGRESS } from "@kernl-sdk/protocol";
5
- import { AISDKLanguageModel } from "../language-model";
5
+ import { AISDKLanguageModel } from "../language-model.js";
6
6
  /**
7
7
  * Integration tests for AISDKLanguageModel with real AI SDK providers.
8
8
  *
@@ -1,6 +1,6 @@
1
1
  import { describe, it, expect, vi } from "vitest";
2
2
  import { IN_PROGRESS } from "@kernl-sdk/protocol";
3
- import { AISDKLanguageModel } from "../language-model";
3
+ import { AISDKLanguageModel } from "../language-model.js";
4
4
  /**
5
5
  * Unit tests for AISDKLanguageModel stream accumulation behavior
6
6
  */
@@ -1,5 +1,5 @@
1
1
  import { describe, it, expect } from "vitest";
2
- import { MESSAGE } from "../message";
2
+ import { MESSAGE } from "../message.js";
3
3
  describe("MESSAGE codec", () => {
4
4
  describe("encode - system messages", () => {
5
5
  it("should encode system message with single text part", () => {
@@ -1,5 +1,5 @@
1
1
  import { describe, it, expect } from "vitest";
2
- import { WARNING } from "../response";
2
+ import { WARNING } from "../response.js";
3
3
  describe("WARNING codec", () => {
4
4
  describe("decode", () => {
5
5
  it("should decode unsupported-setting warning", () => {
@@ -1,5 +1,5 @@
1
1
  import { describe, it, expect } from "vitest";
2
- import { MODEL_SETTINGS } from "../settings";
2
+ import { MODEL_SETTINGS } from "../settings.js";
3
3
  describe("MODEL_SETTINGS codec", () => {
4
4
  describe("encode", () => {
5
5
  it("should encode temperature setting", () => {
@@ -1,6 +1,6 @@
1
1
  import { describe, it, expect } from "vitest";
2
2
  import { IN_PROGRESS } from "@kernl-sdk/protocol";
3
- import { STREAM_PART, convertStream } from "../stream";
3
+ import { STREAM_PART, convertStream } from "../stream.js";
4
4
  describe("STREAM_PART codec", () => {
5
5
  describe("decode - text events", () => {
6
6
  it("should decode text-start event", () => {
@@ -1,5 +1,5 @@
1
1
  import { describe, it, expect } from "vitest";
2
- import { TOOL, TOOL_CHOICE } from "../tools";
2
+ import { TOOL, TOOL_CHOICE } from "../tools.js";
3
3
  describe("TOOL codec", () => {
4
4
  describe("encode - function tools", () => {
5
5
  it("should encode function tool with basic schema", () => {
@@ -1,6 +1,6 @@
1
1
  import { describe, it, expect } from "vitest";
2
2
  import { IN_PROGRESS, COMPLETED, FAILED } from "@kernl-sdk/protocol";
3
- import { UIMessageCodec, historyToUIMessages } from "../ui-message";
3
+ import { UIMessageCodec, historyToUIMessages } from "../ui-message.js";
4
4
  describe("UIMessageCodec", () => {
5
5
  // ----------------------------
6
6
  // Text parts
@@ -1,6 +1,6 @@
1
1
  import { describe, it, expect } from "vitest";
2
2
  import { COMPLETED, FAILED, IN_PROGRESS } from "@kernl-sdk/protocol";
3
- import { STREAM_UI_PART, toUIMessageStream } from "../ui-stream";
3
+ import { STREAM_UI_PART, toUIMessageStream } from "../ui-stream.js";
4
4
  describe("STREAM_UI_PART codec", () => {
5
5
  describe("encode - text events", () => {
6
6
  it("should encode text-start event", () => {
@@ -0,0 +1,12 @@
1
+ import type { Codec } from "@kernl-sdk/shared/lib";
2
+ import type { EmbeddingModelRequestSettings } from "@kernl-sdk/protocol";
3
+ import type { SharedV3ProviderOptions } from "@ai-sdk/provider";
4
+ /**
5
+ * AI SDK embedding call options extracted from settings.
6
+ */
7
+ export interface AISdkEmbeddingOptions {
8
+ dimensions?: number;
9
+ providerOptions?: SharedV3ProviderOptions;
10
+ }
11
+ export declare const EMBEDDING_SETTINGS: Codec<EmbeddingModelRequestSettings, AISdkEmbeddingOptions>;
12
+ //# sourceMappingURL=embedding.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"embedding.d.ts","sourceRoot":"","sources":["../../src/convert/embedding.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,KAAK,EAAE,MAAM,uBAAuB,CAAC;AACnD,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,qBAAqB,CAAC;AACzE,OAAO,KAAK,EAAE,uBAAuB,EAAE,MAAM,kBAAkB,CAAC;AAEhE;;GAEG;AACH,MAAM,WAAW,qBAAqB;IACpC,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,eAAe,CAAC,EAAE,uBAAuB,CAAC;CAC3C;AAED,eAAO,MAAM,kBAAkB,EAAE,KAAK,CACpC,6BAA6B,EAC7B,qBAAqB,CAkBtB,CAAC"}
@@ -0,0 +1,16 @@
1
+ export const EMBEDDING_SETTINGS = {
2
+ encode: (settings) => {
3
+ const options = {};
4
+ if (settings.dimensions !== undefined) {
5
+ options.dimensions = settings.dimensions;
6
+ }
7
+ if (settings.providerOptions !== undefined) {
8
+ options.providerOptions =
9
+ settings.providerOptions;
10
+ }
11
+ return options;
12
+ },
13
+ decode: () => {
14
+ throw new Error("codec:unimplemented");
15
+ },
16
+ };
@@ -1,4 +1,4 @@
1
- import { TOOL_CHOICE } from "./tools";
1
+ import { TOOL_CHOICE } from "./tools.js";
2
2
  export const MODEL_SETTINGS = {
3
3
  encode: (settings) => {
4
4
  const options = {};
@@ -1,5 +1,5 @@
1
1
  import { COMPLETED, FAILED, IN_PROGRESS, } from "@kernl-sdk/protocol";
2
- import { WARNING } from "./response";
2
+ import { WARNING } from "./response.js";
3
3
  /**
4
4
  * Convert AI SDK stream to async iterable of kernl stream events.
5
5
  */
@@ -0,0 +1,16 @@
1
+ import type { EmbeddingModelV3 } from "@ai-sdk/provider";
2
+ import type { EmbeddingModel, EmbeddingModelRequest, EmbeddingModelResponse } from "@kernl-sdk/protocol";
3
+ /**
4
+ * EmbeddingModel adapter for the AI SDK EmbeddingModelV3.
5
+ */
6
+ export declare class AISDKEmbeddingModel<TValue = string> implements EmbeddingModel<TValue> {
7
+ private model;
8
+ readonly spec: "1.0";
9
+ readonly provider: string;
10
+ readonly modelId: string;
11
+ readonly maxEmbeddingsPerCall?: number;
12
+ readonly supportsParallelCalls?: boolean;
13
+ constructor(model: EmbeddingModelV3<TValue>);
14
+ embed(request: EmbeddingModelRequest<TValue>): Promise<EmbeddingModelResponse>;
15
+ }
16
+ //# sourceMappingURL=embedding-model.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"embedding-model.d.ts","sourceRoot":"","sources":["../src/embedding-model.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,kBAAkB,CAAC;AACzD,OAAO,KAAK,EACV,cAAc,EACd,qBAAqB,EACrB,sBAAsB,EACvB,MAAM,qBAAqB,CAAC;AAI7B;;GAEG;AACH,qBAAa,mBAAmB,CAAC,MAAM,GAAG,MAAM,CAC9C,YAAW,cAAc,CAAC,MAAM,CAAC;IAQrB,OAAO,CAAC,KAAK;IANzB,QAAQ,CAAC,IAAI,EAAG,KAAK,CAAU;IAC/B,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAC;IAC1B,QAAQ,CAAC,OAAO,EAAE,MAAM,CAAC;IACzB,QAAQ,CAAC,oBAAoB,CAAC,EAAE,MAAM,CAAC;IACvC,QAAQ,CAAC,qBAAqB,CAAC,EAAE,OAAO,CAAC;gBAErB,KAAK,EAAE,gBAAgB,CAAC,MAAM,CAAC;IAa7C,KAAK,CACT,OAAO,EAAE,qBAAqB,CAAC,MAAM,CAAC,GACrC,OAAO,CAAC,sBAAsB,CAAC;CAiBnC"}
@@ -0,0 +1,39 @@
1
+ import { EMBEDDING_SETTINGS } from "./convert/embedding.js";
2
+ /**
3
+ * EmbeddingModel adapter for the AI SDK EmbeddingModelV3.
4
+ */
5
+ export class AISDKEmbeddingModel {
6
+ model;
7
+ spec = "1.0";
8
+ provider;
9
+ modelId;
10
+ maxEmbeddingsPerCall;
11
+ supportsParallelCalls;
12
+ constructor(model) {
13
+ this.model = model;
14
+ this.provider = model.provider;
15
+ this.modelId = model.modelId;
16
+ // AI SDK supports async values for these, we handle sync case
17
+ if (typeof model.maxEmbeddingsPerCall === "number") {
18
+ this.maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
19
+ }
20
+ if (typeof model.supportsParallelCalls === "boolean") {
21
+ this.supportsParallelCalls = model.supportsParallelCalls;
22
+ }
23
+ }
24
+ async embed(request) {
25
+ const settings = request.settings
26
+ ? EMBEDDING_SETTINGS.encode(request.settings)
27
+ : {};
28
+ const result = await this.model.doEmbed({
29
+ values: request.values,
30
+ abortSignal: request.abort,
31
+ ...settings,
32
+ });
33
+ return {
34
+ embeddings: result.embeddings,
35
+ usage: { inputTokens: result.usage?.tokens },
36
+ providerMetadata: result.providerMetadata,
37
+ };
38
+ }
39
+ }
package/dist/index.d.ts CHANGED
@@ -11,12 +11,14 @@
11
11
  * const response = await claude.generate([...], {});
12
12
  * ```
13
13
  */
14
- export { AISDKLanguageModel } from "./language-model";
15
- export { MESSAGE } from "./convert/message";
16
- export { TOOL, TOOL_CHOICE } from "./convert/tools";
17
- export { MODEL_SETTINGS } from "./convert/settings";
18
- export { MODEL_RESPONSE, WARNING } from "./convert/response";
19
- export { convertStream } from "./convert/stream";
20
- export { UIMessageCodec, historyToUIMessages } from "./convert/ui-message";
21
- export { STREAM_UI_PART, toUIMessageStream } from "./convert/ui-stream";
14
+ export { AISDKLanguageModel } from "./language-model.js";
15
+ export { AISDKEmbeddingModel } from "./embedding-model.js";
16
+ export { MESSAGE } from "./convert/message.js";
17
+ export { TOOL, TOOL_CHOICE } from "./convert/tools.js";
18
+ export { MODEL_SETTINGS } from "./convert/settings.js";
19
+ export { EMBEDDING_SETTINGS } from "./convert/embedding.js";
20
+ export { MODEL_RESPONSE, WARNING } from "./convert/response.js";
21
+ export { convertStream } from "./convert/stream.js";
22
+ export { UIMessageCodec, historyToUIMessages } from "./convert/ui-message.js";
23
+ export { STREAM_UI_PART, toUIMessageStream } from "./convert/ui-stream.js";
22
24
  //# sourceMappingURL=index.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAEH,OAAO,EAAE,kBAAkB,EAAE,MAAM,kBAAkB,CAAC;AAGtD,OAAO,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAC5C,OAAO,EAAE,IAAI,EAAE,WAAW,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,EAAE,cAAc,EAAE,MAAM,oBAAoB,CAAC;AACpD,OAAO,EAAE,cAAc,EAAE,OAAO,EAAE,MAAM,oBAAoB,CAAC;AAC7D,OAAO,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACjD,OAAO,EAAE,cAAc,EAAE,mBAAmB,EAAE,MAAM,sBAAsB,CAAC;AAC3E,OAAO,EAAE,cAAc,EAAE,iBAAiB,EAAE,MAAM,qBAAqB,CAAC"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAEH,OAAO,EAAE,kBAAkB,EAAE,MAAM,kBAAkB,CAAC;AACtD,OAAO,EAAE,mBAAmB,EAAE,MAAM,mBAAmB,CAAC;AAGxD,OAAO,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAC5C,OAAO,EAAE,IAAI,EAAE,WAAW,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,EAAE,cAAc,EAAE,MAAM,oBAAoB,CAAC;AACpD,OAAO,EAAE,kBAAkB,EAAE,MAAM,qBAAqB,CAAC;AACzD,OAAO,EAAE,cAAc,EAAE,OAAO,EAAE,MAAM,oBAAoB,CAAC;AAC7D,OAAO,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACjD,OAAO,EAAE,cAAc,EAAE,mBAAmB,EAAE,MAAM,sBAAsB,CAAC;AAC3E,OAAO,EAAE,cAAc,EAAE,iBAAiB,EAAE,MAAM,qBAAqB,CAAC"}
package/dist/index.js CHANGED
@@ -11,12 +11,14 @@
11
11
  * const response = await claude.generate([...], {});
12
12
  * ```
13
13
  */
14
- export { AISDKLanguageModel } from "./language-model";
14
+ export { AISDKLanguageModel } from "./language-model.js";
15
+ export { AISDKEmbeddingModel } from "./embedding-model.js";
15
16
  // Re-export codecs for custom provider implementations
16
- export { MESSAGE } from "./convert/message";
17
- export { TOOL, TOOL_CHOICE } from "./convert/tools";
18
- export { MODEL_SETTINGS } from "./convert/settings";
19
- export { MODEL_RESPONSE, WARNING } from "./convert/response";
20
- export { convertStream } from "./convert/stream";
21
- export { UIMessageCodec, historyToUIMessages } from "./convert/ui-message";
22
- export { STREAM_UI_PART, toUIMessageStream } from "./convert/ui-stream";
17
+ export { MESSAGE } from "./convert/message.js";
18
+ export { TOOL, TOOL_CHOICE } from "./convert/tools.js";
19
+ export { MODEL_SETTINGS } from "./convert/settings.js";
20
+ export { EMBEDDING_SETTINGS } from "./convert/embedding.js";
21
+ export { MODEL_RESPONSE, WARNING } from "./convert/response.js";
22
+ export { convertStream } from "./convert/stream.js";
23
+ export { UIMessageCodec, historyToUIMessages } from "./convert/ui-message.js";
24
+ export { STREAM_UI_PART, toUIMessageStream } from "./convert/ui-stream.js";
@@ -1,9 +1,9 @@
1
1
  import { message, reasoning } from "@kernl-sdk/protocol";
2
- import { MESSAGE } from "./convert/message";
3
- import { TOOL } from "./convert/tools";
4
- import { MODEL_SETTINGS } from "./convert/settings";
5
- import { MODEL_RESPONSE } from "./convert/response";
6
- import { convertStream } from "./convert/stream";
2
+ import { MESSAGE } from "./convert/message.js";
3
+ import { TOOL } from "./convert/tools.js";
4
+ import { MODEL_SETTINGS } from "./convert/settings.js";
5
+ import { MODEL_RESPONSE } from "./convert/response.js";
6
+ import { convertStream } from "./convert/stream.js";
7
7
  /**
8
8
  * LanguageModel adapter for the AI SDK LanguageModelV3.
9
9
  */
@@ -1,4 +1,4 @@
1
- import { AISDKLanguageModel } from "../language-model";
1
+ import { AISDKLanguageModel } from "../language-model.js";
2
2
  /**
3
3
  * Create a kernl-compatible Anthropic language model.
4
4
  *
@@ -1,5 +1,5 @@
1
1
  import { anthropic as createAnthropicModel } from "@ai-sdk/anthropic";
2
- import { AISDKLanguageModel } from "../language-model";
2
+ import { AISDKLanguageModel } from "../language-model.js";
3
3
  /**
4
4
  * Create a kernl-compatible Anthropic language model.
5
5
  *
@@ -15,3 +15,4 @@ export function anthropic(modelId) {
15
15
  const model = createAnthropicModel(modelId);
16
16
  return new AISDKLanguageModel(model);
17
17
  }
18
+ // Note: Anthropic does not currently support embeddings
@@ -1,4 +1,4 @@
1
- import { AISDKLanguageModel } from "../language-model";
1
+ import { AISDKLanguageModel } from "../language-model.js";
2
2
  /**
3
3
  * Create a kernl-compatible Google Generative AI language model.
4
4
  *
@@ -1 +1 @@
1
- {"version":3,"file":"google.d.ts","sourceRoot":"","sources":["../../src/providers/google.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD;;;;;;;;;;GAUG;AACH,wBAAgB,MAAM,CAAC,OAAO,EAAE,MAAM,sBAGrC"}
1
+ {"version":3,"file":"google.d.ts","sourceRoot":"","sources":["../../src/providers/google.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAIvD;;;;;;;;;;GAUG;AACH,wBAAgB,MAAM,CAAC,OAAO,EAAE,MAAM,sBAGrC"}
@@ -1,5 +1,7 @@
1
1
  import { google as createGoogleModel } from "@ai-sdk/google";
2
- import { AISDKLanguageModel } from "../language-model";
2
+ import { AISDKLanguageModel } from "../language-model.js";
3
+ import { AISDKEmbeddingModel } from "../embedding-model.js";
4
+ import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
3
5
  /**
4
6
  * Create a kernl-compatible Google Generative AI language model.
5
7
  *
@@ -15,3 +17,5 @@ export function google(modelId) {
15
17
  const model = createGoogleModel(modelId);
16
18
  return new AISDKLanguageModel(model);
17
19
  }
20
+ // Auto-register Google embedding provider
21
+ registerEmbeddingProvider("google", (id) => new AISDKEmbeddingModel(createGoogleModel.textEmbedding(id)));
@@ -1,4 +1,4 @@
1
- import { AISDKLanguageModel } from "../language-model";
1
+ import { AISDKLanguageModel } from "../language-model.js";
2
2
  /**
3
3
  * Create a kernl-compatible OpenAI language model.
4
4
  *
@@ -1 +1 @@
1
- {"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../../src/providers/openai.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAEvD;;;;;;;;;;GAUG;AACH,wBAAgB,MAAM,CAAC,OAAO,EAAE,MAAM,sBAGrC"}
1
+ {"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../../src/providers/openai.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAC;AAIvD;;;;;;;;;;GAUG;AACH,wBAAgB,MAAM,CAAC,OAAO,EAAE,MAAM,sBAGrC"}
@@ -1,5 +1,7 @@
1
1
  import { openai as createOpenAIModel } from "@ai-sdk/openai";
2
- import { AISDKLanguageModel } from "../language-model";
2
+ import { AISDKLanguageModel } from "../language-model.js";
3
+ import { AISDKEmbeddingModel } from "../embedding-model.js";
4
+ import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
3
5
  /**
4
6
  * Create a kernl-compatible OpenAI language model.
5
7
  *
@@ -15,3 +17,5 @@ export function openai(modelId) {
15
17
  const model = createOpenAIModel(modelId);
16
18
  return new AISDKLanguageModel(model);
17
19
  }
20
+ // Auto-register OpenAI embedding provider
21
+ registerEmbeddingProvider("openai", (id) => new AISDKEmbeddingModel(createOpenAIModel.embedding(id)));
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@kernl-sdk/ai",
3
- "version": "0.2.5",
3
+ "version": "0.2.7",
4
4
  "description": "Vercel AI SDK adapter for kernl",
5
5
  "keywords": [
6
6
  "kernl",
@@ -68,11 +68,12 @@
68
68
  "vitest": "^4.0.8"
69
69
  },
70
70
  "dependencies": {
71
- "@kernl-sdk/shared": "^0.1.5",
72
- "@kernl-sdk/protocol": "0.2.4"
71
+ "@kernl-sdk/protocol": "0.2.5",
72
+ "@kernl-sdk/shared": "^0.1.6",
73
+ "@kernl-sdk/retrieval": "0.1.0"
73
74
  },
74
75
  "scripts": {
75
- "build": "tsc && tsc-alias",
76
+ "build": "tsc && tsc-alias --resolve-full-paths",
76
77
  "dev": "tsc --watch",
77
78
  "lint": "eslint src/",
78
79
  "check-types": "tsc --noEmit",
@@ -0,0 +1,86 @@
1
+ import { describe, it, expect } from "vitest";
2
+ import { embed, embedMany } from "@kernl-sdk/retrieval";
3
+ import { openai } from "../providers/openai";
4
+ import { google } from "../providers/google";
5
+
6
+ // Force module evaluation by referencing exports
7
+ void openai;
8
+ void google;
9
+
10
+ // Integration tests for embedding functions with real APIs.
11
+ // Skip these in CI if API keys are not available.
12
+
13
+ describe.skipIf(!process.env.OPENAI_API_KEY)("embed integration (OpenAI)", () => {
14
+ it("should embed single text with OpenAI", async () => {
15
+ const result = await embed({
16
+ model: "openai/text-embedding-3-small",
17
+ text: "The quick brown fox jumps over the lazy dog",
18
+ });
19
+
20
+ expect(result.embedding).toBeDefined();
21
+ expect(Array.isArray(result.embedding)).toBe(true);
22
+ expect(result.embedding.length).toBe(1536); // text-embedding-3-small dimensions
23
+ expect(result.embedding[0]).toBeTypeOf("number");
24
+ });
25
+
26
+ it("should embed multiple texts with OpenAI", async () => {
27
+ const result = await embedMany({
28
+ model: "openai/text-embedding-3-small",
29
+ texts: [
30
+ "Hello world",
31
+ "Machine learning is fascinating",
32
+ "TypeScript is great",
33
+ ],
34
+ });
35
+
36
+ expect(result.embeddings).toBeDefined();
37
+ expect(result.embeddings.length).toBe(3);
38
+ expect(result.embeddings[0].length).toBe(1536);
39
+ expect(result.embeddings[1].length).toBe(1536);
40
+ expect(result.embeddings[2].length).toBe(1536);
41
+ });
42
+
43
+ it("should handle concurrency with OpenAI", async () => {
44
+ const texts = Array.from({ length: 10 }, (_, i) => `Text number ${i}`);
45
+
46
+ const result = await embedMany({
47
+ model: "openai/text-embedding-3-small",
48
+ texts,
49
+ concurrency: 3,
50
+ });
51
+
52
+ expect(result.embeddings.length).toBe(10);
53
+ result.embeddings.forEach((embedding) => {
54
+ expect(embedding.length).toBe(1536);
55
+ });
56
+ });
57
+
58
+ it("should retry on failure", async () => {
59
+ // This test might be flaky, but demonstrates retry behavior
60
+ const result = await embed({
61
+ model: "openai/text-embedding-3-small",
62
+ text: "Test retry behavior",
63
+ retries: 2,
64
+ });
65
+
66
+ expect(result.embedding).toBeDefined();
67
+ expect(result.embedding.length).toBe(1536);
68
+ });
69
+ });
70
+
71
+ describe.skipIf(!process.env.GOOGLE_GENERATIVE_AI_API_KEY)(
72
+ "embed integration (Google)",
73
+ () => {
74
+ it("should embed single text with Google", async () => {
75
+ const result = await embed({
76
+ model: "google/text-embedding-004",
77
+ text: "The quick brown fox jumps over the lazy dog",
78
+ });
79
+
80
+ expect(result.embedding).toBeDefined();
81
+ expect(Array.isArray(result.embedding)).toBe(true);
82
+ expect(result.embedding.length).toBeGreaterThan(0);
83
+ expect(result.embedding[0]).toBeTypeOf("number");
84
+ });
85
+ },
86
+ );
@@ -0,0 +1,33 @@
1
+ import type { Codec } from "@kernl-sdk/shared/lib";
2
+ import type { EmbeddingModelRequestSettings } from "@kernl-sdk/protocol";
3
+ import type { SharedV3ProviderOptions } from "@ai-sdk/provider";
4
+
5
+ /**
6
+ * AI SDK embedding call options extracted from settings.
7
+ */
8
+ export interface AISdkEmbeddingOptions {
9
+ dimensions?: number;
10
+ providerOptions?: SharedV3ProviderOptions;
11
+ }
12
+
13
+ export const EMBEDDING_SETTINGS: Codec<
14
+ EmbeddingModelRequestSettings,
15
+ AISdkEmbeddingOptions
16
+ > = {
17
+ encode: (settings: EmbeddingModelRequestSettings) => {
18
+ const options: AISdkEmbeddingOptions = {};
19
+
20
+ if (settings.dimensions !== undefined) {
21
+ options.dimensions = settings.dimensions;
22
+ }
23
+ if (settings.providerOptions !== undefined) {
24
+ options.providerOptions =
25
+ settings.providerOptions as SharedV3ProviderOptions;
26
+ }
27
+
28
+ return options;
29
+ },
30
+ decode: () => {
31
+ throw new Error("codec:unimplemented");
32
+ },
33
+ };
@@ -0,0 +1,54 @@
1
+ import type { EmbeddingModelV3 } from "@ai-sdk/provider";
2
+ import type {
3
+ EmbeddingModel,
4
+ EmbeddingModelRequest,
5
+ EmbeddingModelResponse,
6
+ } from "@kernl-sdk/protocol";
7
+
8
+ import { EMBEDDING_SETTINGS } from "./convert/embedding";
9
+
10
+ /**
11
+ * EmbeddingModel adapter for the AI SDK EmbeddingModelV3.
12
+ */
13
+ export class AISDKEmbeddingModel<TValue = string>
14
+ implements EmbeddingModel<TValue>
15
+ {
16
+ readonly spec = "1.0" as const;
17
+ readonly provider: string;
18
+ readonly modelId: string;
19
+ readonly maxEmbeddingsPerCall?: number;
20
+ readonly supportsParallelCalls?: boolean;
21
+
22
+ constructor(private model: EmbeddingModelV3<TValue>) {
23
+ this.provider = model.provider;
24
+ this.modelId = model.modelId;
25
+
26
+ // AI SDK supports async values for these, we handle sync case
27
+ if (typeof model.maxEmbeddingsPerCall === "number") {
28
+ this.maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
29
+ }
30
+ if (typeof model.supportsParallelCalls === "boolean") {
31
+ this.supportsParallelCalls = model.supportsParallelCalls;
32
+ }
33
+ }
34
+
35
+ async embed(
36
+ request: EmbeddingModelRequest<TValue>,
37
+ ): Promise<EmbeddingModelResponse> {
38
+ const settings = request.settings
39
+ ? EMBEDDING_SETTINGS.encode(request.settings)
40
+ : {};
41
+
42
+ const result = await this.model.doEmbed({
43
+ values: request.values,
44
+ abortSignal: request.abort,
45
+ ...settings,
46
+ });
47
+
48
+ return {
49
+ embeddings: result.embeddings,
50
+ usage: { inputTokens: result.usage?.tokens },
51
+ providerMetadata: result.providerMetadata,
52
+ };
53
+ }
54
+ }
package/src/index.ts CHANGED
@@ -13,11 +13,13 @@
13
13
  */
14
14
 
15
15
  export { AISDKLanguageModel } from "./language-model";
16
+ export { AISDKEmbeddingModel } from "./embedding-model";
16
17
 
17
18
  // Re-export codecs for custom provider implementations
18
19
  export { MESSAGE } from "./convert/message";
19
20
  export { TOOL, TOOL_CHOICE } from "./convert/tools";
20
21
  export { MODEL_SETTINGS } from "./convert/settings";
22
+ export { EMBEDDING_SETTINGS } from "./convert/embedding";
21
23
  export { MODEL_RESPONSE, WARNING } from "./convert/response";
22
24
  export { convertStream } from "./convert/stream";
23
25
  export { UIMessageCodec, historyToUIMessages } from "./convert/ui-message";
@@ -16,3 +16,5 @@ export function anthropic(modelId: string) {
16
16
  const model = createAnthropicModel(modelId);
17
17
  return new AISDKLanguageModel(model);
18
18
  }
19
+
20
+ // Note: Anthropic does not currently support embeddings
@@ -1,5 +1,7 @@
1
1
  import { google as createGoogleModel } from "@ai-sdk/google";
2
2
  import { AISDKLanguageModel } from "../language-model";
3
+ import { AISDKEmbeddingModel } from "../embedding-model";
4
+ import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
3
5
 
4
6
  /**
5
7
  * Create a kernl-compatible Google Generative AI language model.
@@ -16,3 +18,8 @@ export function google(modelId: string) {
16
18
  const model = createGoogleModel(modelId);
17
19
  return new AISDKLanguageModel(model);
18
20
  }
21
+
22
+ // Auto-register Google embedding provider
23
+ registerEmbeddingProvider("google", (id) =>
24
+ new AISDKEmbeddingModel(createGoogleModel.textEmbedding(id)),
25
+ );
@@ -1,5 +1,7 @@
1
1
  import { openai as createOpenAIModel } from "@ai-sdk/openai";
2
2
  import { AISDKLanguageModel } from "../language-model";
3
+ import { AISDKEmbeddingModel } from "../embedding-model";
4
+ import { registerEmbeddingProvider } from "@kernl-sdk/retrieval";
3
5
 
4
6
  /**
5
7
  * Create a kernl-compatible OpenAI language model.
@@ -16,3 +18,8 @@ export function openai(modelId: string) {
16
18
  const model = createOpenAIModel(modelId);
17
19
  return new AISDKLanguageModel(model);
18
20
  }
21
+
22
+ // Auto-register OpenAI embedding provider
23
+ registerEmbeddingProvider("openai", (id) =>
24
+ new AISDKEmbeddingModel(createOpenAIModel.embedding(id)),
25
+ );