@effect/ai-openai 0.13.2 → 0.13.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/cjs/OpenAiClient.js +3 -2
  2. package/dist/cjs/OpenAiClient.js.map +1 -1
  3. package/dist/cjs/OpenAiCompletions.js +62 -7
  4. package/dist/cjs/OpenAiCompletions.js.map +1 -1
  5. package/dist/cjs/OpenAiConfig.js.map +1 -1
  6. package/dist/cjs/OpenAiEmbeddings.js +94 -18
  7. package/dist/cjs/OpenAiEmbeddings.js.map +1 -1
  8. package/dist/cjs/OpenAiTokenizer.js +9 -5
  9. package/dist/cjs/OpenAiTokenizer.js.map +1 -1
  10. package/dist/dts/Generated.d.ts +203 -603
  11. package/dist/dts/Generated.d.ts.map +1 -1
  12. package/dist/dts/OpenAiClient.d.ts +3 -2
  13. package/dist/dts/OpenAiClient.d.ts.map +1 -1
  14. package/dist/dts/OpenAiCompletions.d.ts +51 -1
  15. package/dist/dts/OpenAiCompletions.d.ts.map +1 -1
  16. package/dist/dts/OpenAiConfig.d.ts +2 -4
  17. package/dist/dts/OpenAiConfig.d.ts.map +1 -1
  18. package/dist/dts/OpenAiEmbeddings.d.ts +63 -8
  19. package/dist/dts/OpenAiEmbeddings.d.ts.map +1 -1
  20. package/dist/dts/OpenAiTokenizer.d.ts +7 -0
  21. package/dist/dts/OpenAiTokenizer.d.ts.map +1 -1
  22. package/dist/esm/OpenAiClient.js +3 -2
  23. package/dist/esm/OpenAiClient.js.map +1 -1
  24. package/dist/esm/OpenAiCompletions.js +59 -6
  25. package/dist/esm/OpenAiCompletions.js.map +1 -1
  26. package/dist/esm/OpenAiConfig.js.map +1 -1
  27. package/dist/esm/OpenAiEmbeddings.js +91 -16
  28. package/dist/esm/OpenAiEmbeddings.js.map +1 -1
  29. package/dist/esm/OpenAiTokenizer.js +8 -5
  30. package/dist/esm/OpenAiTokenizer.js.map +1 -1
  31. package/package.json +5 -5
  32. package/src/OpenAiClient.ts +9 -3
  33. package/src/OpenAiCompletions.ts +206 -93
  34. package/src/OpenAiConfig.ts +2 -13
  35. package/src/OpenAiEmbeddings.ts +165 -38
  36. package/src/OpenAiTokenizer.ts +8 -5
@@ -3,21 +3,24 @@
3
3
  */
4
4
  import { AiError } from "@effect/ai/AiError"
5
5
  import type * as AiInput from "@effect/ai/AiInput"
6
+ import * as AiModel from "@effect/ai/AiModel"
6
7
  import * as AiResponse from "@effect/ai/AiResponse"
7
8
  import * as AiRole from "@effect/ai/AiRole"
8
9
  import * as Completions from "@effect/ai/Completions"
9
- import type * as Tokenizer from "@effect/ai/Tokenizer"
10
+ import * as Tokenizer from "@effect/ai/Tokenizer"
10
11
  import * as Arr from "effect/Array"
12
+ import * as Context from "effect/Context"
11
13
  import * as Effect from "effect/Effect"
14
+ import { dual } from "effect/Function"
12
15
  import * as Layer from "effect/Layer"
13
16
  import type * as Option from "effect/Option"
14
17
  import * as Predicate from "effect/Predicate"
15
18
  import * as Stream from "effect/Stream"
16
19
  import type { Span } from "effect/Tracer"
20
+ import type { Simplify } from "effect/Types"
17
21
  import type * as Generated from "./Generated.js"
18
22
  import type { StreamChunk } from "./OpenAiClient.js"
19
23
  import { OpenAiClient } from "./OpenAiClient.js"
20
- import { OpenAiConfig } from "./OpenAiConfig.js"
21
24
  import { addGenAIAnnotations } from "./OpenAiTelemetry.js"
22
25
  import * as OpenAiTokenizer from "./OpenAiTokenizer.js"
23
26
 
@@ -27,105 +30,177 @@ import * as OpenAiTokenizer from "./OpenAiTokenizer.js"
27
30
  */
28
31
  export type Model = typeof Generated.CreateChatCompletionRequestModelEnum.Encoded
29
32
 
30
- const make = (options: {
31
- readonly model: (string & {}) | Model
32
- }) =>
33
- Effect.gen(function*() {
34
- const client = yield* OpenAiClient
35
- const config = yield* OpenAiConfig.getOrUndefined
33
+ // =============================================================================
34
+ // Configuration
35
+ // =============================================================================
36
+
37
+ /**
38
+ * @since 1.0.0
39
+ * @category tags
40
+ */
41
+ export class Config extends Context.Tag("@effect/ai-openai/OpenAiCompletions/Config")<
42
+ Config,
43
+ Config.Service
44
+ >() {
45
+ /**
46
+ * @since 1.0.0
47
+ */
48
+ static readonly getOrUndefined: Effect.Effect<Config.Service | undefined> = Effect.map(
49
+ Effect.context<never>(),
50
+ (context) => context.unsafeMap.get(Config.key)
51
+ )
52
+ }
36
53
 
37
- const makeRequest = ({ input, required, system, tools }: Completions.CompletionOptions) => {
38
- const useStructured = tools.length === 1 && tools[0].structured
39
- return Effect.map(
40
- Effect.context<never>(),
41
- (context): typeof Generated.CreateChatCompletionRequest.Encoded => ({
42
- model: options.model,
43
- ...config,
44
- ...context.unsafeMap.get(OpenAiConfig.key),
45
- messages: makeMessages(input, system),
46
- response_format: useStructured ?
47
- {
48
- type: "json_schema",
49
- json_schema: {
50
- strict: true,
51
- name: tools[0].name,
52
- description: tools[0].description,
53
- schema: tools[0].parameters
54
- }
55
- } :
56
- undefined,
57
- tools: !useStructured && tools.length > 0 ?
58
- tools.map((tool) => ({
59
- type: "function",
60
- function: {
61
- name: tool.name,
62
- description: tool.description,
63
- parameters: tool.parameters as any,
64
- strict: true
65
- }
66
- })) :
67
- undefined,
68
- tool_choice: !useStructured && tools.length > 0 ?
69
- typeof required === "boolean" ? (required ? "required" : "auto") : {
70
- type: "function",
71
- function: { name: required }
72
- } :
73
- undefined
74
- })
54
+ /**
55
+ * @since 1.0.0
56
+ */
57
+ export declare namespace Config {
58
+ /**
59
+ * @since 1.0.0
60
+ * @category configuration
61
+ */
62
+ export interface Service extends
63
+ Simplify<
64
+ Partial<
65
+ Omit<
66
+ typeof Generated.CreateChatCompletionRequest.Encoded,
67
+ "messages" | "tools" | "tool_choice" | "stream" | "stream_options" | "functions"
68
+ >
69
+ >
70
+ >
71
+ {}
72
+ }
73
+
74
+ // =============================================================================
75
+ // OpenAi Completions
76
+ // =============================================================================
77
+
78
+ const modelCacheKey = Symbol.for("@effect/ai-openai/OpenAiCompletions/AiModel")
79
+
80
+ /**
81
+ * @since 1.0.0
82
+ * @category ai models
83
+ */
84
+ export const model = (
85
+ model: (string & {}) | Model,
86
+ config?: Omit<Config.Service, "model">
87
+ ): AiModel.AiModel<Completions.Completions | Tokenizer.Tokenizer, OpenAiClient> =>
88
+ AiModel.make({
89
+ model,
90
+ cacheKey: modelCacheKey,
91
+ requires: OpenAiClient,
92
+ provides: Effect.map(
93
+ make({ model, config }),
94
+ (completions) => Context.make(Completions.Completions, completions)
95
+ ) as Effect.Effect<Context.Context<Completions.Completions | Tokenizer.Tokenizer>>,
96
+ updateContext: (context) => {
97
+ const innerConfig = context.unsafeMap.get(Config.key) as Config.Service | undefined
98
+ return Context.mergeAll(
99
+ context,
100
+ Context.make(Config, { model, ...config, ...innerConfig }),
101
+ Context.make(Tokenizer.Tokenizer, OpenAiTokenizer.make({ model: innerConfig?.model ?? model }))
75
102
  )
76
103
  }
104
+ })
77
105
 
78
- return yield* Completions.make({
79
- create({ span, ...options }) {
80
- return makeRequest(options).pipe(
81
- Effect.tap((request) => annotateRequest(span, request)),
82
- Effect.flatMap(client.client.createChatCompletion),
83
- Effect.tap((response) => annotateChatResponse(span, response)),
84
- Effect.flatMap((response) =>
85
- makeResponse(
86
- response,
87
- "create",
88
- options.tools.length === 1 && options.tools[0].structured
89
- ? options.tools[0]
90
- : undefined
91
- )
92
- ),
93
- Effect.catchAll((cause) =>
94
- Effect.fail(
95
- new AiError({
96
- module: "OpenAiCompletions",
97
- method: "create",
98
- description: "An error occurred",
99
- cause
100
- })
101
- )
106
+ const make = Effect.fnUntraced(function*(options: {
107
+ readonly model: (string & {}) | Model
108
+ readonly config?: Omit<Config.Service, "model">
109
+ }) {
110
+ const client = yield* OpenAiClient
111
+
112
+ const makeRequest = ({ input, required, system, tools }: Completions.CompletionOptions) => {
113
+ const useStructured = tools.length === 1 && tools[0].structured
114
+ return Effect.map(
115
+ Effect.context<never>(),
116
+ (context): typeof Generated.CreateChatCompletionRequest.Encoded => ({
117
+ model: options.model,
118
+ ...options.config,
119
+ ...context.unsafeMap.get(Config.key),
120
+ messages: makeMessages(input, system),
121
+ response_format: useStructured ?
122
+ {
123
+ type: "json_schema",
124
+ json_schema: {
125
+ strict: true,
126
+ name: tools[0].name,
127
+ description: tools[0].description,
128
+ schema: tools[0].parameters as any
129
+ }
130
+ } :
131
+ undefined,
132
+ tools: !useStructured && tools.length > 0 ?
133
+ tools.map((tool) => ({
134
+ type: "function",
135
+ function: {
136
+ name: tool.name,
137
+ description: tool.description,
138
+ parameters: tool.parameters as any,
139
+ strict: true
140
+ }
141
+ })) :
142
+ undefined,
143
+ tool_choice: !useStructured && tools.length > 0 ?
144
+ typeof required === "boolean" ? (required ? "required" : "auto") : {
145
+ type: "function",
146
+ function: { name: required }
147
+ } :
148
+ undefined
149
+ })
150
+ )
151
+ }
152
+
153
+ return yield* Completions.make({
154
+ create({ span, ...options }) {
155
+ return makeRequest(options).pipe(
156
+ Effect.tap((request) => annotateRequest(span, request)),
157
+ Effect.flatMap(client.client.createChatCompletion),
158
+ Effect.tap((response) => annotateChatResponse(span, response)),
159
+ Effect.flatMap((response) =>
160
+ makeResponse(
161
+ response,
162
+ "create",
163
+ options.tools.length === 1 && options.tools[0].structured
164
+ ? options.tools[0]
165
+ : undefined
166
+ )
167
+ ),
168
+ Effect.catchAll((cause) =>
169
+ Effect.fail(
170
+ new AiError({
171
+ module: "OpenAiCompletions",
172
+ method: "create",
173
+ description: "An error occurred",
174
+ cause
175
+ })
102
176
  )
103
177
  )
104
- },
105
- stream({ span, ...options }) {
106
- return makeRequest(options).pipe(
107
- Effect.tap((request) => annotateRequest(span, request)),
108
- Effect.map(client.stream),
109
- Stream.unwrap,
110
- Stream.tap((response) => {
111
- annotateStreamResponse(span, response)
112
- return Effect.void
113
- }),
114
- Stream.map((response) => response.asAiResponse),
115
- Stream.catchAll((cause) =>
116
- Effect.fail(
117
- new AiError({
118
- module: "OpenAiCompletions",
119
- method: "stream",
120
- description: "An error occurred",
121
- cause
122
- })
123
- )
178
+ )
179
+ },
180
+ stream({ span, ...options }) {
181
+ return makeRequest(options).pipe(
182
+ Effect.tap((request) => annotateRequest(span, request)),
183
+ Effect.map(client.stream),
184
+ Stream.unwrap,
185
+ Stream.tap((response) => {
186
+ annotateStreamResponse(span, response)
187
+ return Effect.void
188
+ }),
189
+ Stream.map((response) => response.asAiResponse),
190
+ Stream.catchAll((cause) =>
191
+ Effect.fail(
192
+ new AiError({
193
+ module: "OpenAiCompletions",
194
+ method: "stream",
195
+ description: "An error occurred",
196
+ cause
197
+ })
124
198
  )
125
199
  )
126
- }
127
- })
200
+ )
201
+ }
128
202
  })
203
+ })
129
204
 
130
205
  /**
131
206
  * @since 1.0.0
@@ -133,7 +208,12 @@ const make = (options: {
133
208
  */
134
209
  export const layerCompletions = (options: {
135
210
  readonly model: (string & {}) | Model
136
- }): Layer.Layer<Completions.Completions, never, OpenAiClient> => Layer.effect(Completions.Completions, make(options))
211
+ readonly config?: Omit<Config.Service, "model">
212
+ }): Layer.Layer<Completions.Completions, never, OpenAiClient> =>
213
+ Layer.effect(
214
+ Completions.Completions,
215
+ make({ model: options.model, config: options.config })
216
+ )
137
217
 
138
218
  /**
139
219
  * @since 1.0.0
@@ -141,9 +221,42 @@ export const layerCompletions = (options: {
141
221
  */
142
222
  export const layer = (options: {
143
223
  readonly model: (string & {}) | Model
224
+ readonly config?: Omit<Config.Service, "model">
144
225
  }): Layer.Layer<Completions.Completions | Tokenizer.Tokenizer, never, OpenAiClient> =>
145
226
  Layer.merge(layerCompletions(options), OpenAiTokenizer.layer(options))
146
227
 
228
+ /**
229
+ * @since 1.0.0
230
+ * @category configuration
231
+ */
232
+ export const withConfigOverride: {
233
+ /**
234
+ * @since 1.0.0
235
+ * @category configuration
236
+ */
237
+ (overrides: Config.Service): <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>
238
+ /**
239
+ * @since 1.0.0
240
+ * @category configuration
241
+ */
242
+ <A, E, R>(self: Effect.Effect<A, E, R>, overrides: Config.Service): Effect.Effect<A, E, R>
243
+ } = dual<
244
+ /**
245
+ * @since 1.0.0
246
+ * @category configuration
247
+ */
248
+ (overrides: Config.Service) => <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>,
249
+ /**
250
+ * @since 1.0.0
251
+ * @category configuration
252
+ */
253
+ <A, E, R>(self: Effect.Effect<A, E, R>, overrides: Config.Service) => Effect.Effect<A, E, R>
254
+ >(2, (self, overrides) =>
255
+ Effect.flatMap(
256
+ Config.getOrUndefined,
257
+ (config) => Effect.provideService(self, Config, { ...config, ...overrides })
258
+ ))
259
+
147
260
  const makeMessages = (
148
261
  input: AiInput.AiInput,
149
262
  system: Option.Option<string>
@@ -5,8 +5,6 @@ import type { HttpClient } from "@effect/platform/HttpClient"
5
5
  import * as Context from "effect/Context"
6
6
  import * as Effect from "effect/Effect"
7
7
  import { dual } from "effect/Function"
8
- import type { Simplify } from "effect/Types"
9
- import type * as Generated from "./Generated.js"
10
8
 
11
9
  /**
12
10
  * @since 1.0.0
@@ -31,19 +29,10 @@ export class OpenAiConfig extends Context.Tag("@effect/ai-openai/OpenAiConfig")<
31
29
  */
32
30
  export declare namespace OpenAiConfig {
33
31
  /**
34
- * @since 1.0.0
32
+ * @since 1.0.0
35
33
  * @category models
36
34
  */
37
- export interface Service extends
38
- Simplify<
39
- Partial<
40
- Omit<
41
- typeof Generated.CreateChatCompletionRequest.Encoded,
42
- "messages" | "tools" | "tool_choice" | "stream" | "stream_options" | "functions"
43
- >
44
- >
45
- >
46
- {
35
+ export interface Service {
47
36
  readonly transformClient?: (client: HttpClient) => HttpClient
48
37
  }
49
38
  }
@@ -2,14 +2,19 @@
2
2
  * @since 1.0.0
3
3
  */
4
4
  import { AiError } from "@effect/ai/AiError"
5
+ import * as AiModel from "@effect/ai/AiModel"
5
6
  import * as Embeddings from "@effect/ai/Embeddings"
7
+ import * as Tokenizer from "@effect/ai/Tokenizer"
6
8
  import * as Context from "effect/Context"
7
9
  import type * as Duration from "effect/Duration"
8
10
  import * as Effect from "effect/Effect"
11
+ import { dual } from "effect/Function"
9
12
  import * as Layer from "effect/Layer"
13
+ import * as Struct from "effect/Struct"
10
14
  import type { Simplify } from "effect/Types"
11
15
  import type * as Generated from "./Generated.js"
12
16
  import { OpenAiClient } from "./OpenAiClient.js"
17
+ import * as OpenAiTokenizer from "./OpenAiTokenizer.js"
13
18
 
14
19
  /**
15
20
  * @since 1.0.0
@@ -17,49 +22,121 @@ import { OpenAiClient } from "./OpenAiClient.js"
17
22
  */
18
23
  export type Model = typeof Generated.CreateEmbeddingRequestModelEnum.Encoded
19
24
 
25
+ // =============================================================================
26
+ // Configuration
27
+ // =============================================================================
28
+
20
29
  /**
21
30
  * @since 1.0.0
22
31
  * @category tags
23
32
  */
24
- export class OpenAiEmbeddingsConfig extends Context.Tag("@effect/ai-openai/OpenAiEmbeddings/Config")<
25
- OpenAiEmbeddingsConfig,
26
- Simplify<
27
- Partial<
28
- Omit<
29
- typeof Generated.CreateEmbeddingRequest.Encoded,
30
- "input"
31
- >
32
- >
33
- >
33
+ export class Config extends Context.Tag("@effect/ai-openai/OpenAiEmbeddings/Config")<
34
+ Config,
35
+ Config.Service
34
36
  >() {
35
37
  /**
36
38
  * @since 1.0.0
37
39
  */
38
- static readonly getOrUndefined: Effect.Effect<typeof OpenAiEmbeddingsConfig.Service | undefined> = Effect.map(
40
+ static readonly getOrUndefined: Effect.Effect<Config.Service | undefined> = Effect.map(
39
41
  Effect.context<never>(),
40
- (context) => context.unsafeMap.get(OpenAiEmbeddingsConfig.key)
42
+ (context) => context.unsafeMap.get(Config.key)
41
43
  )
42
44
  }
43
45
 
44
- const makeRequest = (
45
- client: OpenAiClient.Service,
46
- input: ReadonlyArray<string>,
47
- parentConfig: typeof OpenAiEmbeddingsConfig.Service | undefined,
48
- options: {
49
- readonly model: string
46
+ /**
47
+ * @since 1.0.0
48
+ */
49
+ export declare namespace Config {
50
+ /**
51
+ * @since 1.0.0
52
+ * @category configuration
53
+ */
54
+ export interface Service extends
55
+ Simplify<
56
+ Partial<
57
+ Omit<
58
+ typeof Generated.CreateEmbeddingRequest.Encoded,
59
+ "input"
60
+ >
61
+ >
62
+ >
63
+ {}
64
+
65
+ /**
66
+ * @since 1.0.0
67
+ * @category configuration
68
+ */
69
+ export interface Batched extends Omit<Config.Service, "model"> {
50
70
  readonly maxBatchSize?: number
51
71
  readonly cache?: {
52
72
  readonly capacity: number
53
73
  readonly timeToLive: Duration.DurationInput
54
74
  }
55
75
  }
76
+
77
+ /**
78
+ * @since 1.0.0
79
+ * @category configuration
80
+ */
81
+ export interface DataLoader extends Omit<Config.Service, "model"> {
82
+ readonly window: Duration.DurationInput
83
+ readonly maxBatchSize?: number
84
+ }
85
+ }
86
+
87
+ // =============================================================================
88
+ // OpenAi Embeddings
89
+ // =============================================================================
90
+
91
+ const batchedModelCacheKey = Symbol.for("@effect/ai-openai/OpenAiEmbeddings/Batched/AiModel")
92
+ const dataLoaderModelCacheKey = Symbol.for("@effect/ai-openai/OpenAiEmbeddings/DataLoader/AiModel")
93
+
94
+ /**
95
+ * @since 1.0.0
96
+ * @category ai models
97
+ */
98
+ export const model = (
99
+ model: (string & {}) | Model,
100
+ config: Simplify<
101
+ (
102
+ | ({ readonly mode: "batched" } & Config.Batched)
103
+ | ({ readonly mode: "data-loader" } & Config.DataLoader)
104
+ )
105
+ >
106
+ ): AiModel.AiModel<Embeddings.Embeddings | Tokenizer.Tokenizer, OpenAiClient> =>
107
+ AiModel.make({
108
+ model,
109
+ cacheKey: config.mode === "batched" ? batchedModelCacheKey : dataLoaderModelCacheKey,
110
+ requires: OpenAiClient,
111
+ provides: Effect.map(
112
+ config.mode === "batched"
113
+ ? makeBatched({ model, config })
114
+ : makeDataLoader({ model, config }),
115
+ (embeddings) => Context.make(Embeddings.Embeddings, embeddings)
116
+ ) as Effect.Effect<Context.Context<Embeddings.Embeddings | Tokenizer.Tokenizer>>,
117
+ updateContext: (context) => {
118
+ const outerConfig = config.mode === "batched"
119
+ ? Struct.omit("mode", "maxBatchSize", "cache")(config)
120
+ : Struct.omit("mode", "maxBatchSize", "window")(config)
121
+ const innerConfig = context.unsafeMap.get(Config.key) as Config.Service | undefined
122
+ return Context.mergeAll(
123
+ context,
124
+ Context.make(Config, { model, ...outerConfig, ...innerConfig }),
125
+ Context.make(Tokenizer.Tokenizer, OpenAiTokenizer.make({ model: innerConfig?.model ?? model }))
126
+ )
127
+ }
128
+ })
129
+
130
+ const makeRequest = (
131
+ client: OpenAiClient.Service,
132
+ input: ReadonlyArray<string>,
133
+ parentConfig: typeof Config.Service | undefined
56
134
  ) =>
57
135
  Effect.context<never>().pipe(
58
136
  Effect.flatMap((context) => {
59
- const localConfig = context.unsafeMap.get(OpenAiEmbeddingsConfig.key)
137
+ const localConfig = context.unsafeMap.get(Config.key)
60
138
  return client.client.createEmbedding({
61
139
  input,
62
- model: options.model,
63
140
  ...parentConfig,
64
141
  ...localConfig
65
142
  })
@@ -89,37 +166,32 @@ const makeRequest = (
89
166
  })
90
167
  )
91
168
 
92
- const make = Effect.fnUntraced(function*(options: {
169
+ const makeBatched = Effect.fnUntraced(function*(options: {
93
170
  readonly model: (string & {}) | Model
94
- readonly maxBatchSize?: number
95
- readonly cache?: {
96
- readonly capacity: number
97
- readonly timeToLive: Duration.DurationInput
98
- }
171
+ readonly config: Config.Batched
99
172
  }) {
100
173
  const client = yield* OpenAiClient
101
- const parentConfig = yield* OpenAiEmbeddingsConfig.getOrUndefined
174
+ const { cache, maxBatchSize = 2048, ...parentConfig } = options.config
102
175
  return yield* Embeddings.make({
103
- cache: options.cache,
104
- maxBatchSize: options.maxBatchSize ?? 2048,
176
+ cache,
177
+ maxBatchSize,
105
178
  embedMany(input) {
106
- return makeRequest(client, input, parentConfig, options)
179
+ return makeRequest(client, input, { ...parentConfig, model: options.model })
107
180
  }
108
181
  })
109
182
  })
110
183
 
111
184
  const makeDataLoader = Effect.fnUntraced(function*(options: {
112
185
  readonly model: (string & {}) | Model
113
- readonly window: Duration.DurationInput
114
- readonly maxBatchSize?: number
186
+ readonly config: Config.DataLoader
115
187
  }) {
116
188
  const client = yield* OpenAiClient
117
- const parentConfig = yield* OpenAiEmbeddingsConfig.getOrUndefined
189
+ const { maxBatchSize = 2048, window, ...parentConfig } = options.config
118
190
  return yield* Embeddings.makeDataLoader({
119
- window: options.window,
120
- maxBatchSize: options.maxBatchSize ?? 2048,
191
+ window,
192
+ maxBatchSize,
121
193
  embedMany(input) {
122
- return makeRequest(client, input, parentConfig, options)
194
+ return makeRequest(client, input, { ...parentConfig, model: options.model })
123
195
  }
124
196
  })
125
197
  })
@@ -135,7 +207,19 @@ export const layer = (options: {
135
207
  readonly capacity: number
136
208
  readonly timeToLive: Duration.DurationInput
137
209
  }
138
- }): Layer.Layer<Embeddings.Embeddings, never, OpenAiClient> => Layer.effect(Embeddings.Embeddings, make(options))
210
+ readonly config?: Config.Service
211
+ }): Layer.Layer<Embeddings.Embeddings, never, OpenAiClient> =>
212
+ Layer.effect(
213
+ Embeddings.Embeddings,
214
+ makeBatched({
215
+ model: options.model,
216
+ config: {
217
+ cache: options.cache,
218
+ maxBatchSize: options.maxBatchSize,
219
+ ...options.config
220
+ }
221
+ })
222
+ )
139
223
 
140
224
  /**
141
225
  * @since 1.0.0
@@ -145,5 +229,48 @@ export const layerDataLoader = (options: {
145
229
  readonly model: (string & {}) | Model
146
230
  readonly window: Duration.DurationInput
147
231
  readonly maxBatchSize?: number
232
+ readonly config?: Config.Service
148
233
  }): Layer.Layer<Embeddings.Embeddings, never, OpenAiClient> =>
149
- Layer.scoped(Embeddings.Embeddings, makeDataLoader(options))
234
+ Layer.scoped(
235
+ Embeddings.Embeddings,
236
+ makeDataLoader({
237
+ model: options.model,
238
+ config: {
239
+ window: options.window,
240
+ maxBatchSize: options.maxBatchSize,
241
+ ...options.config
242
+ }
243
+ })
244
+ )
245
+
246
+ /**
247
+ * @since 1.0.0
248
+ * @category configuration
249
+ */
250
+ export const withConfigOverride: {
251
+ /**
252
+ * @since 1.0.0
253
+ * @category configuration
254
+ */
255
+ (config: Config.Service): <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>
256
+ /**
257
+ * @since 1.0.0
258
+ * @category configuration
259
+ */
260
+ <A, E, R>(self: Effect.Effect<A, E, R>, config: Config.Service): Effect.Effect<A, E, R>
261
+ } = dual<
262
+ /**
263
+ * @since 1.0.0
264
+ * @category configuration
265
+ */
266
+ (config: Config.Service) => <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>,
267
+ /**
268
+ * @since 1.0.0
269
+ * @category configuration
270
+ */
271
+ <A, E, R>(self: Effect.Effect<A, E, R>, config: Config.Service) => Effect.Effect<A, E, R>
272
+ >(2, (self, overrides) =>
273
+ Effect.flatMap(
274
+ Config.getOrUndefined,
275
+ (config) => Effect.provideService(self, Config, { ...config, ...overrides })
276
+ ))