@effect/ai-openai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. package/Generated/package.json +6 -0
  2. package/LICENSE +21 -0
  3. package/OpenAiClient/package.json +6 -0
  4. package/OpenAiCompletions/package.json +6 -0
  5. package/OpenAiConfig/package.json +6 -0
  6. package/OpenAiTokenizer/package.json +6 -0
  7. package/README.md +1 -0
  8. package/dist/cjs/Generated.js +2702 -0
  9. package/dist/cjs/Generated.js.map +1 -0
  10. package/dist/cjs/OpenAiClient.js +153 -0
  11. package/dist/cjs/OpenAiClient.js.map +1 -0
  12. package/dist/cjs/OpenAiCompletions.js +208 -0
  13. package/dist/cjs/OpenAiCompletions.js.map +1 -0
  14. package/dist/cjs/OpenAiConfig.js +26 -0
  15. package/dist/cjs/OpenAiConfig.js.map +1 -0
  16. package/dist/cjs/OpenAiTokenizer.js +50 -0
  17. package/dist/cjs/OpenAiTokenizer.js.map +1 -0
  18. package/dist/cjs/index.js +19 -0
  19. package/dist/cjs/index.js.map +1 -0
  20. package/dist/dts/Generated.d.ts +5759 -0
  21. package/dist/dts/Generated.d.ts.map +1 -0
  22. package/dist/dts/OpenAiClient.d.ts +113 -0
  23. package/dist/dts/OpenAiClient.d.ts.map +1 -0
  24. package/dist/dts/OpenAiCompletions.d.ts +19 -0
  25. package/dist/dts/OpenAiCompletions.d.ts.map +1 -0
  26. package/dist/dts/OpenAiConfig.d.ts +55 -0
  27. package/dist/dts/OpenAiConfig.d.ts.map +1 -0
  28. package/dist/dts/OpenAiTokenizer.d.ts +10 -0
  29. package/dist/dts/OpenAiTokenizer.d.ts.map +1 -0
  30. package/dist/dts/index.d.ts +21 -0
  31. package/dist/dts/index.d.ts.map +1 -0
  32. package/dist/esm/Generated.js +2493 -0
  33. package/dist/esm/Generated.js.map +1 -0
  34. package/dist/esm/OpenAiClient.js +139 -0
  35. package/dist/esm/OpenAiClient.js.map +1 -0
  36. package/dist/esm/OpenAiCompletions.js +197 -0
  37. package/dist/esm/OpenAiCompletions.js.map +1 -0
  38. package/dist/esm/OpenAiConfig.js +16 -0
  39. package/dist/esm/OpenAiConfig.js.map +1 -0
  40. package/dist/esm/OpenAiTokenizer.js +40 -0
  41. package/dist/esm/OpenAiTokenizer.js.map +1 -0
  42. package/dist/esm/index.js +21 -0
  43. package/dist/esm/index.js.map +1 -0
  44. package/dist/esm/package.json +4 -0
  45. package/package.json +80 -0
  46. package/src/Generated.ts +3936 -0
  47. package/src/OpenAiClient.ts +291 -0
  48. package/src/OpenAiCompletions.ts +260 -0
  49. package/src/OpenAiConfig.ts +31 -0
  50. package/src/OpenAiTokenizer.ts +59 -0
  51. package/src/index.ts +24 -0
@@ -0,0 +1,291 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ import * as AiResponse from "@effect/ai/AiResponse"
5
+ import * as AiRole from "@effect/ai/AiRole"
6
+ import * as Sse from "@effect/experimental/Sse"
7
+ import * as HttpBody from "@effect/platform/HttpBody"
8
+ import * as HttpClient from "@effect/platform/HttpClient"
9
+ import type * as HttpClientError from "@effect/platform/HttpClientError"
10
+ import * as HttpClientRequest from "@effect/platform/HttpClientRequest"
11
+ import * as Chunk from "effect/Chunk"
12
+ import * as Config from "effect/Config"
13
+ import type { ConfigError } from "effect/ConfigError"
14
+ import * as Context from "effect/Context"
15
+ import * as Data from "effect/Data"
16
+ import * as Effect from "effect/Effect"
17
+ import { identity } from "effect/Function"
18
+ import * as Layer from "effect/Layer"
19
+ import * as Option from "effect/Option"
20
+ import * as Redacted from "effect/Redacted"
21
+ import * as Stream from "effect/Stream"
22
+ import * as Generated from "./Generated.js"
23
+
24
/**
 * @since 1.0.0
 * @category tags
 */
export class OpenAiClient extends Context.Tag("@effect/ai-openai/OpenAiClient")<
  OpenAiClient,
  OpenAiClient.Service
>() {}

/**
 * @since 1.0.0
 * @category models
 */
export declare namespace OpenAiClient {
  /**
   * The OpenAI client service: a generated REST client for the
   * non-streaming endpoints plus helpers for SSE-streamed chat completions.
   *
   * @since 1.0.0
   * @category models
   */
  export interface Service {
    // Auto-generated client covering the OpenAI REST endpoints.
    readonly client: Generated.Client
    // Executes an arbitrary request and decodes the server-sent event
    // stream, JSON-parsing each event's data payload as `A` (unchecked cast —
    // callers are responsible for the shape of `A`).
    readonly streamRequest: <A>(
      request: HttpClientRequest.HttpClientRequest
    ) => Stream.Stream<A, HttpClientError.HttpClientError>
    // Streams a chat completion, re-assembling per-choice deltas into
    // `StreamChunk` values.
    readonly stream: (
      request: StreamCompletionRequest
    ) => Stream.Stream<StreamChunk, HttpClientError.HttpClientError>
  }
}
52
+
53
+ /**
54
+ * @since 1.0.0
55
+ * @category constructors
56
+ */
57
+ export const make = (options: {
58
+ readonly apiKey: Redacted.Redacted
59
+ readonly organizationId?: Redacted.Redacted | undefined
60
+ readonly projectId?: Redacted.Redacted | undefined
61
+ }): Effect.Effect<OpenAiClient.Service, never, HttpClient.HttpClient> =>
62
+ Effect.gen(function*() {
63
+ const httpClient = (yield* HttpClient.HttpClient).pipe(
64
+ HttpClient.mapRequest((request) =>
65
+ request.pipe(
66
+ HttpClientRequest.prependUrl("https://api.openai.com/v1"),
67
+ HttpClientRequest.bearerToken(options.apiKey),
68
+ options.organizationId !== undefined
69
+ ? HttpClientRequest.setHeader("OpenAI-Organization", Redacted.value(options.organizationId))
70
+ : identity,
71
+ options.projectId !== undefined
72
+ ? HttpClientRequest.setHeader("OpenAI-Project", Redacted.value(options.projectId))
73
+ : identity,
74
+ HttpClientRequest.acceptJson
75
+ )
76
+ )
77
+ )
78
+ const httpClientOk = HttpClient.filterStatusOk(httpClient)
79
+ const client = Generated.make(httpClient)
80
+ const streamRequest = <A = unknown>(request: HttpClientRequest.HttpClientRequest) =>
81
+ httpClientOk.execute(request).pipe(
82
+ Effect.map((r) => r.stream),
83
+ Stream.unwrapScoped,
84
+ Stream.decodeText(),
85
+ Stream.pipeThroughChannel(Sse.makeChannel()),
86
+ Stream.takeWhile((event) => event.data !== "[DONE]"),
87
+ Stream.map((event) => JSON.parse(event.data) as A)
88
+ )
89
+ const stream = (request: StreamCompletionRequest) =>
90
+ streamRequest<RawCompletionChunk>(HttpClientRequest.post("/chat/completions", {
91
+ body: HttpBody.unsafeJson({
92
+ ...request,
93
+ stream: true
94
+ })
95
+ })).pipe(
96
+ Stream.mapAccum(new Map<number, ContentPart | Array<ToolCallPart>>(), (acc, chunk) => {
97
+ const parts: Array<StreamChunkPart> = []
98
+ for (let i = 0; i < chunk.choices.length; i++) {
99
+ const choice = chunk.choices[i]
100
+ if ("content" in choice.delta && typeof choice.delta.content === "string") {
101
+ let part = acc.get(choice.index) as ContentPart | undefined
102
+ part = {
103
+ _tag: "Content",
104
+ content: choice.delta.content
105
+ }
106
+ acc.set(choice.index, part)
107
+ parts.push(part)
108
+ } else if ("tool_calls" in choice.delta && Array.isArray(choice.delta.tool_calls)) {
109
+ const parts = (acc.get(choice.index) ?? []) as Array<ToolCallPart>
110
+ for (const toolCall of choice.delta.tool_calls) {
111
+ const part = parts[toolCall.index]
112
+ const toolPart = part?._tag === "ToolCall" ?
113
+ {
114
+ ...part,
115
+ arguments: part.arguments + toolCall.function.arguments
116
+ } :
117
+ {
118
+ _tag: "ToolCall",
119
+ ...toolCall,
120
+ ...toolCall.function,
121
+ role: choice.delta.role!
122
+ } as any
123
+ parts[toolCall.index] = toolPart
124
+ }
125
+ acc.set(choice.index, parts)
126
+ } else if (choice.finish_reason === "tool_calls") {
127
+ const toolParts = acc.get(choice.index) as Array<ToolCallPart>
128
+ for (const part of toolParts) {
129
+ try {
130
+ const args = JSON.parse(part.arguments as string)
131
+ parts.push({
132
+ _tag: "ToolCall",
133
+ id: part.id,
134
+ name: part.name,
135
+ arguments: args
136
+ })
137
+ // eslint-disable-next-line no-empty
138
+ } catch {}
139
+ }
140
+ }
141
+ }
142
+ return [acc, parts.length === 0 ? Option.none() : Option.some(new StreamChunk({ parts }))]
143
+ }),
144
+ Stream.filterMap(identity)
145
+ )
146
+ return OpenAiClient.of({ client, streamRequest, stream })
147
+ })
148
+
149
+ /**
150
+ * @since 1.0.0
151
+ * @category layers
152
+ */
153
+ export const layer = (options: {
154
+ readonly apiKey: Redacted.Redacted
155
+ readonly organizationId?: Redacted.Redacted | undefined
156
+ readonly projectId?: Redacted.Redacted | undefined
157
+ }): Layer.Layer<OpenAiClient, never, HttpClient.HttpClient> => Layer.effect(OpenAiClient, make(options))
158
+
159
+ /**
160
+ * @since 1.0.0
161
+ * @category layers
162
+ */
163
+ export const layerConfig = (
164
+ options: Config.Config.Wrap<{
165
+ readonly apiKey: Redacted.Redacted
166
+ readonly organizationId?: Redacted.Redacted | undefined
167
+ readonly projectId?: Redacted.Redacted | undefined
168
+ }>
169
+ ): Layer.Layer<OpenAiClient, ConfigError, HttpClient.HttpClient> =>
170
+ Config.unwrap(options).pipe(
171
+ Effect.flatMap(make),
172
+ Layer.effect(OpenAiClient)
173
+ )
174
+
175
/**
 * A chat-completion request body with `stream` omitted (it is forced to
 * `true` by `OpenAiClient.stream`).
 *
 * @since 1.0.0
 * @category models
 */
export type StreamCompletionRequest = Omit<typeof Generated.CreateChatCompletionRequest.Encoded, "stream">

// Minimal shape of a raw `chat.completion.chunk` SSE payload as parsed from
// the event stream. Only the fields this module reads are modelled.
interface RawCompletionChunk {
  readonly id: string
  readonly object: "chat.completion.chunk"
  readonly created: number
  readonly model: string
  // A choice either carries a delta (finish_reason null) or marks the end
  // of that choice with a finish reason and an empty delta.
  readonly choices: Array<
    {
      readonly index: number
      readonly finish_reason: null
      readonly delta: RawDelta
    } | {
      readonly index: number
      readonly finish_reason: string
      readonly delta: {}
    }
  >
  readonly system_fingerprint: string
}

// A delta is either a text-content fragment or a batch of tool-call
// fragments; the two are mutually exclusive per chunk.
type RawDelta = {
  readonly index?: number
  readonly role?: string
  readonly content: string
} | {
  readonly index?: number
  readonly role?: string
  readonly content?: null
  readonly tool_calls: Array<RawToolCall>
}

// The first fragment of a tool call carries id/type/name; subsequent
// fragments only append to `function.arguments`.
type RawToolCall = {
  readonly index: number
  readonly id: string
  readonly type: "function"
  readonly function: {
    readonly name: string
    readonly arguments: string
  }
} | {
  readonly index: number
  readonly function: {
    readonly arguments: string
  }
}
225
+
226
+ /**
227
+ * @since 1.0.0
228
+ * @category models
229
+ */
230
+ export class StreamChunk extends Data.Class<{
231
+ readonly parts: Array<StreamChunkPart>
232
+ }> {
233
+ /**
234
+ * @since 1.0.0
235
+ */
236
+ get text(): Option.Option<string> {
237
+ return this.parts[0]?._tag === "Content" ? Option.some(this.parts[0].content) : Option.none()
238
+ }
239
+ /**
240
+ * @since 1.0.0
241
+ */
242
+ get asAiResponse(): AiResponse.AiResponse {
243
+ if (this.parts.length === 0) {
244
+ return AiResponse.AiResponse.fromText({
245
+ role: AiRole.model,
246
+ content: ""
247
+ })
248
+ }
249
+ const part = this.parts[0]
250
+ return part._tag === "Content" ?
251
+ AiResponse.AiResponse.fromText({
252
+ role: AiRole.model,
253
+ content: part.content
254
+ }) :
255
+ new AiResponse.AiResponse({
256
+ role: AiRole.model,
257
+ parts: Chunk.of(AiResponse.ToolCallPart.fromUnknown({
258
+ id: part.id,
259
+ name: part.name,
260
+ params: part.arguments
261
+ }))
262
+ })
263
+ }
264
+ }
265
+
266
/**
 * A single decoded piece of a streamed chunk: either a text fragment or a
 * completed tool call.
 *
 * @since 1.0.0
 * @category models
 */
export type StreamChunkPart = ContentPart | ToolCallPart

/**
 * A text-content fragment from a streamed delta.
 *
 * @since 1.0.0
 * @category models
 */
export interface ContentPart {
  readonly _tag: "Content"
  readonly name?: string
  readonly content: string
}

/**
 * A tool call re-assembled from streamed fragments; `arguments` is the
 * JSON-parsed argument payload.
 *
 * @since 1.0.0
 * @category models
 */
export interface ToolCallPart {
  readonly _tag: "ToolCall"
  readonly id: string
  readonly name: string
  readonly arguments: unknown
}
@@ -0,0 +1,260 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ import { AiError } from "@effect/ai/AiError"
5
+ import type * as AiInput from "@effect/ai/AiInput"
6
+ import * as AiResponse from "@effect/ai/AiResponse"
7
+ import * as AiRole from "@effect/ai/AiRole"
8
+ import * as Completions from "@effect/ai/Completions"
9
+ import type * as Tokenizer from "@effect/ai/Tokenizer"
10
+ import * as Arr from "effect/Array"
11
+ import * as Effect from "effect/Effect"
12
+ import * as Layer from "effect/Layer"
13
+ import type * as Option from "effect/Option"
14
+ import * as Stream from "effect/Stream"
15
+ import type * as Generated from "./Generated.js"
16
+ import { OpenAiClient } from "./OpenAiClient.js"
17
+ import { OpenAiConfig } from "./OpenAiConfig.js"
18
+ import * as OpenAiTokenizer from "./OpenAiTokenizer.js"
19
+
20
// Builds a `Completions` service that talks to the OpenAI chat-completions
// API through the `OpenAiClient` service, defaulting to `options.model`.
const make = (options: {
  readonly model: string
}) =>
  Effect.gen(function*() {
    const client = yield* OpenAiClient
    // Layer-level config captured at construction time; may be undefined.
    const config = yield* OpenAiConfig.getOrUndefined

    // Assembles the request payload. Precedence (last spread wins): a
    // per-call `OpenAiConfig` found in the caller's context (read directly
    // via `unsafeMap`) overrides the layer-level `config`, which overrides
    // the default model.
    const makeRequest = ({ input, required, system, tools }: Completions.CompletionOptions) =>
      Effect.map(
        Effect.context<never>(),
        (context): typeof Generated.CreateChatCompletionRequest.Encoded => ({
          model: options.model,
          ...config,
          ...context.unsafeMap.get(OpenAiConfig.key),
          messages: makeMessages(input, system),
          // Map tool definitions to OpenAI "function" tools; omitted
          // entirely when no tools were supplied.
          tools: tools.length > 0 ?
            tools.map((tool) => ({
              type: "function",
              function: {
                name: tool.name,
                description: tool.description,
                parameters: tool.parameters as any,
                strict: true
              }
            })) :
            undefined,
          // `required` is either a boolean ("required"/"auto") or the name
          // of the single tool the model is forced to call.
          tool_choice: tools.length > 0 ?
            typeof required === "boolean" ? (required ? "required" : "auto") : {
              type: "function",
              function: { name: required }
            } :
            undefined
        })
      )

    return yield* Completions.make({
      // One-shot completion: single request, response mapped via makeResponse.
      create(options) {
        return makeRequest(options).pipe(
          Effect.flatMap(client.client.createChatCompletion),
          // Any transport/decode failure is wrapped into a generic AiError.
          Effect.catchAll((cause) =>
            Effect.fail(
              new AiError({
                module: "OpenAiCompletions",
                method: "create",
                description: "An error occurred",
                cause
              })
            )
          ),
          Effect.flatMap((response) => makeResponse(response, "create"))
        )
      },
      // Streaming completion: each StreamChunk becomes an AiResponse.
      stream(options) {
        return makeRequest(options).pipe(
          Effect.map(client.stream),
          Stream.unwrap,
          Stream.catchAll((cause) =>
            Effect.fail(
              new AiError({
                module: "OpenAiCompletions",
                method: "stream",
                description: "An error occurred",
                cause
              })
            )
          ),
          Stream.map((response) => response.asAiResponse)
        )
      }
    })
  })
91
+
92
+ /**
93
+ * @since 1.0.0
94
+ * @category layers
95
+ */
96
+ export const layerCompletions = (options: {
97
+ readonly model: string
98
+ }): Layer.Layer<Completions.Completions, never, OpenAiClient> => Layer.effect(Completions.Completions, make(options))
99
+
100
+ /**
101
+ * @since 1.0.0
102
+ * @category layers
103
+ */
104
+ export const layer = (options: {
105
+ readonly model: string
106
+ }): Layer.Layer<Completions.Completions | Tokenizer.Tokenizer, never, OpenAiClient> =>
107
+ Layer.merge(layerCompletions(options), OpenAiTokenizer.layer(options))
108
+
109
+ const makeMessages = (
110
+ input: AiInput.AiInput,
111
+ system: Option.Option<string>
112
+ ): Arr.NonEmptyReadonlyArray<typeof Generated.ChatCompletionRequestMessage.Encoded> => {
113
+ const messages: Array<typeof Generated.ChatCompletionRequestMessage.Encoded> = system._tag === "Some" ?
114
+ [makeSystemMessage(system.value)] :
115
+ []
116
+ for (const message of input) {
117
+ // eslint-disable-next-line no-restricted-syntax
118
+ messages.push(...convertMessage(message))
119
+ }
120
+ return messages as any
121
+ }
122
+
123
+ const formatRole = (role: AiRole.AiRole) => {
124
+ switch (role._tag) {
125
+ case "UserWithName":
126
+ return {
127
+ role: "user",
128
+ name: safeName(role.name)
129
+ } as const
130
+ case "User":
131
+ return {
132
+ role: "user"
133
+ } as const
134
+ case "Model":
135
+ return {
136
+ role: "assistant"
137
+ } as const
138
+ }
139
+ }
140
+
141
// Converts one AiInput message into one or more OpenAI request messages.
// Content parts and tool calls cannot share an OpenAI message, so the
// current run of each is flushed whenever the part kind switches; order of
// flushes preserves the original part order.
const convertMessage = (
  message: AiInput.Message
): Array<typeof Generated.ChatCompletionRequestMessage.Encoded> => {
  const messages: Array<typeof Generated.ChatCompletionRequestMessage.Encoded> = []
  let parts: Array<typeof Generated.ChatCompletionRequestUserMessageContentPart.Encoded> = []
  let toolCalls: Array<typeof Generated.ChatCompletionMessageToolCall.Encoded> = []
  // Emits the buffered content parts as one message with the message's role.
  function flushContent() {
    if (parts.length === 0) return
    messages.push({
      ...formatRole(message.role),
      content: parts as any
    })
    parts = []
  }
  // Emits the buffered tool calls as one assistant message (content null).
  function flushToolCalls() {
    if (toolCalls.length === 0) return
    messages.push({
      role: "assistant",
      content: null,
      tool_calls: toolCalls
    })
    toolCalls = []
  }
  for (const part of message.parts) {
    if (part._tag === "ToolCall") {
      flushContent()
      toolCalls.push({
        id: part.id,
        type: "function",
        function: {
          name: part.name,
          arguments: JSON.stringify(part.params)
        }
      })
    } else if (part._tag === "ToolCallResolved") {
      // Tool results become standalone "tool" messages keyed by call id.
      flushContent()
      flushToolCalls()
      messages.push({
        role: "tool",
        tool_call_id: part.toolCallId,
        content: JSON.stringify(part.value)
      })
    } else {
      // Text / image parts accumulate into the content buffer.
      flushToolCalls()
      parts.push(makeContentPart(part))
    }
  }
  flushContent()
  flushToolCalls()
  return messages
}
192
+
193
+ const makeContentPart = (
194
+ part: AiInput.TextPart | AiInput.ImagePart | AiInput.ImageUrlPart
195
+ ): typeof Generated.ChatCompletionRequestUserMessageContentPart.Encoded => {
196
+ switch (part._tag) {
197
+ case "Image":
198
+ return {
199
+ type: "image_url",
200
+ image_url: {
201
+ url: part.asDataUri,
202
+ detail: part.quality
203
+ }
204
+ }
205
+ case "ImageUrl":
206
+ return {
207
+ type: "image_url",
208
+ image_url: {
209
+ url: part.url,
210
+ detail: part.quality
211
+ }
212
+ }
213
+ case "Text":
214
+ return {
215
+ type: "text",
216
+ text: part.content
217
+ }
218
+ }
219
+ }
220
+
221
// Maps a non-streaming chat-completion response to an AiResponse. Fails
// with AiError when the response contains no choices; only the first
// choice is used.
const makeResponse = (
  response: Generated.CreateChatCompletionResponse,
  method: string
): Effect.Effect<AiResponse.AiResponse, AiError> =>
  Arr.head(response.choices).pipe(
    Effect.mapError(() =>
      new AiError({
        module: "OpenAiCompletions",
        method,
        description: "Could not get response"
      })
    ),
    Effect.flatMap((choice) => {
      // Text content (when present) seeds the response; otherwise start
      // from the empty response.
      const response = typeof choice.message.content === "string" ?
        AiResponse.AiResponse.fromText({
          role: AiRole.model,
          content: choice.message.content!
        }) :
        AiResponse.AiResponse.empty

      // Attach any tool calls; `arguments` is still a raw JSON string here
      // and is parsed downstream by withToolCallsJson.
      if (choice.message.tool_calls && choice.message.tool_calls.length > 0) {
        return response.withToolCallsJson(choice.message.tool_calls.map((toolCall) => ({
          id: toolCall.id,
          name: toolCall.function.name,
          params: toolCall.function.arguments
        })))
      }

      return Effect.succeed(response)
    })
  )
252
+
253
+ const makeSystemMessage = (content: string): typeof Generated.ChatCompletionRequestSystemMessage.Encoded => {
254
+ return {
255
+ role: "system",
256
+ content
257
+ }
258
+ }
259
+
260
+ const safeName = (name: string) => name.replace(/[^a-zA-Z0-9_-]/g, "_").replace(/_+/, "_")
@@ -0,0 +1,31 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ import * as Context from "effect/Context"
5
+ import * as Effect from "effect/Effect"
6
+ import type { Simplify } from "effect/Types"
7
+ import type * as Generated from "./Generated.js"
8
+
9
/**
 * Context tag carrying per-call / per-layer overrides for the OpenAI
 * chat-completion request body. The message/tool/stream fields are excluded
 * because they are managed by the completion implementation itself.
 *
 * @since 1.0.0
 * @category tags
 */
export class OpenAiConfig extends Context.Tag("@effect/ai-openai/OpenAiConfig")<
  OpenAiConfig,
  Simplify<
    Partial<
      Omit<
        typeof Generated.CreateChatCompletionRequest.Encoded,
        "messages" | "tools" | "tool_choice" | "stream" | "stream_options" | "functions"
      >
    >
  >
>() {
  /**
   * Reads the config from the current context without failing: yields the
   * service when provided, otherwise `undefined`. Goes through `unsafeMap`
   * directly to bypass the usual missing-service error.
   *
   * @since 1.0.0
   */
  static readonly getOrUndefined: Effect.Effect<typeof OpenAiConfig.Service | undefined> = Effect.map(
    Effect.context<never>(),
    (context) => context.unsafeMap.get(OpenAiConfig.key)
  )
}
@@ -0,0 +1,59 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ import { AiError } from "@effect/ai/AiError"
5
+ import * as Tokenizer from "@effect/ai/Tokenizer"
6
+ import * as Arr from "effect/Array"
7
+ import * as Chunk from "effect/Chunk"
8
+ import * as Effect from "effect/Effect"
9
+ import * as Layer from "effect/Layer"
10
+ import * as Option from "effect/Option"
11
+ import * as GptTokenizer from "gpt-tokenizer"
12
+ import { OpenAiConfig } from "./OpenAiConfig.js"
13
+
14
+ const make = (options: { readonly model: string }) =>
15
+ Tokenizer.make({
16
+ tokenize(content) {
17
+ return Effect.tryMap(OpenAiConfig.getOrUndefined, {
18
+ try: (localConfig) =>
19
+ GptTokenizer.encodeChat(
20
+ content.pipe(
21
+ Chunk.toReadonlyArray,
22
+ Arr.flatMap((message) =>
23
+ message.parts.pipe(
24
+ Arr.filterMap((part) => {
25
+ if (part._tag === "Image" || part._tag === "ImageUrl") {
26
+ return Option.none()
27
+ }
28
+ return Option.some(
29
+ {
30
+ role: message.role.kind === "user" ? "user" : "assistant",
31
+ name: message.role._tag === "UserWithName" ? message.role.name : undefined,
32
+ content: part._tag === "Text"
33
+ ? part.content
34
+ : JSON.stringify(part._tag === "ToolCall" ? part.params : part.value)
35
+ } as const
36
+ )
37
+ })
38
+ )
39
+ )
40
+ ),
41
+ localConfig?.model ?? options.model as any
42
+ ),
43
+ catch: (cause) =>
44
+ new AiError({
45
+ module: "OpenAiCompletions",
46
+ method: "tokenize",
47
+ description: "Could not tokenize",
48
+ cause
49
+ })
50
+ })
51
+ }
52
+ })
53
+
54
+ /**
55
+ * @since 1.0.0
56
+ * @category layers
57
+ */
58
+ export const layer = (options: { readonly model: string }): Layer.Layer<Tokenizer.Tokenizer> =>
59
+ Layer.succeed(Tokenizer.Tokenizer, make(options))
package/src/index.ts ADDED
@@ -0,0 +1,24 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ export * as Generated from "./Generated.js"
5
+
6
+ /**
7
+ * @since 1.0.0
8
+ */
9
+ export * as OpenAiClient from "./OpenAiClient.js"
10
+
11
+ /**
12
+ * @since 1.0.0
13
+ */
14
+ export * as OpenAiCompletions from "./OpenAiCompletions.js"
15
+
16
+ /**
17
+ * @since 1.0.0
18
+ */
19
+ export * as OpenAiConfig from "./OpenAiConfig.js"
20
+
21
+ /**
22
+ * @since 1.0.0
23
+ */
24
+ export * as OpenAiTokenizer from "./OpenAiTokenizer.js"