@luanpoppe/ai 1.0.5 → 1.0.6
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- package/.env.example +10 -0
- package/dist/@types/model-names.d.ts +4 -1
- package/dist/@types/model-names.d.ts.map +1 -1
- package/dist/index.d.ts +2 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +8 -0
- package/dist/index.js.map +1 -1
- package/dist/langchain/models.d.ts +4 -0
- package/dist/langchain/models.d.ts.map +1 -1
- package/dist/langchain/models.js +17 -0
- package/dist/langchain/models.js.map +1 -1
- package/dist/langchain/tools.d.ts +1 -1
- package/package.json +19 -9
- package/src/@types/model-names.ts +17 -1
- package/src/index.ts +10 -0
- package/src/langchain/models.ts +22 -0
- package/tests/e2e/README.md +47 -0
- package/tests/e2e/langchain.test.ts +477 -0
- package/tests/tsconfig.json +17 -0
- package/tests/unit/index.test.ts +355 -0
- package/tests/unit/langchain/messages.test.ts +101 -0
- package/tests/unit/langchain/models.test.ts +192 -0
- package/tests/unit/langchain/tools.test.ts +134 -0
- package/vitest.config.ts +24 -0
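
The unit tests in the hunks below use describe, it, expect, and vi without importing them, which suggests the new vitest.config.ts enables Vitest's globals option. That file's body is not reproduced in this diff, so the following is only a minimal sketch of a config consistent with that setup, not its actual contents:

// Hypothetical vitest.config.ts — a sketch, not the file shipped in 1.0.6
import { defineConfig } from "vitest/config";

export default defineConfig({
  test: {
    // Exposes describe/it/expect/vi as globals, as the tests below assume
    globals: true,
    // Assumed layout, based on the paths in the file list above
    include: ["tests/**/*.test.ts"],
  },
});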
package/tests/unit/index.test.ts
@@ -0,0 +1,355 @@
+import { Langchain } from "../../src/index";
+import { LangchainModels } from "../../src/langchain/models";
+import { createAgent } from "langchain";
+import { LangchainMessages } from "../../src/langchain/messages";
+import z from "zod";
+
+// Mock the dependencies
+vi.mock("langchain", async () => {
+  const actual = await vi.importActual("langchain");
+  return {
+    ...actual,
+    createAgent: vi.fn(),
+    modelRetryMiddleware: vi.fn((config) => ({ type: "retry", ...config })),
+    modelFallbackMiddleware: vi.fn((...models) => ({ type: "fallback", models })),
+  };
+});
+
+vi.mock("../../src/langchain/models", () => ({
+  LangchainModels: {
+    gpt: vi.fn(),
+    gemini: vi.fn(),
+  },
+}));
+
+describe("Langchain", () => {
+  let langchain: Langchain;
+  const mockTokens = {
+    openAIApiKey: "test-openai-key",
+    googleGeminiToken: "test-gemini-token",
+  };
+
+  beforeEach(() => {
+    vi.clearAllMocks();
+    langchain = new Langchain(mockTokens);
+  });
+
+  describe("constructor", () => {
+    it("should create an instance with the provided tokens", () => {
+      const instance = new Langchain(mockTokens);
+      expect(instance).toBeInstanceOf(Langchain);
+    });
+
+    it("should create an instance with only openAIApiKey", () => {
+      const instance = new Langchain({ openAIApiKey: "test-key" });
+      expect(instance).toBeInstanceOf(Langchain);
+    });
+
+    it("should create an instance with only googleGeminiToken", () => {
+      const instance = new Langchain({ googleGeminiToken: "test-token" });
+      expect(instance).toBeInstanceOf(Langchain);
+    });
+  });
+
+  describe("call", () => {
+    it("should call the agent and return the correct response", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [
+        LangchainMessages.human("Olá"),
+      ];
+      const mockResponse = {
+        messages: [
+          mockMessages[0],
+          { content: "Olá! Como posso ajudar?" } as any,
+        ],
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue(mockResponse),
+      } as any);
+
+      const result = await langchain.call({
+        aiModel: "gpt-4",
+        messages: mockMessages,
+      });
+
+      expect(createAgent).toHaveBeenCalled();
+      expect(result.text).toBe("Olá! Como posso ajudar?");
+      expect(result.messages).toEqual(mockResponse.messages);
+    });
+
+    it("should return the default message when the response is empty", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const mockResponse = {
+        messages: [
+          mockMessages[0],
+          { content: null } as any, // Last message has no content
+        ],
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue(mockResponse),
+      } as any);
+
+      const result = await langchain.call({
+        aiModel: "gpt-4",
+        messages: mockMessages,
+      });
+
+      expect(result.text).toBe("Empty response from the model");
+    });
+
+    it("should use the systemPrompt when provided", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const mockResponse = {
+        messages: [{ content: "Resposta" } as any],
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue(mockResponse),
+      } as any);
+
+      await langchain.call({
+        aiModel: "gpt-4",
+        messages: mockMessages,
+        systemPrompt: "Você é um assistente útil",
+      });
+
+      const callArgs = vi.mocked(createAgent).mock.calls[0][0];
+      expect(callArgs.systemPrompt).toBe("Você é um assistente útil");
+    });
+
+    it("should use the default maxRetries when not provided", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const mockResponse = {
+        messages: [{ content: "Resposta" } as any],
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue(mockResponse),
+      } as any);
+
+      await langchain.call({
+        aiModel: "gpt-4",
+        messages: mockMessages,
+      });
+
+      const callArgs = vi.mocked(createAgent).mock.calls[0][0];
+      expect(callArgs.middleware).toBeDefined();
+      expect(callArgs.middleware?.length).toBeGreaterThan(0);
+    });
+
+    it("should use a custom maxRetries when provided", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const mockResponse = {
+        messages: [{ content: "Resposta" } as any],
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue(mockResponse),
+      } as any);
+
+      await langchain.call({
+        aiModel: "gpt-4",
+        messages: mockMessages,
+        maxRetries: 5,
+      });
+
+      const callArgs = vi.mocked(createAgent).mock.calls[0][0];
+      expect(callArgs.middleware).toBeDefined();
+    });
+
+    it("should use a GPT model when aiModel starts with 'gpt'", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const mockResponse = {
+        messages: [{ content: "Resposta" } as any],
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue(mockResponse),
+      } as any);
+
+      await langchain.call({
+        aiModel: "gpt-4o",
+        messages: mockMessages,
+      });
+
+      expect(LangchainModels.gpt).toHaveBeenCalledWith({
+        model: "gpt-4o",
+        apiKey: mockTokens.openAIApiKey,
+      });
+    });
+
+    it("should use a Gemini model when aiModel starts with 'gemini'", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const mockResponse = {
+        messages: [{ content: "Resposta" } as any],
+      };
+
+      vi.mocked(LangchainModels.gemini).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue(mockResponse),
+      } as any);
+
+      await langchain.call({
+        aiModel: "gemini-2.5-flash",
+        messages: mockMessages,
+      });
+
+      expect(LangchainModels.gemini).toHaveBeenCalledWith({
+        model: "gemini-2.5-flash",
+        apiKey: mockTokens.googleGeminiToken,
+      });
+    });
+
+    it("should throw an error when the model is not supported", async () => {
+      const mockMessages = [LangchainMessages.human("Teste")];
+
+      await expect(
+        langchain.call({
+          aiModel: "unsupported-model" as any,
+          messages: mockMessages,
+        })
+      ).rejects.toThrow("Model not supported");
+    });
+
+    it("should pass modelConfig when provided", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const mockResponse = {
+        messages: [{ content: "Resposta" } as any],
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue(mockResponse),
+      } as any);
+
+      await langchain.call({
+        aiModel: "gpt-4",
+        messages: mockMessages,
+        modelConfig: {
+          maxTokens: 2000,
+          temperature: 0.7,
+        },
+      });
+
+      expect(LangchainModels.gpt).toHaveBeenCalledWith({
+        model: "gpt-4",
+        apiKey: mockTokens.openAIApiKey,
+        maxTokens: 2000,
+        temperature: 0.7,
+      });
+    });
+  });
+
+  describe("callStructuredOutput", () => {
+    it("should call the agent with an outputSchema and return the structured response", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const outputSchema = z.object({
+        name: z.string(),
+        age: z.number(),
+      });
+      const mockStructuredResponse = { name: "João", age: 30 };
+      const mockResponse = {
+        structuredResponse: mockStructuredResponse,
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue(mockResponse),
+      } as any);
+
+      const result = await langchain.callStructuredOutput({
+        aiModel: "gpt-4",
+        messages: mockMessages,
+        outputSchema,
+      });
+
+      expect(createAgent).toHaveBeenCalled();
+      expect(result.response).toEqual(mockStructuredResponse);
+    });
+
+    it("should validate the schema and throw if it is invalid", async () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const outputSchema = z.object({
+        name: z.string(),
+        age: z.number(),
+      });
+      const mockStructuredResponse = { name: "João", age: "30" }; // age should be a number
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue({
+        invoke: vi.fn().mockResolvedValue({
+          structuredResponse: mockStructuredResponse,
+        }),
+      } as any);
+
+      await expect(
+        langchain.callStructuredOutput({
+          aiModel: "gpt-4",
+          messages: mockMessages,
+          outputSchema,
+        })
+      ).rejects.toThrow();
+    });
+  });
+
+  describe("getRawAgent", () => {
+    it("should return an agent without an outputSchema", () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const mockAgent = {
+        invoke: vi.fn(),
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue(mockAgent as any);
+
+      const result = langchain.getRawAgent({
+        aiModel: "gpt-4",
+        messages: mockMessages,
+      });
+
+      expect(createAgent).toHaveBeenCalled();
+      expect(result.agent).toBe(mockAgent);
+    });
+
+    it("should return an agent with an outputSchema when provided", () => {
+      const mockModel = {} as any;
+      const mockMessages = [LangchainMessages.human("Teste")];
+      const outputSchema = z.object({ result: z.string() });
+      const mockAgent = {
+        invoke: vi.fn(),
+      };
+
+      vi.mocked(LangchainModels.gpt).mockReturnValue(mockModel);
+      vi.mocked(createAgent).mockReturnValue(mockAgent as any);
+
+      const result = langchain.getRawAgent(
+        {
+          aiModel: "gpt-4",
+          messages: mockMessages,
+        },
+        outputSchema
+      );
+
+      expect(createAgent).toHaveBeenCalled();
+      expect(result.agent).toBe(mockAgent);
+    });
+  });
+});
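
Taken together, these tests outline the public surface of the Langchain class: a constructor that takes provider tokens, call() for plain chat completions (with optional systemPrompt, maxRetries, and modelConfig), callStructuredOutput() for responses validated against a Zod schema, and getRawAgent() for direct access to the underlying createAgent instance. The usage sketch below is inferred from the assertions above, not from the package's documentation; the LangchainMessages import path in particular is an assumption.

// Hedged usage sketch inferred from the unit tests above.
import z from "zod";
import { Langchain } from "@luanpoppe/ai";
import { LangchainMessages } from "@luanpoppe/ai"; // assumed to be re-exported from the root

const ai = new Langchain({ openAIApiKey: process.env.OPENAI_API_KEY });

// Plain call: resolves to { text, messages }
const { text } = await ai.call({
  aiModel: "gpt-4o",
  messages: [LangchainMessages.human("Hello")],
  systemPrompt: "You are a helpful assistant",
  maxRetries: 5, // optional; the tests show retry middleware is applied by default
});

// Structured call: the result is validated against the Zod schema before being returned
const { response } = await ai.callStructuredOutput({
  aiModel: "gpt-4o",
  messages: [LangchainMessages.human("Extract the person from: João, 30 years old")],
  outputSchema: z.object({ name: z.string(), age: z.number() }),
});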
package/tests/unit/langchain/messages.test.ts
@@ -0,0 +1,101 @@
+import { LangchainMessages } from "../../../src/langchain/messages";
+import { SystemMessage, HumanMessage, AIMessage } from "langchain";
+
+// Mock langchain
+vi.mock("langchain", () => {
+  class MockSystemMessage {
+    constructor(public content: string) { }
+  }
+  class MockHumanMessage {
+    constructor(public content: string) { }
+  }
+  class MockAIMessage {
+    constructor(public content: string) { }
+  }
+  function SystemMessageConstructor(content: string) {
+    return new MockSystemMessage(content);
+  }
+  function HumanMessageConstructor(content: string) {
+    return new MockHumanMessage(content);
+  }
+  function AIMessageConstructor(content: string) {
+    return new MockAIMessage(content);
+  }
+  return {
+    SystemMessage: vi.fn(SystemMessageConstructor),
+    HumanMessage: vi.fn(HumanMessageConstructor),
+    AIMessage: vi.fn(AIMessageConstructor),
+  };
+});
+
+describe("LangchainMessages", () => {
+  beforeEach(() => {
+    vi.clearAllMocks();
+  });
+
+  describe("system", () => {
+    it("should create a SystemMessage with the provided message", () => {
+      const message = "Esta é uma mensagem do sistema";
+
+      const result = LangchainMessages.system(message);
+
+      expect(result).toBeDefined();
+      expect(result.content).toBe(message);
+      expect(SystemMessage).toHaveBeenCalledWith(message);
+    });
+
+    it("should create a SystemMessage with an empty string", () => {
+      const message = "";
+
+      const result = LangchainMessages.system(message);
+
+      expect(result).toBeDefined();
+      expect(result.content).toBe("");
+      expect(SystemMessage).toHaveBeenCalledWith("");
+    });
+  });
+
+  describe("human", () => {
+    it("should create a HumanMessage with the provided message", () => {
+      const message = "Esta é uma mensagem do usuário";
+
+      const result = LangchainMessages.human(message);
+
+      expect(result).toBeDefined();
+      expect(result.content).toBe(message);
+      expect(HumanMessage).toHaveBeenCalledWith(message);
+    });
+
+    it("should create a HumanMessage with an empty string", () => {
+      const message = "";
+
+      const result = LangchainMessages.human(message);
+
+      expect(result).toBeDefined();
+      expect(result.content).toBe("");
+      expect(HumanMessage).toHaveBeenCalledWith("");
+    });
+  });
+
+  describe("ai", () => {
+    it("should create an AIMessage with the provided message", () => {
+      const message = "Esta é uma mensagem da IA";
+
+      const result = LangchainMessages.ai(message);
+
+      expect(result).toBeDefined();
+      expect(result.content).toBe(message);
+      expect(AIMessage).toHaveBeenCalledWith(message);
+    });
+
+    it("should create an AIMessage with an empty string", () => {
+      const message = "";
+
+      const result = LangchainMessages.ai(message);
+
+      expect(result).toBeDefined();
+      expect(result.content).toBe("");
+      expect(AIMessage).toHaveBeenCalledWith("");
+    });
+  });
+});
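
These tests pin LangchainMessages down as thin wrappers around langchain's message constructors, each forwarding its string argument unchanged. Below is a minimal sketch consistent with those assertions; the package's real source lives in package/src/langchain/messages.ts and is not part of this diff.

// Sketch only — an implementation consistent with the assertions above, not the published source.
import { SystemMessage, HumanMessage, AIMessage } from "langchain";

export class LangchainMessages {
  static system(message: string) {
    return new SystemMessage(message);
  }

  static human(message: string) {
    return new HumanMessage(message);
  }

  static ai(message: string) {
    return new AIMessage(message);
  }
}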
package/tests/unit/langchain/models.test.ts
@@ -0,0 +1,192 @@
+import { LangchainModels, LLMModelConfig } from "../../../src/langchain/models";
+import { ChatOpenAI } from "@langchain/openai";
+import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
+
+// Mock the dependencies
+vi.mock("@langchain/openai", () => ({
+  ChatOpenAI: vi.fn(),
+}));
+
+vi.mock("@langchain/google-genai", () => ({
+  ChatGoogleGenerativeAI: vi.fn(),
+}));
+
+describe("LangchainModels", () => {
+  beforeEach(() => {
+    vi.clearAllMocks();
+  });
+
+  describe("gpt", () => {
+    it("should create a ChatOpenAI instance with basic settings", () => {
+      const config: LLMModelConfig = {
+        model: "gpt-4",
+        apiKey: "test-api-key",
+      };
+
+      LangchainModels.gpt(config);
+
+      expect(ChatOpenAI).toHaveBeenCalledWith({
+        model: "gpt-4",
+        apiKey: "test-api-key",
+      });
+    });
+
+    it("should create a ChatOpenAI instance with maxTokens", () => {
+      const config: LLMModelConfig = {
+        model: "gpt-4o",
+        apiKey: "test-api-key",
+        maxTokens: 1000,
+      };
+
+      LangchainModels.gpt(config);
+
+      expect(ChatOpenAI).toHaveBeenCalledWith({
+        model: "gpt-4o",
+        apiKey: "test-api-key",
+        maxTokens: 1000,
+      });
+    });
+
+    it("should create a ChatOpenAI instance with temperature", () => {
+      const config: LLMModelConfig = {
+        model: "gpt-4",
+        apiKey: "test-api-key",
+        temperature: 0.7,
+      };
+
+      LangchainModels.gpt(config);
+
+      expect(ChatOpenAI).toHaveBeenCalledWith({
+        model: "gpt-4",
+        apiKey: "test-api-key",
+        temperature: 0.7,
+      });
+    });
+
+    it("should create a ChatOpenAI instance with all options", () => {
+      const config: LLMModelConfig = {
+        model: "gpt-4o",
+        apiKey: "test-api-key",
+        maxTokens: 2000,
+        temperature: 0.5,
+      };
+
+      LangchainModels.gpt(config);
+
+      expect(ChatOpenAI).toHaveBeenCalledWith({
+        model: "gpt-4o",
+        apiKey: "test-api-key",
+        maxTokens: 2000,
+        temperature: 0.5,
+      });
+    });
+
+    it("should throw an error when apiKey is not provided", () => {
+      const config: LLMModelConfig = {
+        model: "gpt-4",
+      };
+
+      expect(() => LangchainModels.gpt(config)).toThrow(
+        "OpenAI API key is not passed in the model parameters"
+      );
+    });
+
+    it("should throw an error when apiKey is undefined", () => {
+      const config: LLMModelConfig = {
+        model: "gpt-4",
+        apiKey: undefined,
+      };
+
+      expect(() => LangchainModels.gpt(config)).toThrow(
+        "OpenAI API key is not passed in the model parameters"
+      );
+    });
+  });
+
+  describe("gemini", () => {
+    it("should create a ChatGoogleGenerativeAI instance with basic settings", () => {
+      const config: LLMModelConfig = {
+        model: "gemini-2.5-flash",
+        apiKey: "test-api-key",
+      };
+
+      LangchainModels.gemini(config);
+
+      expect(ChatGoogleGenerativeAI).toHaveBeenCalledWith({
+        model: "gemini-2.5-flash",
+        apiKey: "test-api-key",
+      });
+    });
+
+    it("should create a ChatGoogleGenerativeAI instance with maxTokens", () => {
+      const config: LLMModelConfig = {
+        model: "gemini-2.5-pro",
+        apiKey: "test-api-key",
+        maxTokens: 1500,
+      };
+
+      LangchainModels.gemini(config);
+
+      expect(ChatGoogleGenerativeAI).toHaveBeenCalledWith({
+        model: "gemini-2.5-pro",
+        apiKey: "test-api-key",
+        maxOutputTokens: 1500,
+      });
+    });
+
+    it("should create a ChatGoogleGenerativeAI instance with temperature", () => {
+      const config: LLMModelConfig = {
+        model: "gemini-3-flash",
+        apiKey: "test-api-key",
+        temperature: 0.8,
+      };
+
+      LangchainModels.gemini(config);
+
+      expect(ChatGoogleGenerativeAI).toHaveBeenCalledWith({
+        model: "gemini-3-flash",
+        apiKey: "test-api-key",
+        temperature: 0.8,
+      });
+    });
+
+    it("should create a ChatGoogleGenerativeAI instance with all options", () => {
+      const config: LLMModelConfig = {
+        model: "gemini-3-pro",
+        apiKey: "test-api-key",
+        maxTokens: 3000,
+        temperature: 0.6,
+      };
+
+      LangchainModels.gemini(config);
+
+      expect(ChatGoogleGenerativeAI).toHaveBeenCalledWith({
+        model: "gemini-3-pro",
+        apiKey: "test-api-key",
+        maxOutputTokens: 3000,
+        temperature: 0.6,
+      });
+    });
+
+    it("should throw an error when apiKey is not provided", () => {
+      const config: LLMModelConfig = {
+        model: "gemini-2.5-flash",
+      };
+
+      expect(() => LangchainModels.gemini(config)).toThrow(
+        "Google Gemini API key is not passed in the model parameters"
+      );
+    });
+
+    it("should throw an error when apiKey is undefined", () => {
+      const config: LLMModelConfig = {
+        model: "gemini-2.5-flash",
+        apiKey: undefined,
+      };
+
+      expect(() => LangchainModels.gemini(config)).toThrow(
+        "Google Gemini API key is not passed in the model parameters"
+      );
+    });
+  });
+});
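
One detail worth noticing in the gemini assertions: the shared maxTokens option is passed through to ChatGoogleGenerativeAI as maxOutputTokens, while ChatOpenAI receives it as maxTokens. A hedged sketch of a factory consistent with those expectations follows; the actual implementation is in package/src/langchain/models.ts, whose diff body is not reproduced here, and the LLMModelConfig type is simplified to plain strings.

// Sketch only — consistent with the test expectations above, not the published source.
import { ChatOpenAI } from "@langchain/openai";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

export type LLMModelConfig = {
  model: string; // the real package constrains this via its model-names types
  apiKey?: string;
  maxTokens?: number;
  temperature?: number;
};

export class LangchainModels {
  static gpt({ model, apiKey, maxTokens, temperature }: LLMModelConfig) {
    if (!apiKey) throw new Error("OpenAI API key is not passed in the model parameters");
    // Spread options conditionally so unset ones don't appear in the constructor
    // call, matching the exact-object assertions in the tests
    return new ChatOpenAI({
      model,
      apiKey,
      ...(maxTokens !== undefined && { maxTokens }),
      ...(temperature !== undefined && { temperature }),
    });
  }

  static gemini({ model, apiKey, maxTokens, temperature }: LLMModelConfig) {
    if (!apiKey) throw new Error("Google Gemini API key is not passed in the model parameters");
    return new ChatGoogleGenerativeAI({
      model,
      apiKey,
      // Gemini's client names the output cap maxOutputTokens rather than maxTokens
      ...(maxTokens !== undefined && { maxOutputTokens: maxTokens }),
      ...(temperature !== undefined && { temperature }),
    });
  }
}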