Mirror of https://github.com/zadam/trilium.git (synced 2025-10-19 22:58:52 +02:00)

commit c671f91bca (parent fa436c7ce6)

    test(server): fix test failures
@@ -16,7 +16,8 @@ vi.mock("../../services/ws.js", () => ({
     default: {
         sendMessageToAllClients: vi.fn(),
         sendTransactionEntityChangesToAllClients: vi.fn(),
-        setLastSyncedPush: vi.fn()
+        setLastSyncedPush: vi.fn(),
+        syncFailed() {}
     }
 }));
 
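This first hunk is the substance of the fix: the `vi.mock` factory for `../../services/ws.js` gains a no-op `syncFailed()` member (plus a trailing comma on `setLastSyncedPush`). A factory mock replaces the entire module, so any member the factory omits resolves to `undefined`, and calling it throws. A minimal, self-contained sketch of that failure mode, with illustrative helper names rather than the repo's real code:

```ts
import { describe, expect, it, vi } from "vitest";

// Illustrative stand-in for the object a vi.mock() factory returns
// for "../../services/ws.js" -- not the repo's actual service.
function makeWsMock(includeSyncFailed: boolean) {
    const mock: Record<string, (...args: any[]) => any> = {
        sendMessageToAllClients: vi.fn(),
        sendTransactionEntityChangesToAllClients: vi.fn(),
        setLastSyncedPush: vi.fn()
    };
    if (includeSyncFailed) {
        mock.syncFailed = () => {};
    }
    return mock;
}

// Illustrative stand-in for production code that reports a failed sync.
function reportSyncFailure(ws: Record<string, (...args: any[]) => any>) {
    ws.syncFailed(); // TypeError if the mock omits this member
    ws.sendMessageToAllClients({ type: "sync-failed" });
}

describe("ws mock completeness", () => {
    it("throws when the mock omits a member the code calls", () => {
        expect(() => reportSyncFailure(makeWsMock(false))).toThrow(TypeError);
    });

    it("passes once syncFailed is part of the mock", () => {
        const ws = makeWsMock(true);
        expect(() => reportSyncFailure(ws)).not.toThrow();
        expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({ type: "sync-failed" });
    });
});
```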
@@ -81,7 +82,7 @@ async function loginWithSession(app: Application)
         .post("/login")
         .send({ password: "demo1234" })
         .expect(302);
 
     const setCookieHeader = response.headers["set-cookie"][0];
     expect(setCookieHeader).toBeTruthy();
     return setCookieHeader;
@@ -91,14 +92,14 @@ async function loginWithSession(app: Application)
 async function getCsrfToken(app: Application, sessionCookie: string) {
     const response = await supertest(app)
         .get("/")
 
         .expect(200);
 
     const csrfTokenMatch = response.text.match(/csrfToken: '([^']+)'/);
     if (csrfTokenMatch) {
         return csrfTokenMatch[1];
     }
 
     throw new Error("CSRF token not found in response");
 }
 
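These two helpers drive the session flow the whole suite depends on: log in, capture the session cookie, then scrape the CSRF token out of the rendered index page. Several rows in these hunks came through blank in this view; judging by the surrounding call chains they carried `.set(...)` lines, but the exact content is not recoverable. A hedged sketch of how the helpers combine, with assumed header names:

```ts
// Hedged sketch of an authenticated, CSRF-protected request built from
// the two helpers above. The "Cookie" and "x-csrf-token" header names
// are assumptions -- the blank rows in this view hide the file's actual
// .set(...) calls.
import supertest from "supertest";
import type { Application } from "express";

// Signatures of the helpers defined in the hunks above.
declare function loginWithSession(app: Application): Promise<string>;
declare function getCsrfToken(app: Application, sessionCookie: string): Promise<string>;

async function postAuthenticated(app: Application, path: string, body: object) {
    const sessionCookie = await loginWithSession(app);
    const csrfToken = await getCsrfToken(app, sessionCookie);

    return supertest(app)
        .post(path)
        .set("Cookie", sessionCookie)      // assumed: attach the session
        .set("x-csrf-token", csrfToken)    // assumed: satisfy CSRF middleware
        .send(body)
        .expect(200);
}

// Usage: await postAuthenticated(app, "/api/llm/chat", { title: "Example Chat" });
```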
@@ -154,7 +155,7 @@ describe("LLM API Tests", () => {
 
             expect(response.body).toHaveProperty('sessions');
             expect(Array.isArray(response.body.sessions)).toBe(true);
 
             if (response.body.sessions.length > 0) {
                 expect(response.body.sessions[0]).toMatchObject({
                     id: expect.any(String),
@@ -171,18 +172,18 @@ describe("LLM API Tests", () => {
                 // Create a chat first if we don't have one
                 const createResponse = await supertest(app)
                     .post("/api/llm/chat")
 
                     .send({
                         title: "Test Retrieval Chat"
                     })
                     .expect(200);
 
                 createdChatId = createResponse.body.id;
             }
 
             const response = await supertest(app)
                 .get(`/api/llm/chat/${createdChatId}`)
 
                 .expect(200);
 
             expect(response.body).toMatchObject({
@@ -202,7 +203,7 @@ describe("LLM API Tests", () => {
                         title: "Test Update Chat"
                     })
                     .expect(200);
 
                 createdChatId = createResponse.body.id;
             }
 
@@ -224,7 +225,7 @@ describe("LLM API Tests", () => {
         it("should return 404 for non-existent chat session", async () => {
             await supertest(app)
                 .get("/api/llm/chat/nonexistent-chat-id")
 
                 .expect(404);
         });
     });
@@ -240,7 +241,7 @@ describe("LLM API Tests", () => {
                     title: "Message Test Chat"
                 })
                 .expect(200);
 
             testChatId = createResponse.body.id;
         });
 
@@ -260,10 +261,10 @@ describe("LLM API Tests", () => {
             // The response depends on whether AI is actually configured
             // We should get either a successful response or an error about AI not being configured
             expect([200, 400, 500]).toContain(response.status);
 
             // All responses should have some body
             expect(response.body).toBeDefined();
 
             // Either success with response or error
             if (response.body.response) {
                 expect(response.body).toMatchObject({
@@ -310,10 +311,10 @@ describe("LLM API Tests", () => {
         beforeEach(async () => {
             // Reset all mocks
             vi.clearAllMocks();
 
             // Import options service to access mock
             const options = (await import("../../services/options.js")).default;
 
             // Setup default mock behaviors
             (options.getOptionBool as any).mockReturnValue(true); // AI enabled
             mockAiServiceManager.getOrCreateAnyService.mockResolvedValue({});
@@ -321,7 +322,7 @@ describe("LLM API Tests", () => {
                 model: 'test-model',
                 provider: 'test-provider'
             });
 
             // Create a fresh chat for each test
             const mockChat = {
                 id: 'streaming-test-chat',
@@ -331,15 +332,15 @@ describe("LLM API Tests", () => {
             };
             mockChatStorage.createChat.mockResolvedValue(mockChat);
             mockChatStorage.getChat.mockResolvedValue(mockChat);
 
             const createResponse = await supertest(app)
                 .post("/api/llm/chat")
 
                 .send({
                     title: "Streaming Test Chat"
                 })
                 .expect(200);
 
             testChatId = createResponse.body.id;
         });
 
@@ -358,7 +359,7 @@ describe("LLM API Tests", () => {
 
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: "Tell me a short story",
                     useAdvancedContext: false,
@@ -372,17 +373,17 @@ describe("LLM API Tests", () => {
                 success: true,
                 message: "Streaming initiated successfully"
             });
 
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
             // Verify WebSocket messages were sent
             expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
                 type: 'llm-stream',
                 chatNoteId: testChatId,
                 thinking: undefined
             });
 
             // Verify streaming chunks were sent
             expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
                 type: 'llm-stream',
@@ -390,7 +391,7 @@ describe("LLM API Tests", () => {
                 content: 'Hello',
                 done: false
             });
 
             expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
                 type: 'llm-stream',
                 chatNoteId: testChatId,
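The assertions above pin down the streaming transport contract: the POST itself only acknowledges with `{ success: true, message: "Streaming initiated successfully" }`, while the actual output travels over the WebSocket as `llm-stream` frames keyed by `chatNoteId`, carrying `content`, `thinking`, and `done` fields. A hedged sketch of a client-side consumer for that shape (field names come from the assertions; the accumulator itself is illustrative):

```ts
// Field names are taken from the assertions above; the accumulator is
// an illustrative sketch, not Trilium's client code.
interface LlmStreamMessage {
    type: "llm-stream";
    chatNoteId: string;
    content?: string;   // text chunk, e.g. { content: 'Hello', done: false }
    thinking?: string;  // intermediate "thinking" updates
    done?: boolean;     // final frame for this chat
}

function makeStreamAccumulator(chatNoteId: string, onDone: (fullText: string) => void) {
    let fullText = "";
    return (msg: LlmStreamMessage) => {
        if (msg.type !== "llm-stream" || msg.chatNoteId !== chatNoteId) {
            return; // frame belongs to another chat
        }
        if (msg.content) {
            fullText += msg.content;
        }
        if (msg.done) {
            onDone(fullText);
        }
    };
}

// Usage: feed each parsed WebSocket frame to the accumulator.
const onFrame = makeStreamAccumulator("chat-1", text => console.log(text));
onFrame({ type: "llm-stream", chatNoteId: "chat-1", content: "Hello", done: false });
onFrame({ type: "llm-stream", chatNoteId: "chat-1", content: " world", done: true });
// logs "Hello world"
```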
@@ -402,7 +403,7 @@ describe("LLM API Tests", () => {
         it("should handle empty content for streaming", async () => {
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: "",
                     useAdvancedContext: false,
@@ -419,7 +420,7 @@ describe("LLM API Tests", () => {
         it("should handle whitespace-only content for streaming", async () => {
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: " \n\t ",
                     useAdvancedContext: false,
@@ -436,7 +437,7 @@ describe("LLM API Tests", () => {
         it("should handle invalid chat ID for streaming", async () => {
             const response = await supertest(app)
                 .post("/api/llm/chat/invalid-chat-id/messages/stream")
 
                 .send({
                     content: "Hello",
                     useAdvancedContext: false,
@@ -467,7 +468,7 @@ describe("LLM API Tests", () => {
                 // Verify mention content is included
                 expect(input.query).toContain('Tell me about this note');
                 expect(input.query).toContain('Root note content for testing');
 
                 const callback = input.streamCallback;
                 await callback('The root note contains', false, {});
                 await callback(' important information.', true, {});
@@ -475,7 +476,7 @@ describe("LLM API Tests", () => {
 
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: "Tell me about this note",
                     useAdvancedContext: true,
@@ -493,10 +494,10 @@ describe("LLM API Tests", () => {
                 success: true,
                 message: "Streaming initiated successfully"
             });
 
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
             // Verify thinking message was sent
             expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
                 type: 'llm-stream',
@@ -517,7 +518,7 @@ describe("LLM API Tests", () => {
 
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: "What is the meaning of life?",
                     useAdvancedContext: false,
@@ -525,10 +526,10 @@ describe("LLM API Tests", () => {
             });
 
             expect(response.status).toBe(200);
 
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
             // Verify thinking messages
             expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
                 type: 'llm-stream',
@@ -536,7 +537,7 @@ describe("LLM API Tests", () => {
                 thinking: 'Analyzing the question...',
                 done: false
             });
 
             expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
                 type: 'llm-stream',
                 chatNoteId: testChatId,
@@ -564,7 +565,7 @@ describe("LLM API Tests", () => {
 
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: "What is 2 + 2?",
                     useAdvancedContext: false,
@@ -572,10 +573,10 @@ describe("LLM API Tests", () => {
             });
 
             expect(response.status).toBe(200);
 
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
             // Verify tool execution message
             expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
                 type: 'llm-stream',
@@ -597,7 +598,7 @@ describe("LLM API Tests", () => {
 
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: "This will fail",
                     useAdvancedContext: false,
@@ -605,10 +606,10 @@ describe("LLM API Tests", () => {
             });
 
             expect(response.status).toBe(200); // Still returns 200
 
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
             // Verify error message was sent via WebSocket
             expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
                 type: 'llm-stream',
@@ -625,7 +626,7 @@ describe("LLM API Tests", () => {
 
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: "Hello AI",
                     useAdvancedContext: false,
@@ -633,10 +634,10 @@ describe("LLM API Tests", () => {
             });
 
             expect(response.status).toBe(200);
 
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
             // Verify error message about AI being disabled
             expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
                 type: 'llm-stream',
@@ -655,7 +656,7 @@ describe("LLM API Tests", () => {
 
             await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: "Save this response",
                     useAdvancedContext: false,
@@ -680,10 +681,10 @@ describe("LLM API Tests", () => {
             });
 
             // Send multiple requests rapidly
             const promises = Array.from({ length: 3 }, (_, i) =>
                 supertest(app)
                     .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                     .send({
                         content: `Request ${i + 1}`,
                         useAdvancedContext: false,
@@ -692,7 +693,7 @@ describe("LLM API Tests", () => {
             );
 
             const responses = await Promise.all(promises);
 
             // All should succeed
             responses.forEach(response => {
                 expect(response.status).toBe(200);
@@ -716,7 +717,7 @@ describe("LLM API Tests", () => {
 
             const response = await supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
 
                 .send({
                     content: "Generate large response",
                     useAdvancedContext: false,
@@ -724,10 +725,10 @@ describe("LLM API Tests", () => {
             });
 
             expect(response.status).toBe(200);
 
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
             // Verify multiple chunks were sent
             const streamCalls = (ws.sendMessageToAllClients as any).mock.calls.filter(
                 call => call[0].type === 'llm-stream' && call[0].content
@@ -741,7 +742,7 @@ describe("LLM API Tests", () => {
             const response = await supertest(app)
                 .post("/api/llm/chat")
                 .set('Content-Type', 'application/json')
 
                 .send('{ invalid json }');
 
             expect([400, 500]).toContain(response.status);
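The `[400, 500]` tolerance reflects that the status of a malformed body depends on the app's error middleware: `express.json()` raises a parse error flagged with status 400, but without a handler that recognizes it, a generic 500 can surface instead. A sketch of middleware that pins the status to 400 (illustrative, not Trilium's actual handler):

```ts
import express, { type NextFunction, type Request, type Response } from "express";

const app = express();
app.use(express.json());

app.post("/api/llm/chat", (_req, res) => {
    res.json({ id: "example" });
});

// body-parser tags JSON parse failures with type "entity.parse.failed"
// and status 400; surface that instead of a generic 500.
app.use((err: any, _req: Request, res: Response, next: NextFunction) => {
    if (err?.type === "entity.parse.failed") {
        return res.status(400).json({ error: "malformed JSON body" });
    }
    next(err); // everything else falls through to the default handler
});

// With this in place, sending '{ invalid json }' with a JSON
// Content-Type yields a deterministic 400 rather than a 500.
```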
@@ -750,7 +751,7 @@ describe("LLM API Tests", () => {
         it("should handle missing required fields", async () => {
             const response = await supertest(app)
                 .post("/api/llm/chat")
 
                 .send({
                     // Missing required fields
                 });
@@ -762,7 +763,7 @@ describe("LLM API Tests", () => {
         it("should handle invalid parameter types", async () => {
             const response = await supertest(app)
                 .post("/api/llm/chat")
 
                 .send({
                     title: "Test Chat",
                     temperature: "invalid", // Should be number
@@ -786,4 +787,4 @@ describe("LLM API Tests", () => {
             }
         }
     });
 });

@@ -1,6 +1,6 @@
-import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { describe, it, expect, vi, beforeEach } from 'vitest';
 import { StreamProcessor, createStreamHandler, processProviderStream, extractStreamStats, performProviderHealthCheck } from './stream_handler.js';
-import type { StreamProcessingOptions, StreamChunk, ProviderStreamOptions } from './stream_handler.js';
+import type { StreamProcessingOptions, StreamChunk } from './stream_handler.js';
 
 // Mock the log module
 vi.mock('../../log.js', () => ({
@@ -86,7 +86,7 @@ describe('StreamProcessor', () => {
 
     it('should handle callback errors gracefully', async () => {
         const errorCallback = vi.fn().mockRejectedValue(new Error('Callback error'));
 
         // Should not throw
         await expect(StreamProcessor.sendChunkToCallback(errorCallback, 'test', false, {}, 1))
             .resolves.toBeUndefined();
@@ -127,7 +127,7 @@ describe('StreamProcessor', () => {
 
     it('should handle final callback errors gracefully', async () => {
         const errorCallback = vi.fn().mockRejectedValue(new Error('Final callback error'));
 
         await expect(StreamProcessor.sendFinalCallback(errorCallback, 'test'))
             .resolves.toBeUndefined();
     });
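Both tests assert the same contract: a rejecting consumer callback must not reject the send helper itself. A minimal sketch of that contract, with the signature inferred from the call site (the real implementation lives in `stream_handler.ts` and may differ):

```ts
// Inferred from the call site sendChunkToCallback(cb, 'test', false, {}, 1);
// a sketch of the contract the tests pin down, not the real code.
type StreamCallback = (text: string, done: boolean, extra: object) => void | Promise<void>;

async function sendChunkToCallbackSketch(
    callback: StreamCallback,
    text: string,
    done: boolean,
    extra: object,
    chunkNumber: number
): Promise<void> {
    try {
        await callback(text, done, extra);
    } catch (err) {
        // Swallow and log: one failing consumer must not abort the stream.
        console.error(`stream callback failed on chunk #${chunkNumber}:`, err);
    }
}

// Demo: resolves to undefined even though the callback rejects,
// matching `.resolves.toBeUndefined()` in the tests above.
sendChunkToCallbackSketch(async () => { throw new Error("Callback error"); }, "test", false, {}, 1)
    .then(result => console.log(result)); // undefined
```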
@@ -297,8 +297,8 @@ describe('processProviderStream', () => {
     it('should handle tool calls in stream', async () => {
         const chunks = [
             { message: { content: 'Using tool...' } },
             {
                 message: {
                     tool_calls: [
                         { id: 'call_1', function: { name: 'calculator', arguments: '{"x": 5}' } }
                     ]
@@ -573,8 +573,8 @@ describe('Streaming edge cases and concurrency', () => {
     it('should handle mixed content and tool calls', async () => {
         const chunks = [
             { message: { content: 'Let me calculate that...' } },
             {
                 message: {
                     content: '',
                     tool_calls: [{ id: '1', function: { name: 'calc' } }]
                 }
@@ -599,4 +599,4 @@ describe('Streaming edge cases and concurrency', () => {
         expect(result.completeText).toBe('Let me calculate that...The answer is 42.');
         expect(result.toolCalls).toHaveLength(1);
     });
 });
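Taken together, the tool-call and mixed-content cases describe how `processProviderStream` is expected to fold a provider's chunk sequence into a result: `content` fragments concatenate into `completeText`, and any `tool_calls` encountered along the way are collected. A hedged sketch of that accumulation, with shapes taken from the test fixtures (the fold is illustrative, not the real implementation):

```ts
// Chunk and result shapes mirror the test fixtures above; the fold is a
// sketch of the behavior the expectations describe, not the real code.
interface ToolCall {
    id: string;
    function: { name: string; arguments?: string };
}

interface ProviderChunk {
    message?: { content?: string; tool_calls?: ToolCall[] };
}

function foldChunks(chunks: ProviderChunk[]): { completeText: string; toolCalls: ToolCall[] } {
    let completeText = "";
    const toolCalls: ToolCall[] = [];
    for (const chunk of chunks) {
        if (chunk.message?.content) {
            completeText += chunk.message.content;
        }
        if (chunk.message?.tool_calls) {
            toolCalls.push(...chunk.message.tool_calls);
        }
    }
    return { completeText, toolCalls };
}

// Mirrors the mixed-content fixture: text, a tool call, then more text.
const result = foldChunks([
    { message: { content: "Let me calculate that..." } },
    { message: { content: "", tool_calls: [{ id: "1", function: { name: "calc" } }] } },
    { message: { content: "The answer is 42." } }
]);
// result.completeText === "Let me calculate that...The answer is 42."
// result.toolCalls.length === 1
```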