diff --git a/apps/server/src/routes/api/llm.spec.ts b/apps/server/src/routes/api/llm.spec.ts
index a1f1ca3c2..846b9ecc9 100644
--- a/apps/server/src/routes/api/llm.spec.ts
+++ b/apps/server/src/routes/api/llm.spec.ts
@@ -52,9 +52,9 @@ vi.mock("../../services/llm/ai_service_manager.js", () => ({
 
 // Mock chat pipeline
 const mockChatPipelineExecute = vi.fn();
-const MockChatPipeline = vi.fn().mockImplementation(function () {
-    this.execute = mockChatPipelineExecute;
-});
+class MockChatPipeline {
+    execute = mockChatPipelineExecute;
+}
 vi.mock("../../services/llm/pipeline/chat_pipeline.js", () => ({
     ChatPipeline: MockChatPipeline
 }));
diff --git a/apps/server/src/services/llm/ai_service_manager.spec.ts b/apps/server/src/services/llm/ai_service_manager.spec.ts
index 47c43c36e..bea473913 100644
--- a/apps/server/src/services/llm/ai_service_manager.spec.ts
+++ b/apps/server/src/services/llm/ai_service_manager.spec.ts
@@ -34,26 +34,29 @@ vi.mock('../log.js', () => ({
     }
 }));
 
-vi.mock('./providers/anthropic_service.js', () => ({
-    AnthropicService: vi.fn().mockImplementation(function () {
-        this.isAvailable = vi.fn().mockReturnValue(true);
-        this.generateChatCompletion = vi.fn();
-    })
-}));
+vi.mock('./providers/anthropic_service.js', () => {
+    class AnthropicService {
+        isAvailable = vi.fn().mockReturnValue(true);
+        generateChatCompletion = vi.fn();
+    }
+    return { AnthropicService };
+});
 
-vi.mock('./providers/openai_service.js', () => ({
-    OpenAIService: vi.fn().mockImplementation(function () {
-        this.isAvailable = vi.fn().mockReturnValue(true);
-        this.generateChatCompletion = vi.fn();
-    })
-}));
+vi.mock('./providers/openai_service.js', () => {
+    class OpenAIService {
+        isAvailable = vi.fn().mockReturnValue(true);
+        generateChatCompletion = vi.fn();
+    }
+    return { OpenAIService };
+});
 
-vi.mock('./providers/ollama_service.js', () => ({
-    OllamaService: vi.fn().mockImplementation(function () {
-        this.isAvailable = vi.fn().mockReturnValue(true);
-        this.generateChatCompletion = vi.fn();
-    })
-}));
+vi.mock('./providers/ollama_service.js', () => {
+    class OllamaService {
+        isAvailable = vi.fn().mockReturnValue(true);
+        generateChatCompletion = vi.fn();
+    }
+    return { OllamaService };
+});
 
 vi.mock('./config/configuration_helpers.js', () => ({
     getSelectedProvider: vi.fn(),
diff --git a/apps/server/src/services/llm/chat/rest_chat_service.spec.ts b/apps/server/src/services/llm/chat/rest_chat_service.spec.ts
index d0ba94617..c797c290b 100644
--- a/apps/server/src/services/llm/chat/rest_chat_service.spec.ts
+++ b/apps/server/src/services/llm/chat/rest_chat_service.spec.ts
@@ -38,11 +38,12 @@ vi.mock('../pipeline/chat_pipeline.js', () => ({
     }))
 }));
 
-vi.mock('./handlers/tool_handler.js', () => ({
-    ToolHandler: vi.fn().mockImplementation(function () {
-        this.handleToolCalls = vi.fn()
-    })
-}));
+vi.mock('./handlers/tool_handler.js', () => {
+    class ToolHandler {
+        handleToolCalls = vi.fn()
+    }
+    return { ToolHandler };
+});
 
 vi.mock('../chat_storage_service.js', () => ({
     default: {
diff --git a/apps/server/src/services/llm/chat_service.spec.ts b/apps/server/src/services/llm/chat_service.spec.ts
index c9d491057..578fc03da 100644
--- a/apps/server/src/services/llm/chat_service.spec.ts
+++ b/apps/server/src/services/llm/chat_service.spec.ts
@@ -35,24 +35,28 @@ vi.mock('./constants/llm_prompt_constants.js', () => ({
     }
 }));
 
-vi.mock('./pipeline/chat_pipeline.js', () => ({
-    ChatPipeline: vi.fn().mockImplementation(function (config) {
-        Object.assign(this, {
-            config,
-            execute: vi.fn(),
-            getMetrics: vi.fn(),
-            resetMetrics: vi.fn(),
-            stages: {
-                contextExtraction: {
-                    execute: vi.fn()
-                },
-                semanticContextExtraction: {
-                    execute: vi.fn()
-                }
+vi.mock('./pipeline/chat_pipeline.js', () => {
+    class ChatPipeline {
+        config: any;
+
+        constructor(config: any) {
+            this.config = config;
+        }
+
+        execute = vi.fn();
+        getMetrics = vi.fn();
+        resetMetrics = vi.fn();
+        stages = {
+            contextExtraction: {
+                execute: vi.fn()
+            },
+            semanticContextExtraction: {
+                execute: vi.fn()
             }
-        });
-    })
-}));
+        }
+    }
+    return { ChatPipeline };
+});
 
 vi.mock('./ai_service_manager.js', () => ({
     default: {
diff --git a/apps/server/src/services/llm/context/services/context_service.spec.ts b/apps/server/src/services/llm/context/services/context_service.spec.ts
index 486ec7179..66dce8e9f 100644
--- a/apps/server/src/services/llm/context/services/context_service.spec.ts
+++ b/apps/server/src/services/llm/context/services/context_service.spec.ts
@@ -46,11 +46,12 @@ vi.mock('../../ai_service_manager.js', () => ({
     }
 }));
 
-vi.mock('../index.js', () => ({
-    ContextExtractor: vi.fn().mockImplementation(function () {
-        this.findRelevantNotes = vi.fn().mockResolvedValue([])
-    })
-}));
+vi.mock('../index.js', () => {
+    class ContextExtractor {
+        findRelevantNotes = vi.fn().mockResolvedValue([])
+    }
+    return { ContextExtractor };
+});
 
 describe('ContextService', () => {
     let service: ContextService;
diff --git a/apps/server/src/services/llm/providers/anthropic_service.spec.ts b/apps/server/src/services/llm/providers/anthropic_service.spec.ts
index df48c1339..5a4e8b8e3 100644
--- a/apps/server/src/services/llm/providers/anthropic_service.spec.ts
+++ b/apps/server/src/services/llm/providers/anthropic_service.spec.ts
@@ -48,8 +48,8 @@ vi.mock('@anthropic-ai/sdk', () => {
         }
     };
 
-    const mockAnthropic = vi.fn().mockImplementation(function () {
-        this.messages = {
+    class MockAnthropic {
+        messages = {
             create: vi.fn().mockImplementation((params) => {
                 if (params.stream) {
                     return Promise.resolve(mockStream);
@@ -72,9 +72,9 @@ vi.mock('@anthropic-ai/sdk', () => {
                 });
             })
         };
-    });
+    }
 
-    return { default: mockAnthropic };
+    return { default: MockAnthropic };
 });
 
 describe('AnthropicService', () => {
diff --git a/apps/server/src/services/llm/providers/ollama_service.spec.ts b/apps/server/src/services/llm/providers/ollama_service.spec.ts
index 46b8072ea..2d4072742 100644
--- a/apps/server/src/services/llm/providers/ollama_service.spec.ts
+++ b/apps/server/src/services/llm/providers/ollama_service.spec.ts
@@ -29,12 +29,12 @@ vi.mock('./providers.js', () => ({
     getOllamaOptions: vi.fn()
 }));
 
-vi.mock('../formatters/ollama_formatter.js', () => ({
-    OllamaMessageFormatter: vi.fn().mockImplementation(function () {
-        this.formatMessages = vi.fn().mockReturnValue([
+vi.mock('../formatters/ollama_formatter.js', () => {
+    class MockFormatter {
+        formatMessages = vi.fn().mockReturnValue([
             { role: 'user', content: 'Hello' }
         ]);
-        this.formatResponse = vi.fn().mockReturnValue({
+        formatResponse = vi.fn().mockReturnValue({
             text: 'Hello! How can I help you today?',
             provider: 'Ollama',
             model: 'llama2',
@@ -45,8 +45,9 @@ vi.mock('../formatters/ollama_formatter.js', () => ({
             },
             tool_calls: null
         });
-    })
-}));
+    }
+    return { OllamaMessageFormatter: MockFormatter };
+});
 
 vi.mock('../tools/tool_registry.js', () => ({
     default: {
@@ -83,8 +84,8 @@ vi.mock('ollama', () => {
         }
     };
 
-    const mockOllama = vi.fn().mockImplementation(function () {
-        this.chat = vi.fn().mockImplementation((params) => {
+    class MockOllama {
+        chat = vi.fn().mockImplementation((params) => {
             if (params.stream) {
                 return Promise.resolve(mockStream);
             }
@@ -98,7 +99,7 @@ vi.mock('ollama', () => {
                 done: true
             });
         });
-        this.show = vi.fn().mockResolvedValue({
+        show = vi.fn().mockResolvedValue({
             modelfile: 'FROM llama2',
             parameters: {},
             template: '',
@@ -110,7 +111,7 @@ vi.mock('ollama', () => {
                 quantization_level: 'Q4_0'
             }
         });
-        this.list = vi.fn().mockResolvedValue({
+        list = vi.fn().mockResolvedValue({
             models: [
                 {
                     name: 'llama2:latest',
@@ -118,10 +119,10 @@ vi.mock('ollama', () => {
                     modified_at: new Date(),
                     size: 3800000000
                 }
             ]
-        })
-    });
+        });
+    }
 
-    return { Ollama: mockOllama };
+    return { Ollama: MockOllama };
 });
 
 // Mock global fetch