Mirror of https://github.com/zadam/trilium.git
Commit 35f244cf50 (parent fa30bfc04b)
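
This commit rewrites the arrow-function mockImplementations in the LLM test suites as regular functions that assign their stubs onto `this`. Arrow functions have no [[Construct]] slot and cannot themselves be invoked with `new`, so when a mocked class such as ChatPipeline is instantiated, a regular-function implementation is the reliable way to have every `new` call produce an instance carrying the stubbed methods. A minimal sketch of the two forms (names are illustrative, not from the diff):

import { expect, test, vi } from 'vitest';

// Before: the implementation returns a plain object. Arrow functions are not
// constructable on their own, so how `new` behaves here depends entirely on
// the mock wrapper's internals.
const ArrowMock = vi.fn().mockImplementation(() => ({ run: vi.fn() }));

// After: a regular function can be called with `new`, and assignments to
// `this` land on the freshly constructed instance.
const FunctionMock = vi.fn().mockImplementation(function (this: any) {
    this.run = vi.fn();
});

test('function-based mock implementations are constructable', () => {
    const instance = new (FunctionMock as any)();
    expect(instance.run).toBeTypeOf('function');
});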
@@ -52,9 +52,9 @@ vi.mock("../../services/llm/ai_service_manager.js", () => ({
 // Mock chat pipeline
 const mockChatPipelineExecute = vi.fn();
-const MockChatPipeline = vi.fn().mockImplementation(() => ({
-    execute: mockChatPipelineExecute
-}));
+const MockChatPipeline = vi.fn().mockImplementation(function () {
+    this.execute = mockChatPipelineExecute;
+});
 vi.mock("../../services/llm/pipeline/chat_pipeline.js", () => ({
     ChatPipeline: MockChatPipeline
 }));
 
@@ -35,24 +35,24 @@ vi.mock('../log.js', () => ({
 }));
 
 vi.mock('./providers/anthropic_service.js', () => ({
-    AnthropicService: vi.fn().mockImplementation(() => ({
-        isAvailable: vi.fn().mockReturnValue(true),
-        generateChatCompletion: vi.fn()
-    }))
+    AnthropicService: vi.fn().mockImplementation(function () {
+        this.isAvailable = vi.fn().mockReturnValue(true);
+        this.generateChatCompletion = vi.fn();
+    })
 }));
 
 vi.mock('./providers/openai_service.js', () => ({
-    OpenAIService: vi.fn().mockImplementation(() => ({
-        isAvailable: vi.fn().mockReturnValue(true),
-        generateChatCompletion: vi.fn()
-    }))
+    OpenAIService: vi.fn().mockImplementation(function () {
+        this.isAvailable = vi.fn().mockReturnValue(true);
+        this.generateChatCompletion = vi.fn();
+    })
 }));
 
 vi.mock('./providers/ollama_service.js', () => ({
-    OllamaService: vi.fn().mockImplementation(() => ({
-        isAvailable: vi.fn().mockReturnValue(true),
-        generateChatCompletion: vi.fn()
-    }))
+    OllamaService: vi.fn().mockImplementation(function () {
+        this.isAvailable = vi.fn().mockReturnValue(true);
+        this.generateChatCompletion = vi.fn();
+    })
 }));
 
 vi.mock('./config/configuration_helpers.js', () => ({
@@ -65,7 +65,7 @@ vi.mock('./config/configuration_helpers.js', () => ({
 }));
 
 vi.mock('./context/index.js', () => ({
-    ContextExtractor: vi.fn().mockImplementation(() => ({}))
+    ContextExtractor: vi.fn().mockImplementation(function () {})
 }));
 
 vi.mock('./context_extractors/index.js', () => ({
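
All three provider mocks above follow the same recipe, so the code under test can keep calling `new AnthropicService()` (and friends) unchanged. A hedged sketch of how one of these function-based class mocks behaves from the consumer side (the test body is illustrative, not part of the commit):

import { expect, test, vi } from 'vitest';

vi.mock('./providers/openai_service.js', () => ({
    OpenAIService: vi.fn().mockImplementation(function (this: any) {
        this.isAvailable = vi.fn().mockReturnValue(true);
        this.generateChatCompletion = vi.fn();
    })
}));

test('mocked provider class yields working instances', async () => {
    const { OpenAIService } = await import('./providers/openai_service.js');
    const service = new (OpenAIService as any)();
    expect(service.isAvailable()).toBe(true); // stub assigned in the constructor
});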
@@ -39,9 +39,9 @@ vi.mock('../pipeline/chat_pipeline.js', () => ({
 }));
 
 vi.mock('./handlers/tool_handler.js', () => ({
-    ToolHandler: vi.fn().mockImplementation(() => ({
-        handleToolCalls: vi.fn()
-    }))
+    ToolHandler: vi.fn().mockImplementation(function () {
+        this.handleToolCalls = vi.fn();
+    })
 }));
 
 vi.mock('../chat_storage_service.js', () => ({
@@ -36,20 +36,22 @@ vi.mock('./constants/llm_prompt_constants.js', () => ({
 }));
 
 vi.mock('./pipeline/chat_pipeline.js', () => ({
-    ChatPipeline: vi.fn().mockImplementation((config) => ({
-        config,
-        execute: vi.fn(),
-        getMetrics: vi.fn(),
-        resetMetrics: vi.fn(),
-        stages: {
-            contextExtraction: {
-                execute: vi.fn()
-            },
-            semanticContextExtraction: {
-                execute: vi.fn()
-            }
-        }
-    }))
+    ChatPipeline: vi.fn().mockImplementation(function (config) {
+        Object.assign(this, {
+            config,
+            execute: vi.fn(),
+            getMetrics: vi.fn(),
+            resetMetrics: vi.fn(),
+            stages: {
+                contextExtraction: {
+                    execute: vi.fn()
+                },
+                semanticContextExtraction: {
+                    execute: vi.fn()
+                }
+            }
+        });
+    })
 }));
 
 vi.mock('./ai_service_manager.js', () => ({
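
Where the old arrow returned one large object literal, the new constructor wraps the same literal in Object.assign(this, { ... }), keeping the object shape identical while populating the constructed instance. A compact sketch of the idiom (the config value is illustrative):

import { vi } from 'vitest';

const PipelineMock = vi.fn().mockImplementation(function (this: any, config: unknown) {
    // Same literal as before, now copied onto the instance created by `new`.
    Object.assign(this, {
        config,
        execute: vi.fn(),
        stages: { contextExtraction: { execute: vi.fn() } }
    });
});

const pipeline = new (PipelineMock as any)({ name: 'default' });
// pipeline.config, pipeline.execute and pipeline.stages are now instance members.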
@@ -67,12 +69,12 @@ describe('ChatService', () => {
 
     beforeEach(async () => {
         vi.clearAllMocks();
-
+
         // Get mocked modules
         mockChatStorageService = (await import('./chat_storage_service.js')).default;
        mockAiServiceManager = (await import('./ai_service_manager.js')).default;
        mockLog = (await import('../log.js')).default;
-
+
        // Setup pipeline mock
        mockChatPipeline = {
            execute: vi.fn(),
@@ -87,10 +89,10 @@ describe('ChatService', () => {
             }
         }
     };
-
+
        // Create a new ChatService instance
        chatService = new ChatService();
-
+
        // Replace the internal pipelines with our mock
        (chatService as any).pipelines.set('default', mockChatPipeline);
        (chatService as any).pipelines.set('agent', mockChatPipeline);
@@ -228,7 +230,7 @@ describe('ChatService', () => {
 
     it('should create new session if not found', async () => {
         mockChatStorageService.getChat.mockResolvedValueOnce(null);
-
+
         const mockNewChat = {
             id: 'chat-new',
             title: 'New Chat',
@@ -301,7 +303,7 @@ describe('ChatService', () => {
 
         mockChatStorageService.getChat.mockResolvedValue(mockChat);
         mockChatStorageService.updateChat.mockResolvedValue(mockChat);
-
+
         mockChatPipeline.execute.mockResolvedValue({
             text: 'Hello! How can I help you?',
             model: 'gpt-3.5-turbo',
@@ -435,7 +437,7 @@ describe('ChatService', () => {
 
         mockChatStorageService.getChat.mockResolvedValue(mockChat);
         mockChatStorageService.updateChat.mockResolvedValue(mockChat);
-
+
         mockChatPipeline.execute.mockResolvedValue({
             text: 'Based on the context, here is my response.',
             model: 'gpt-4',
@@ -841,7 +843,7 @@ describe('ChatService', () => {
 
     it('should return default title for empty or invalid messages', () => {
         const generateTitle = (chatService as any).generateTitleFromMessages.bind(chatService);
-
+
         expect(generateTitle([])).toBe('New Chat');
         expect(generateTitle([{ role: 'assistant', content: 'Hello' }])).toBe('New Chat');
     });
@@ -858,4 +860,4 @@ describe('ChatService', () => {
         expect(title).toBe('First line');
     });
 });
-});
+});
@@ -47,9 +47,9 @@ vi.mock('../../ai_service_manager.js', () => ({
 }));
 
 vi.mock('../index.js', () => ({
-    ContextExtractor: vi.fn().mockImplementation(() => ({
-        findRelevantNotes: vi.fn().mockResolvedValue([])
-    }))
+    ContextExtractor: vi.fn().mockImplementation(function () {
+        this.findRelevantNotes = vi.fn().mockResolvedValue([]);
+    })
 }));
 
 describe('ContextService', () => {
@@ -59,7 +59,7 @@ describe('ContextService', () => {
     beforeEach(() => {
         vi.clearAllMocks();
         service = new ContextService();
-
+
         mockLLMService = {
             generateChatCompletion: vi.fn().mockResolvedValue({
                 content: 'Mock LLM response',
@@ -84,7 +84,7 @@ describe('ContextService', () => {
     describe('initialize', () => {
         it('should initialize successfully', async () => {
             const result = await service.initialize();
-
+
             expect(result).toBeUndefined(); // initialize returns void
             expect((service as any).initialized).toBe(true);
         });
@@ -92,7 +92,7 @@ describe('ContextService', () => {
         it('should not initialize twice', async () => {
             await service.initialize();
             await service.initialize(); // Second call should be a no-op
-
+
             expect((service as any).initialized).toBe(true);
         });
 
@@ -102,9 +102,9 @@ describe('ContextService', () => {
                 service.initialize(),
                 service.initialize()
             ];
-
+
             await Promise.all(promises);
-
+
             expect((service as any).initialized).toBe(true);
         });
     });
@@ -186,11 +186,11 @@ describe('ContextService', () => {
     describe('error handling', () => {
         it('should handle service operations', async () => {
             await service.initialize();
-
+
             // These operations should not throw
             const result1 = await service.processQuery('test', mockLLMService);
             const result2 = await service.findRelevantNotes('test', null, {});
-
+
             expect(result1).toBeDefined();
             expect(result2).toBeDefined();
         });
@@ -224,4 +224,4 @@ describe('ContextService', () => {
         });
     });
 });
-});
+});
@@ -48,8 +48,8 @@ vi.mock('@anthropic-ai/sdk', () => {
         }
     };
 
-    const mockAnthropic = vi.fn().mockImplementation(() => ({
-        messages: {
+    const mockAnthropic = vi.fn().mockImplementation(function () {
+        this.messages = {
             create: vi.fn().mockImplementation((params) => {
                 if (params.stream) {
                     return Promise.resolve(mockStream);
@@ -71,8 +71,8 @@ vi.mock('@anthropic-ai/sdk', () => {
                 }
             });
         })
-        }
-    }));
+        };
+    });
 
     return { default: mockAnthropic };
 });
@@ -127,7 +127,9 @@ describe('AnthropicService', () => {
         }
     };
 
-    AnthropicMock.mockImplementation(() => mockAnthropicInstance);
+    AnthropicMock.mockImplementation(function () {
+        Object.assign(this, mockAnthropicInstance);
+    });
 
     service = new AnthropicService();
 });
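
The per-test pattern changes the same way: instead of re-pointing the SDK constructor at a shared object with an arrow, the test copies that object onto `this`, so `new Anthropic(...)` inside the service still receives a fully stubbed client. A hedged re-statement of the pattern in isolation (instance fields are illustrative):

import { vi } from 'vitest';

// Shared, per-test-configurable stub object.
const mockAnthropicInstance = {
    messages: { create: vi.fn().mockResolvedValue({ content: [] }) }
};

const AnthropicMock = vi.fn();
AnthropicMock.mockImplementation(function (this: any) {
    // Every `new AnthropicMock(...)` now yields an instance backed by the shared stubs.
    Object.assign(this, mockAnthropicInstance);
});

const client = new (AnthropicMock as any)({ apiKey: 'test' });
// client.messages.create resolves with whatever the test configured above.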
@@ -30,11 +30,11 @@ vi.mock('./providers.js', () => ({
 }));
 
 vi.mock('../formatters/ollama_formatter.js', () => ({
-    OllamaMessageFormatter: vi.fn().mockImplementation(() => ({
-        formatMessages: vi.fn().mockReturnValue([
+    OllamaMessageFormatter: vi.fn().mockImplementation(function () {
+        this.formatMessages = vi.fn().mockReturnValue([
             { role: 'user', content: 'Hello' }
-        ]),
-        formatResponse: vi.fn().mockReturnValue({
+        ]);
+        this.formatResponse = vi.fn().mockReturnValue({
             text: 'Hello! How can I help you today?',
             provider: 'Ollama',
             model: 'llama2',
@@ -44,8 +44,8 @@ vi.mock('../formatters/ollama_formatter.js', () => ({
                 totalTokens: 15
             },
             tool_calls: null
-        })
-    }))
+        });
+    })
 }));
 
 vi.mock('../tools/tool_registry.js', () => ({
@@ -83,8 +83,8 @@ vi.mock('ollama', () => {
         }
     };
 
-    const mockOllama = vi.fn().mockImplementation(() => ({
-        chat: vi.fn().mockImplementation((params) => {
+    const mockOllama = vi.fn().mockImplementation(function () {
+        this.chat = vi.fn().mockImplementation((params) => {
             if (params.stream) {
                 return Promise.resolve(mockStream);
             }
@@ -97,8 +97,8 @@ vi.mock('ollama', () => {
                 model: 'llama2',
                 done: true
             });
-        }),
-        show: vi.fn().mockResolvedValue({
+        });
+        this.show = vi.fn().mockResolvedValue({
             modelfile: 'FROM llama2',
             parameters: {},
             template: '',
@@ -109,8 +109,8 @@ vi.mock('ollama', () => {
                 parameter_size: '7B',
                 quantization_level: 'Q4_0'
             }
-        }),
-        list: vi.fn().mockResolvedValue({
+        });
+        this.list = vi.fn().mockResolvedValue({
             models: [
                 {
                     name: 'llama2:latest',
@@ -119,7 +119,7 @@ vi.mock('ollama', () => {
                 }
             ]
-        })
-    }));
+        });
+    });
 
     return { Ollama: mockOllama };
 });
@@ -196,7 +196,9 @@ describe('OllamaService', () => {
         })
     };
 
-    OllamaMock.mockImplementation(() => mockOllamaInstance);
+    OllamaMock.mockImplementation(function () {
+        Object.assign(this, mockOllamaInstance);
+    });
 
     service = new OllamaService();
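
The Ollama beforeEach gets the identical treatment. As a closing sanity check, a standalone hedged sketch showing that the copied stubs are shared between the constructed client and the test's handle on them (field names and host value are illustrative):

import { expect, test, vi } from 'vitest';

const mockOllamaInstance = { chat: vi.fn(), show: vi.fn(), list: vi.fn() };

const OllamaMock = vi.fn();
OllamaMock.mockImplementation(function (this: any) {
    Object.assign(this, mockOllamaInstance);
});

test('constructed client exposes the shared stubs', () => {
    const client = new (OllamaMock as any)({ host: 'http://127.0.0.1:11434' });
    expect(client.chat).toBe(mockOllamaInstance.chat);
});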