it errors, but works

commit ef6ecdc42d (parent cf0e9242a0), authored by perf3ct

@@ -133,8 +133,20 @@ export default class LlmChatPanel extends BasicWidget {
         try {
             const useAdvancedContext = this.useAdvancedContextCheckbox.checked;
 
-            // Setup streaming
-            const source = new EventSource(`./api/llm/messages?sessionId=${this.sessionId}&format=stream`);
+            // Create the message parameters
+            const messageParams = {
+                content,
+                contextNoteId: this.currentNoteId,
+                useAdvancedContext
+            };
+
+            // First, send the message via POST request
+            await server.post<any>(`llm/sessions/${this.sessionId}/messages`, messageParams);
+
+            // Then set up streaming via EventSource
+            const streamUrl = `./api/llm/sessions/${this.sessionId}/messages?format=stream&useAdvancedContext=${useAdvancedContext}`;
+            const source = new EventSource(streamUrl);
 
             let assistantResponse = '';
 
             // Handle streaming response

@@ -171,18 +183,6 @@ export default class LlmChatPanel extends BasicWidget {
                 toastService.showError('Error connecting to the LLM service. Please try again.');
             };
 
-            // Send the actual message
-            const response = await server.post<any>('llm/messages', {
-                sessionId: this.sessionId,
-                content,
-                contextNoteId: this.currentNoteId,
-                useAdvancedContext
-            });
-
-            // Handle sources if returned in non-streaming response
-            if (response && response.sources && response.sources.length > 0) {
-                this.showSources(response.sources);
-            }
         } catch (error) {
             this.hideLoadingIndicator();
             toastService.showError('Error sending message: ' + (error as Error).message);

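The two hunks above split what used to be a single round trip: the message itself now travels in an ordinary POST, and the tokens come back over a separate GET that the browser opens with EventSource. A minimal sketch of the consuming side, assuming the `data: {"content": ...}` / `data: [DONE]` framing the server hunks below write (the wiring here is illustrative, not the widget's actual handlers):

    const sessionId = 'abc123'; // illustrative
    const streamUrl = `./api/llm/sessions/${sessionId}/messages?format=stream&useAdvancedContext=true`;
    const source = new EventSource(streamUrl);
    let assistantResponse = '';

    source.onmessage = (event: MessageEvent) => {
        if (event.data === '[DONE]') {
            source.close(); // otherwise EventSource auto-reconnects and replays the request
            return;
        }
        const payload = JSON.parse(event.data) as { content?: string };
        if (payload.content) {
            assistantResponse += payload.content; // chunks arrive in order; append as they come
        }
    };

    source.onerror = () => source.close();
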
@@ -449,26 +449,57 @@ Now, based on the above notes, please answer: ${query}`;
  */
 async function sendMessage(req: Request, res: Response) {
     try {
-        // Extract the content from the request body
-        const { content, sessionId, useAdvancedContext = false } = req.body || {};
+        // Extract parameters differently based on the request method
+        let content, useAdvancedContext, sessionId;
 
-        // Validate the content
-        if (!content || typeof content !== 'string' || content.trim().length === 0) {
+        if (req.method === 'POST') {
+            // For POST requests, get content from the request body
+            const requestBody = req.body || {};
+            content = requestBody.content;
+            useAdvancedContext = requestBody.useAdvancedContext || false;
+        } else if (req.method === 'GET') {
+            // For GET (streaming) requests, get format from query params
+            // The content should have been sent in a previous POST request
+            useAdvancedContext = req.query.useAdvancedContext === 'true';
+            content = ''; // We don't need content for GET requests
+        }
+
+        // Get sessionId from URL params since it's part of the route
+        sessionId = req.params.sessionId;
+
+        // Get the Accept header once at the start
+        const acceptHeader = req.get('Accept');
+        const isStreamingRequest = acceptHeader && acceptHeader.includes('text/event-stream');
+
+        // For GET requests, ensure we have the format=stream parameter
+        if (req.method === 'GET' && (!req.query.format || req.query.format !== 'stream')) {
+            throw new Error('Stream format parameter is required for GET requests');
+        }
+
+        // For POST requests, validate the content
+        if (req.method === 'POST' && (!content || typeof content !== 'string' || content.trim().length === 0)) {
             throw new Error('Content cannot be empty');
         }
 
-        // Get or create the session
-        let session: ChatSession;
+        // Get session
+        if (!sessionId || !sessions.has(sessionId)) {
+            throw new Error('Session not found');
+        }
 
-        if (sessionId && sessions.has(sessionId)) {
-            session = sessions.get(sessionId)!;
-            session.lastActive = new Date();
-        } else {
-            const result = await createSession(req, res);
-            if (!result?.id) {
-                throw new Error('Failed to create a new session');
-            }
-            session = sessions.get(result.id)!;
-        }
+        const session = sessions.get(sessionId)!;
+        session.lastActive = new Date();
+
+        // For POST requests, store the user message
+        if (req.method === 'POST' && content) {
+            // Add message to session
+            session.messages.push({
+                role: 'user',
+                content,
+                timestamp: new Date()
+            });
+
+            // Log a preview of the message
+            log.info(`Processing LLM message: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"`);
+        }
 
         // Check if AI services are available

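Because the same handler is now reached by two HTTP methods, each input gets a single authoritative source: the session id always comes from the route, the message content only from a POST body, and the flags from the query string on GET. A standalone Express sketch of that extraction rule (assumed shapes, not Trilium's actual module):

    import express from 'express';

    const app = express();
    app.use(express.json());

    app.all('/api/llm/sessions/:sessionId/messages', (req, res) => {
        const sessionId = req.params.sessionId;                          // route param, both methods
        const content = req.method === 'POST' ? req.body?.content : '';  // body is only read on POST
        const useAdvancedContext = req.method === 'POST'
            ? Boolean(req.body?.useAdvancedContext)
            : req.query.useAdvancedContext === 'true';                   // query string on GET
        res.json({ sessionId, content, useAdvancedContext, method: req.method });
    });
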
@@ -495,184 +526,225 @@ async function sendMessage(req: Request, res: Response) {
             throw new Error('No AI service is available');
         }
 
-        // Create user message
-        const userMessage: Message = {
-            role: 'user',
-            content
-        };
-
-        // Add message to session
-        session.messages.push({
-            role: 'user',
-            content,
-            timestamp: new Date()
-        });
-
-        // Log a preview of the message
-        log.info(`Processing LLM message: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"`);
-
         // Information to return to the client
         let aiResponse = '';
         let sourceNotes: NoteSource[] = [];
 
-        // If Advanced Context is enabled, we use the improved method
-        if (useAdvancedContext) {
-            // Use the Trilium-specific approach
-            const contextNoteId = session.noteContext || null;
-            const results = await triliumContextService.processQuery(content, service, contextNoteId);
-
-            // Get the generated context
-            const context = results.context;
-            sourceNotes = results.notes;
-
-            // Add system message with the context
-            const contextMessage: Message = {
-                role: 'system',
-                content: context
-            };
-
-            // Format all messages for the AI
-            const aiMessages: Message[] = [
-                contextMessage,
-                ...session.messages.slice(-10).map(msg => ({
-                    role: msg.role,
-                    content: msg.content
-                }))
-            ];
-
-            // Configure chat options from session metadata
-            const chatOptions: ChatCompletionOptions = {
-                temperature: session.metadata.temperature || 0.7,
-                maxTokens: session.metadata.maxTokens,
-                model: session.metadata.model
-                // 'provider' property has been removed as it's not in the ChatCompletionOptions type
-            };
-
-            // Get streaming response if requested
-            const acceptHeader = req.get('Accept');
-            if (acceptHeader && acceptHeader.includes('text/event-stream')) {
-                res.setHeader('Content-Type', 'text/event-stream');
-                res.setHeader('Cache-Control', 'no-cache');
-                res.setHeader('Connection', 'keep-alive');
-
-                let messageContent = '';
-
-                // Stream the response
-                await service.sendChatCompletion(
-                    aiMessages,
-                    chatOptions,
-                    (chunk: string) => {
-                        messageContent += chunk;
-                        res.write(`data: ${JSON.stringify({ content: chunk })}\n\n`);
-                    }
-                );
-
-                // Close the stream
-                res.write('data: [DONE]\n\n');
-                res.end();
-
-                // Store the full response
-                aiResponse = messageContent;
-            } else {
-                // Non-streaming approach
-                aiResponse = await service.sendChatCompletion(aiMessages, chatOptions);
-            }
-        } else {
-            // Original approach - find relevant notes through direct embedding comparison
-            const relevantNotes = await findRelevantNotes(
-                content,
-                session.noteContext || null,
-                5
-            );
-
-            sourceNotes = relevantNotes;
-
-            // Build context from relevant notes
-            const context = buildContextFromNotes(relevantNotes, content);
-
-            // Add system message with the context
-            const contextMessage: Message = {
-                role: 'system',
-                content: context
-            };
-
-            // Format all messages for the AI
-            const aiMessages: Message[] = [
-                contextMessage,
-                ...session.messages.slice(-10).map(msg => ({
-                    role: msg.role,
-                    content: msg.content
-                }))
-            ];
-
-            // Configure chat options from session metadata
-            const chatOptions: ChatCompletionOptions = {
-                temperature: session.metadata.temperature || 0.7,
-                maxTokens: session.metadata.maxTokens,
-                model: session.metadata.model
-                // 'provider' property has been removed as it's not in the ChatCompletionOptions type
-            };
-
-            // Get streaming response if requested
-            const acceptHeader = req.get('Accept');
-            if (acceptHeader && acceptHeader.includes('text/event-stream')) {
-                res.setHeader('Content-Type', 'text/event-stream');
-                res.setHeader('Cache-Control', 'no-cache');
-                res.setHeader('Connection', 'keep-alive');
-
-                let messageContent = '';
-
-                // Stream the response
-                await service.sendChatCompletion(
-                    aiMessages,
-                    chatOptions,
-                    (chunk: string) => {
-                        messageContent += chunk;
-                        res.write(`data: ${JSON.stringify({ content: chunk })}\n\n`);
-                    }
-                );
-
-                // Close the stream
-                res.write('data: [DONE]\n\n');
-                res.end();
-
-                // Store the full response
-                aiResponse = messageContent;
-            } else {
-                // Non-streaming approach
-                aiResponse = await service.sendChatCompletion(aiMessages, chatOptions);
-            }
-        }
-
-        // Only store the assistant's message if we're not streaming (otherwise we already did)
-        const acceptHeader = req.get('Accept');
-        if (!acceptHeader || !acceptHeader.includes('text/event-stream')) {
-            // Store the assistant's response in the session
-            session.messages.push({
-                role: 'assistant',
-                content: aiResponse,
-                timestamp: new Date()
-            });
-
-            // Return the response
-            return {
-                content: aiResponse,
-                sources: sourceNotes.map(note => ({
-                    noteId: note.noteId,
-                    title: note.title,
-                    similarity: note.similarity,
-                    branchId: note.branchId
-                }))
-            };
-        } else {
-            // For streaming responses, we've already sent the data
-            // But we still need to add the message to the session
-            session.messages.push({
-                role: 'assistant',
-                content: aiResponse,
-                timestamp: new Date()
-            });
-        }
+        // For POST requests, we need to process the message
+        // For GET (streaming) requests, we use the latest user message from the session
+        if (req.method === 'POST' || isStreamingRequest) {
+            // Get the latest user message for context
+            const latestUserMessage = session.messages
+                .filter(msg => msg.role === 'user')
+                .pop();
+
+            if (!latestUserMessage && req.method === 'GET') {
+                throw new Error('No user message found in session');
+            }
+
+            // Use the latest message content for GET requests
+            const messageContent = req.method === 'POST' ? content : latestUserMessage!.content;
+
+            // If Advanced Context is enabled, we use the improved method
+            if (useAdvancedContext) {
+                // Use the Trilium-specific approach
+                const contextNoteId = session.noteContext || null;
+                const results = await triliumContextService.processQuery(messageContent, service, contextNoteId);
+
+                // Get the generated context
+                const context = results.context;
+                sourceNotes = results.notes;
+
+                // Add system message with the context
+                const contextMessage: Message = {
+                    role: 'system',
+                    content: context
+                };
+
+                // Format all messages for the AI
+                const aiMessages: Message[] = [
+                    contextMessage,
+                    ...session.messages.slice(-10).map(msg => ({
+                        role: msg.role,
+                        content: msg.content
+                    }))
+                ];
+
+                // Configure chat options from session metadata
+                const chatOptions: ChatCompletionOptions = {
+                    temperature: session.metadata.temperature || 0.7,
+                    maxTokens: session.metadata.maxTokens,
+                    model: session.metadata.model,
+                    stream: isStreamingRequest ? true : undefined
+                };
+
+                // Process based on whether this is a streaming request
+                if (isStreamingRequest) {
+                    res.setHeader('Content-Type', 'text/event-stream');
+                    res.setHeader('Cache-Control', 'no-cache');
+                    res.setHeader('Connection', 'keep-alive');
+
+                    let messageContent = '';
+
+                    // Use the correct method name: generateChatCompletion
+                    const response = await service.generateChatCompletion(aiMessages, chatOptions);
+
+                    // Handle streaming if the response includes a stream method
+                    if (response.stream) {
+                        await response.stream((chunk: { text: string; done: boolean }) => {
+                            if (chunk.text) {
+                                messageContent += chunk.text;
+                                res.write(`data: ${JSON.stringify({ content: chunk.text })}\n\n`);
+                            }
+
+                            if (chunk.done) {
+                                // Signal the end of the stream when done
+                                res.write('data: [DONE]\n\n');
+                                res.end();
+                            }
+                        });
+                    } else {
+                        // If no streaming available, send the response as a single chunk
+                        messageContent = response.text;
+                        res.write(`data: ${JSON.stringify({ content: messageContent })}\n\n`);
+                        res.write('data: [DONE]\n\n');
+                        res.end();
+                    }
+
+                    // Store the full response for the session
+                    aiResponse = messageContent;
+
+                    // Store the assistant's response in the session
+                    session.messages.push({
+                        role: 'assistant',
+                        content: aiResponse,
+                        timestamp: new Date()
+                    });
+                } else {
+                    // Non-streaming approach for POST requests
+                    const response = await service.generateChatCompletion(aiMessages, chatOptions);
+                    aiResponse = response.text; // Extract the text from the response
+
+                    // Store the assistant's response in the session
+                    session.messages.push({
+                        role: 'assistant',
+                        content: aiResponse,
+                        timestamp: new Date()
+                    });
+
+                    // Return the response for POST requests
+                    return {
+                        content: aiResponse,
+                        sources: sourceNotes.map(note => ({
+                            noteId: note.noteId,
+                            title: note.title,
+                            similarity: note.similarity,
+                            branchId: note.branchId
+                        }))
+                    };
+                }
+            } else {
+                // Original approach - find relevant notes through direct embedding comparison
+                const relevantNotes = await findRelevantNotes(
+                    content,
+                    session.noteContext || null,
+                    5
+                );
+
+                sourceNotes = relevantNotes;
+
+                // Build context from relevant notes
+                const context = buildContextFromNotes(relevantNotes, content);
+
+                // Add system message with the context
+                const contextMessage: Message = {
+                    role: 'system',
+                    content: context
+                };
+
+                // Format all messages for the AI
+                const aiMessages: Message[] = [
+                    contextMessage,
+                    ...session.messages.slice(-10).map(msg => ({
+                        role: msg.role,
+                        content: msg.content
+                    }))
+                ];
+
+                // Configure chat options from session metadata
+                const chatOptions: ChatCompletionOptions = {
+                    temperature: session.metadata.temperature || 0.7,
+                    maxTokens: session.metadata.maxTokens,
+                    model: session.metadata.model,
+                    stream: isStreamingRequest ? true : undefined
+                };
+
+                if (isStreamingRequest) {
+                    res.setHeader('Content-Type', 'text/event-stream');
+                    res.setHeader('Cache-Control', 'no-cache');
+                    res.setHeader('Connection', 'keep-alive');
+
+                    let messageContent = '';
+
+                    // Use the correct method name: generateChatCompletion
+                    const response = await service.generateChatCompletion(aiMessages, chatOptions);
+
+                    // Handle streaming if the response includes a stream method
+                    if (response.stream) {
+                        await response.stream((chunk: { text: string; done: boolean }) => {
+                            if (chunk.text) {
+                                messageContent += chunk.text;
+                                res.write(`data: ${JSON.stringify({ content: chunk.text })}\n\n`);
+                            }
+
+                            if (chunk.done) {
+                                // Signal the end of the stream when done
+                                res.write('data: [DONE]\n\n');
+                                res.end();
+                            }
+                        });
+                    } else {
+                        // If no streaming available, send the response as a single chunk
+                        messageContent = response.text;
+                        res.write(`data: ${JSON.stringify({ content: messageContent })}\n\n`);
+                        res.write('data: [DONE]\n\n');
+                        res.end();
+                    }
+
+                    // Store the full response for the session
+                    aiResponse = messageContent;
+
+                    // Store the assistant's response in the session
+                    session.messages.push({
+                        role: 'assistant',
+                        content: aiResponse,
+                        timestamp: new Date()
+                    });
+                } else {
+                    // Non-streaming approach for POST requests
+                    const response = await service.generateChatCompletion(aiMessages, chatOptions);
+                    aiResponse = response.text; // Extract the text from the response
+
+                    // Store the assistant's response in the session
+                    session.messages.push({
+                        role: 'assistant',
+                        content: aiResponse,
+                        timestamp: new Date()
+                    });
+
+                    // Return the response for POST requests
+                    return {
+                        content: aiResponse,
+                        sources: sourceNotes.map(note => ({
+                            noteId: note.noteId,
+                            title: note.title,
+                            similarity: note.similarity,
+                            branchId: note.branchId
+                        }))
+                    };
+                }
+            }
+        }
     } catch (error: any) {
         log.error(`Error sending message to LLM: ${error.message}`);
         throw new Error(`Failed to send message: ${error.message}`);

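Both branches now emit the same wire format: each chunk is a single SSE event whose data field is a small JSON object, and a literal [DONE] payload marks the end of the stream. A standalone Express demo of just that framing (the chunk source is faked; this is not the Trilium handler):

    import express from 'express';

    const app = express();

    app.get('/stream-demo', async (_req, res) => {
        res.setHeader('Content-Type', 'text/event-stream');
        res.setHeader('Cache-Control', 'no-cache');
        res.setHeader('Connection', 'keep-alive');

        for (const chunk of ['Hello', ', ', 'world']) {
            // one event per chunk: a "data:" line terminated by a blank line
            res.write(`data: ${JSON.stringify({ content: chunk })}\n\n`);
            await new Promise(resolve => setTimeout(resolve, 100)); // simulate model latency
        }

        res.write('data: [DONE]\n\n'); // the sentinel the client watches for
        res.end();
    });

    app.listen(3000);
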
@@ -387,6 +387,7 @@ function register(app: express.Application) {
     apiRoute(PATCH, "/api/llm/sessions/:sessionId", llmRoute.updateSession);
     apiRoute(DEL, "/api/llm/sessions/:sessionId", llmRoute.deleteSession);
     apiRoute(PST, "/api/llm/sessions/:sessionId/messages", llmRoute.sendMessage);
+    route(GET, "/api/llm/sessions/:sessionId/messages", [auth.checkApiAuth, csrfMiddleware], llmRoute.sendMessage, apiResultHandler);
 
     // Ollama API endpoints
     route(PST, "/api/ollama/list-models", [auth.checkApiAuth, csrfMiddleware], ollamaRoute.listModels, apiResultHandler);

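The extra GET registration exists because of a client-side constraint: EventSource always issues a GET request and cannot carry a body, so the POST route alone can never serve the stream. For example (session id illustrative):

    // EventSource offers no option for a method or a body; everything the
    // server needs has to ride in the URL or in cookies.
    const source = new EventSource('/api/llm/sessions/abc123/messages?format=stream');
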
@@ -40,7 +40,20 @@ export class AIServiceManager {
 
             if (customOrder) {
                 try {
-                    const parsed = JSON.parse(customOrder);
+                    // Try to parse as JSON first
+                    let parsed;
+
+                    // Handle both array in JSON format and simple string format
+                    if (customOrder.startsWith('[') && customOrder.endsWith(']')) {
+                        parsed = JSON.parse(customOrder);
+                    } else if (typeof customOrder === 'string') {
+                        // If it's a simple string (like "ollama"), convert to single-item array
+                        parsed = [customOrder];
+                    } else {
+                        // Fallback to default
+                        parsed = defaultOrder;
+                    }
+
                     // Validate that all providers are valid
                     if (Array.isArray(parsed) &&
                         parsed.every(p => Object.keys(this.services).includes(p))) {

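The effect of the more tolerant parsing is easiest to see in isolation. A local copy of the same branching, with defaultOrder standing in for the surrounding class state:

    function parseProviderOrder(customOrder: string, defaultOrder: string[]): string[] {
        if (customOrder.startsWith('[') && customOrder.endsWith(']')) {
            return JSON.parse(customOrder);   // JSON array form: '["openai","ollama"]'
        } else if (typeof customOrder === 'string') {
            return [customOrder];             // bare string form: 'ollama'
        }
        return defaultOrder;                  // unreachable for string input; kept to mirror the patch
    }

    console.log(parseProviderOrder('["openai","anthropic"]', [])); // [ 'openai', 'anthropic' ]
    console.log(parseProviderOrder('ollama', []));                 // [ 'ollama' ]
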
@@ -102,12 +102,13 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
                 maxTokens: 300
             };
 
-            // Get the response from the LLM
-            const response = await llmService.sendTextCompletion(messages, options);
+            // Get the response from the LLM using the correct method name
+            const response = await llmService.generateChatCompletion(messages, options);
+            const responseText = response.text; // Extract the text from the response object
 
             try {
                 // Parse the JSON response
-                const jsonStr = response.trim().replace(/```json|```/g, '').trim();
+                const jsonStr = responseText.trim().replace(/```json|```/g, '').trim();
                 const queries = JSON.parse(jsonStr);
 
                 if (Array.isArray(queries) && queries.length > 0) {

@@ -117,7 +118,7 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
                 }
             } catch (parseError) {
                 // Fallback: if JSON parsing fails, try to extract queries line by line
-                const lines = response.split('\n')
+                const lines = responseText.split('\n')
                     .map((line: string) => line.trim())
                     .filter((line: string) => line.length > 0 && !line.startsWith('```'));

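Since generateChatCompletion returns an object rather than a bare string, every use of the old response value moves to responseText. The surrounding parse-then-fallback strategy, pulled out as a standalone helper:

    function extractQueries(responseText: string): string[] {
        try {
            // strip optional ```json fences, then attempt strict JSON
            const jsonStr = responseText.trim().replace(/```json|```/g, '').trim();
            const queries = JSON.parse(jsonStr);
            if (Array.isArray(queries) && queries.length > 0) {
                return queries;
            }
        } catch {
            // fall through to the line-based fallback below
        }
        return responseText.split('\n')
            .map((line) => line.trim())
            .filter((line) => line.length > 0 && !line.startsWith('```'));
    }

    console.log(extractQueries('```json\n["kubernetes", "container orchestration"]\n```'));
    // -> [ 'kubernetes', 'container orchestration' ]
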
@@ -176,8 +177,8 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
 
             // Process each query
             for (const query of queries) {
-                // Get embeddings for this query
-                const queryEmbedding = await this.provider.getEmbedding(query);
+                // Get embeddings for this query using the correct method name
+                const queryEmbedding = await this.provider.generateEmbeddings(query);
 
                 // Find notes similar to this query
                 let results;

@@ -192,8 +193,8 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
                     // Search all notes
                     results = await vectorStore.findSimilarNotes(
                         queryEmbedding,
-                        this.provider.id,
-                        this.provider.modelId,
+                        this.provider.name, // Use name property instead of id
+                        this.provider.getConfig().model, // Use getConfig().model instead of modelId
                         Math.min(limit, 5), // Limit per query
                         0.5 // Lower threshold to get more diverse results
                     );

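The renamed calls still amount to embedding each query and ranking stored note embeddings against it. The diff does not show vectorStore's internals, but rankings like this are conventionally cosine similarity, roughly:

    // Our own sketch of the ranking step, not vectorStore's actual code.
    function cosineSimilarity(a: number[], b: number[]): number {
        let dot = 0, normA = 0, normB = 0;
        for (let i = 0; i < a.length; i++) {
            dot += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }
        return dot / (Math.sqrt(normA) * Math.sqrt(normB));
    }

    // With the 0.5 threshold passed above, only notes scoring >= 0.5
    // against the query embedding would be kept.
    console.log(cosineSimilarity([1, 0], [1, 0])); // 1
    console.log(cosineSimilarity([1, 0], [0, 1])); // 0
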
@@ -265,8 +266,8 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
             for (const noteId of subtreeNoteIds) {
                 const noteEmbedding = await vectorStore.getEmbeddingForNote(
                     noteId,
-                    this.provider.id,
-                    this.provider.modelId
+                    this.provider.name, // Use name property instead of id
+                    this.provider.getConfig().model // Use getConfig().model instead of modelId
                 );
 
                 if (noteEmbedding) {

@@ -338,7 +339,10 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
      */
     buildContextFromNotes(sources: any[], query: string): string {
         if (!sources || sources.length === 0) {
-            return "";
+            // Return a default context instead of empty string
+            return "I am an AI assistant helping you with your Trilium notes. " +
+                   "I couldn't find any specific notes related to your query, but I'll try to assist you " +
+                   "with general knowledge about Trilium or other topics you're interested in.";
         }
 
         let context = `The following are relevant notes from your knowledge base that may help answer the query: "${query}"\n\n`;

@@ -382,28 +386,62 @@ Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
      */
     async processQuery(userQuestion: string, llmService: any, contextNoteId: string | null = null) {
         if (!this.initialized) {
-            await this.initialize();
+            try {
+                await this.initialize();
+            } catch (error) {
+                log.error(`Failed to initialize TriliumContextService: ${error}`);
+                // Return a fallback response if initialization fails
+                return {
+                    context: "I am an AI assistant helping you with your Trilium notes. " +
+                             "I'll try to assist you with general knowledge about your query.",
+                    notes: [],
+                    queries: [userQuestion]
+                };
+            }
         }
 
-        // Step 1: Generate search queries
-        const searchQueries = await this.generateSearchQueries(userQuestion, llmService);
-        log.info(`Generated search queries: ${JSON.stringify(searchQueries)}`);
+        try {
+            // Step 1: Generate search queries
+            let searchQueries: string[];
+            try {
+                searchQueries = await this.generateSearchQueries(userQuestion, llmService);
+            } catch (error) {
+                log.error(`Error generating search queries, using fallback: ${error}`);
+                searchQueries = [userQuestion]; // Fallback to using the original question
+            }
+            log.info(`Generated search queries: ${JSON.stringify(searchQueries)}`);
 
-        // Step 2: Find relevant notes using those queries
-        const relevantNotes = await this.findRelevantNotesMultiQuery(
-            searchQueries,
-            contextNoteId,
-            8 // Get more notes since we're using multiple queries
-        );
+            // Step 2: Find relevant notes using those queries
+            let relevantNotes: any[] = [];
+            try {
+                relevantNotes = await this.findRelevantNotesMultiQuery(
+                    searchQueries,
+                    contextNoteId,
+                    8 // Get more notes since we're using multiple queries
+                );
+            } catch (error) {
+                log.error(`Error finding relevant notes: ${error}`);
+                // Continue with empty notes list
+            }
 
-        // Step 3: Build context from the notes
-        const context = this.buildContextFromNotes(relevantNotes, userQuestion);
+            // Step 3: Build context from the notes
+            const context = this.buildContextFromNotes(relevantNotes, userQuestion);
 
-        return {
-            context,
-            notes: relevantNotes,
-            queries: searchQueries
-        };
+            return {
+                context,
+                notes: relevantNotes,
+                queries: searchQueries
+            };
+        } catch (error) {
+            log.error(`Error in processQuery: ${error}`);
+            // Return a fallback response if anything fails
+            return {
+                context: "I am an AI assistant helping you with your Trilium notes. " +
+                         "I encountered an error while processing your query, but I'll try to assist you anyway.",
+                notes: [],
+                queries: [userQuestion]
+            };
+        }
     }
 }

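With every failure path resolving to the same { context, notes, queries } shape, callers no longer need their own try/catch around context building. A caller sketch (types loosened to any, matching the method signature):

    async function buildPrompt(triliumContextService: any, llmService: any) {
        const { context, notes, queries } = await triliumContextService.processQuery(
            'Where are my notes on kubernetes?', llmService, null);

        console.log(queries);      // the generated (or fallback) search queries
        console.log(notes.length); // may be 0; the context string still stands on its own
        return context;            // usable as a system prompt either way
    }
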