mirror of https://github.com/zadam/trilium.git (synced 2025-11-04 05:28:59 +01:00)

feat(llm): also update OpenAI tool usage prompts

commit 2f303b1ae9 (parent 8f8b9d9e3b)
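This commit adds an OpenAI-flavoured TOOL_INSTRUCTIONS prompt (alongside the existing Anthropic one) and appends it to the system prompt in both the OpenAI message formatter and the OpenAI service whenever a request will use tools. A minimal sketch of that shared pattern follows; the helper name buildSystemPrompt and the import path are illustrative, not code from this commit.

import { PROVIDER_PROMPTS } from './constants/llm_prompt_constants.js'; // illustrative path

// Sketch only: when tools are in play, append the provider-specific tool
// instructions to whatever base system prompt was resolved for the request.
function buildSystemPrompt(basePrompt: string, willUseTools: boolean): string {
    if (willUseTools && PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS) {
        return `${basePrompt}\n\n${PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS}`;
    }
    return basePrompt;
}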
					
@@ -184,6 +184,22 @@ When responding:
 
         INSTRUCTIONS_WRAPPER: (instructions: string) =>
             `<instructions>\n${instructions}\n</instructions>`,
+
+        // Tool instructions for Anthropic Claude
+        TOOL_INSTRUCTIONS: `<instructions>
+When using tools to search for information, follow these requirements:
+
+1. ALWAYS TRY MULTIPLE SEARCH APPROACHES before concluding information isn't available
+2. YOU MUST PERFORM AT LEAST 3 DIFFERENT SEARCHES with varied parameters before giving up
+3. If a search returns no results:
+   - Try broader terms (e.g., "Kubernetes" instead of "Kubernetes deployment")
+   - Use synonyms (e.g., "meeting" instead of "conference")
+   - Remove specific qualifiers (e.g., "report" instead of "Q3 financial report")
+   - Try different search tools (vector_search for conceptual matches, keyword_search for exact matches)
+4. NEVER tell the user "there are no notes about X" until you've tried multiple search variations
+5. EXPLAIN your search strategy when adjusting parameters (e.g., "I'll try a broader search for...")
+6. When searches fail, AUTOMATICALLY try different approaches rather than asking the user what to do
+</instructions>`,
 
         ACKNOWLEDGMENT: "I understand. I'll follow those instructions.",
         CONTEXT_ACKNOWLEDGMENT: "I'll help you with your notes based on the context provided.",
@@ -203,7 +219,21 @@ ${context}
 
 Focus on relevant information from these notes when answering.
 Be concise and informative in your responses.
-</system_prompt>`
+</system_prompt>`,
+
+        // Tool instructions for OpenAI models
+        TOOL_INSTRUCTIONS: `When using tools to search for information, you must follow these requirements:
+
+1. ALWAYS TRY MULTIPLE SEARCH APPROACHES before concluding information isn't available
+2. YOU MUST PERFORM AT LEAST 3 DIFFERENT SEARCHES with varied parameters before giving up
+3. If a search returns no results:
+   - Try broader terms (e.g., "Kubernetes" instead of "Kubernetes deployment")
+   - Use synonyms (e.g., "meeting" instead of "conference")
+   - Remove specific qualifiers (e.g., "report" instead of "Q3 financial report")
+   - Try different search tools (vector_search for conceptual matches, keyword_search for exact matches)
+4. NEVER tell the user "there are no notes about X" until you've tried multiple search variations
+5. EXPLAIN your search strategy when adjusting parameters (e.g., "I'll try a broader search for...")
+6. When searches fail, AUTOMATICALLY try different approaches rather than asking the user what to do`
     },
 
     OLLAMA: {
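The two hunks above sit inside the PROVIDER_PROMPTS constant in llm_prompt_constants. Roughly, the object now takes the shape sketched below; this is abbreviated, the ANTHROPIC key name is inferred from the "Tool instructions for Anthropic Claude" comment, and most other entries are elided. The remaining hunks move on to the OpenAIMessageFormatter and OpenAIService classes.

// Abbreviated sketch of PROVIDER_PROMPTS as implied by the hunks above; not the full constant.
const PROVIDER_PROMPTS = {
    ANTHROPIC: {
        INSTRUCTIONS_WRAPPER: (instructions: string) =>
            `<instructions>\n${instructions}\n</instructions>`,
        // XML-wrapped variant added by the first hunk
        TOOL_INSTRUCTIONS: `<instructions>\nWhen using tools to search for information, follow these requirements: ...\n</instructions>`,
        ACKNOWLEDGMENT: "I understand. I'll follow those instructions.",
        CONTEXT_ACKNOWLEDGMENT: "I'll help you with your notes based on the context provided.",
    },
    OPENAI: {
        // Plain-text variant added by the second hunk (no XML wrapper)
        TOOL_INSTRUCTIONS: `When using tools to search for information, you must follow these requirements: ...`,
    },
    OLLAMA: {
        // ...
    },
};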
@@ -1,7 +1,7 @@
 import sanitizeHtml from 'sanitize-html';
 import type { Message } from '../ai_interface.js';
 import { BaseMessageFormatter } from './base_formatter.js';
-import { PROVIDER_PROMPTS, FORMATTING_PROMPTS } from '../constants/llm_prompt_constants.js';
+import { PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';
 import { LLM_CONSTANTS } from '../constants/provider_constants.js';
 import {
     HTML_ALLOWED_TAGS,
@@ -10,6 +10,7 @@ import {
     HTML_ENTITY_REPLACEMENTS,
     FORMATTER_LOGS
 } from '../constants/formatter_constants.js';
+import log from '../../log.js';
 
 /**
  * OpenAI-specific message formatter
@@ -24,8 +25,13 @@ export class OpenAIMessageFormatter extends BaseMessageFormatter {
 
     /**
      * Format messages for the OpenAI API
+     * @param messages The messages to format
+     * @param systemPrompt Optional system prompt to use
+     * @param context Optional context to include
+     * @param preserveSystemPrompt When true, preserves existing system messages
+     * @param useTools Flag indicating if tools will be used in this request
      */
-    formatMessages(messages: Message[], systemPrompt?: string, context?: string): Message[] {
+    formatMessages(messages: Message[], systemPrompt?: string, context?: string, preserveSystemPrompt?: boolean, useTools?: boolean): Message[] {
         const formattedMessages: Message[] = [];
 
         // Check if we already have a system message
@@ -47,9 +53,22 @@ export class OpenAIMessageFormatter extends BaseMessageFormatter {
         }
         // If we don't have explicit context but have a system prompt
         else if (!hasSystemMessage && systemPrompt) {
+            let baseSystemPrompt = systemPrompt || PROVIDER_PROMPTS.COMMON.DEFAULT_ASSISTANT_INTRO;
+
+            // Check if this is a tool-using conversation
+            const hasPreviousToolCalls = messages.some(msg => msg.tool_calls && msg.tool_calls.length > 0);
+            const hasToolResults = messages.some(msg => msg.role === 'tool');
+            const isToolUsingConversation = useTools || hasPreviousToolCalls || hasToolResults;
+
+            // Add tool instructions for OpenAI when tools are being used
+            if (isToolUsingConversation && PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS) {
+                log.info('Adding tool instructions to system prompt for OpenAI');
+                baseSystemPrompt = `${baseSystemPrompt}\n\n${PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS}`;
+            }
+
             formattedMessages.push({
                 role: 'system',
-                content: systemPrompt
+                content: baseSystemPrompt
             });
         }
         // If neither context nor system prompt is provided, use default system prompt
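That completes the OpenAIMessageFormatter changes: formatMessages gains preserveSystemPrompt and useTools parameters, and a conversation counts as tool-using if useTools is set or the history already contains tool_calls or role 'tool' messages. A hypothetical call site, purely for illustration (the message values and prompt text below are made up):

// Hypothetical usage of the extended signature; passing useTools = true (or a
// history containing tool calls/results) triggers the tool-instruction injection.
const formatter = new OpenAIMessageFormatter();
const formatted = formatter.formatMessages(
    [{ role: 'user', content: 'Find my notes about Kubernetes' }],
    'You are a helpful assistant for Trilium Notes.', // systemPrompt
    undefined,                                        // context
    false,                                            // preserveSystemPrompt
    true                                              // useTools
);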
@@ -3,6 +3,8 @@ import { BaseAIService } from '../base_ai_service.js';
 import type { ChatCompletionOptions, ChatResponse, Message, StreamChunk } from '../ai_interface.js';
 import { getOpenAIOptions } from './providers.js';
 import OpenAI from 'openai';
+import { PROVIDER_PROMPTS } from '../constants/llm_prompt_constants.js';
+import log from '../../log.js';
 
 export class OpenAIService extends BaseAIService {
     private openai: OpenAI | null = null;
@@ -36,7 +38,17 @@ export class OpenAIService extends BaseAIService {
         // Initialize the OpenAI client
         const client = this.getClient(providerOptions.apiKey, providerOptions.baseUrl);
 
-        const systemPrompt = this.getSystemPrompt(providerOptions.systemPrompt || options.getOption('aiSystemPrompt'));
+        // Get base system prompt
+        let systemPrompt = this.getSystemPrompt(providerOptions.systemPrompt || options.getOption('aiSystemPrompt'));
+
+        // Check if tools are enabled for this request
+        const willUseTools = providerOptions.enableTools && providerOptions.tools && providerOptions.tools.length > 0;
+
+        // Add tool instructions to system prompt if tools are enabled
+        if (willUseTools && PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS) {
+            log.info('Adding tool instructions to system prompt for OpenAI');
+            systemPrompt = `${systemPrompt}\n\n${PROVIDER_PROMPTS.OPENAI.TOOL_INSTRUCTIONS}`;
+        }
 
         // Ensure we have a system message
         const systemMessageExists = messages.some(m => m.role === 'system');
@@ -67,7 +79,7 @@
             }
 
             // Log the request parameters
-            console.log('OpenAI API Request:', JSON.stringify({
+            log.info(`OpenAI API Request: ${JSON.stringify({
                 endpoint: 'chat.completions.create',
                 model: params.model,
                 messages: params.messages,
@@ -76,7 +88,7 @@
                 stream: params.stream,
                 tools: params.tools,
                 tool_choice: params.tool_choice
-            }, null, 2));
+            }, null, 2)}`);
 
             // If streaming is requested
             if (providerOptions.stream) {
@@ -84,10 +96,10 @@
 
                 // Get stream from OpenAI SDK
                 const stream = await client.chat.completions.create(params);
-                console.log('OpenAI API Stream Started');
+                log.info('OpenAI API Stream Started');
 
                 // Create a closure to hold accumulated tool calls
-                let accumulatedToolCalls: any[] = [];
+                const accumulatedToolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
 
                 // Return a response with the stream handler
                 const response: ChatResponse = {
@@ -104,7 +116,8 @@
                             if (Symbol.asyncIterator in stream) {
                                 for await (const chunk of stream as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>) {
                                     // Log each chunk received from OpenAI
-                                    console.log('OpenAI API Stream Chunk:', JSON.stringify(chunk, null, 2));
+                                    // Use info level as debug is not available
+                                    log.info(`OpenAI API Stream Chunk: ${JSON.stringify(chunk, null, 2)}`);
 
                                     const content = chunk.choices[0]?.delta?.content || '';
                                     const isDone = !!chunk.choices[0]?.finish_reason;
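The service hunks also retype accumulatedToolCalls from any[] to OpenAI.Chat.ChatCompletionMessageToolCall[]. The accumulation itself is not part of this diff; a minimal sketch of how streamed tool-call deltas are typically merged follows (an illustrative helper, not code from this commit; field names follow the OpenAI SDK's streamed tool-call deltas).

import OpenAI from 'openai';

// Illustrative helper: merge one streamed chunk's tool-call deltas into the
// accumulated array. OpenAI streams tool calls as partial deltas keyed by
// `index`; the id/name arrive first and the JSON argument fragments must be
// concatenated as they stream in.
function mergeToolCallDeltas(
    accumulated: OpenAI.Chat.ChatCompletionMessageToolCall[],
    chunk: OpenAI.Chat.ChatCompletionChunk
): void {
    for (const delta of chunk.choices[0]?.delta?.tool_calls ?? []) {
        const existing = accumulated[delta.index];
        if (!existing) {
            accumulated[delta.index] = {
                id: delta.id ?? '',
                type: 'function',
                function: {
                    name: delta.function?.name ?? '',
                    arguments: delta.function?.arguments ?? ''
                }
            };
        } else {
            existing.function.arguments += delta.function?.arguments ?? '';
        }
    }
}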