feat(llm): redo chat storage, part 2

perf3ct 2025-06-02 02:38:21 +00:00
parent 35f78aede9
commit f6af617f6b
2 changed files with 56 additions and 3 deletions

View File

@@ -69,7 +69,6 @@ export default class LlmChatPanel extends BasicWidget {
             totalTokens?: number;
         };
     } = {
-        model: 'default',
         temperature: 0.7,
         toolExecutions: []
     };
@@ -332,7 +331,7 @@ export default class LlmChatPanel extends BasicWidget {
             sources: this.sources || [],
             // Add metadata
             metadata: {
-                model: this.metadata?.model || 'default',
+                model: this.metadata?.model || undefined,
                 provider: this.metadata?.provider || undefined,
                 temperature: this.metadata?.temperature || 0.7,
                 lastUpdated: new Date().toISOString(),
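
A note on the client-side change above: `this.metadata?.model || undefined` collapses every falsy value (missing metadata, empty string) to `undefined`, so saved chats no longer persist a placeholder model name and the server is left to resolve the real one. A minimal sketch of that coercion, with hypothetical values:

    // Hypothetical values; `|| undefined` normalizes falsy models to undefined
    // instead of writing a placeholder string into the saved chat.
    const models = [undefined, '', 'gpt-4o'];
    for (const model of models) {
        console.log(model, '->', model || undefined);
    }
    // undefined -> undefined
    // ''        -> undefined
    // 'gpt-4o'  -> 'gpt-4o'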

View File

@@ -267,7 +267,8 @@ class RestChatService {
             systemPrompt: session?.messages.find(m => m.role === 'system')?.content,
             temperature: session?.metadata.temperature,
             maxTokens: session?.metadata.maxTokens,
-            model: session?.metadata.model,
+            // Get the user's preferred model if session model is 'default' or not set
+            model: await this.getPreferredModel(session?.metadata.model),
             // Set stream based on request type, but ensure it's explicitly a boolean value
             // GET requests or format=stream parameter indicates streaming should be used
             stream: !!(req.method === 'GET' || req.query.format === 'stream' || req.query.stream === 'true'),
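
For reference, the `stream` expression above enables streaming for any of three request shapes: a GET request, `?format=stream`, or `?stream=true`. A small sketch of the check, with a hypothetical minimal request type standing in for the Express request:

    // Hypothetical minimal request shape; only method and query are consulted.
    interface ReqLike {
        method: string;
        query: Record<string, string | undefined>;
    }

    function resolveStream(req: ReqLike): boolean {
        return !!(req.method === 'GET' || req.query.format === 'stream' || req.query.stream === 'true');
    }

    console.log(resolveStream({ method: 'GET', query: {} }));                    // true
    console.log(resolveStream({ method: 'POST', query: { format: 'stream' } })); // true
    console.log(resolveStream({ method: 'POST', query: {} }));                   // false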
@@ -702,6 +703,59 @@ class RestChatService {
         // Create from Chat Note
         return await this.createSessionFromChatNote(noteId);
     }
+
+    /**
+     * Get the user's preferred model
+     */
+    async getPreferredModel(sessionModel?: string): Promise<string | undefined> {
+        // If the session already has a valid model (not 'default'), use it
+        if (sessionModel && sessionModel !== 'default') {
+            return sessionModel;
+        }
+
+        try {
+            // Get provider precedence list (same logic as model selection stage)
+            const providerPrecedence = await options.getOption('aiProviderPrecedence');
+            let defaultProvider = 'openai';
+            let defaultModelName = 'gpt-3.5-turbo';
+
+            if (providerPrecedence) {
+                // Parse provider precedence list
+                let providers: string[] = [];
+                if (providerPrecedence.includes(',')) {
+                    providers = providerPrecedence.split(',').map(p => p.trim());
+                } else if (providerPrecedence.startsWith('[') && providerPrecedence.endsWith(']')) {
+                    providers = JSON.parse(providerPrecedence);
+                } else {
+                    providers = [providerPrecedence];
+                }
+
+                // Get first available provider
+                if (providers.length > 0) {
+                    const firstProvider = providers[0];
+                    defaultProvider = firstProvider;
+
+                    // Get provider-specific default model
+                    if (firstProvider === 'openai') {
+                        const model = await options.getOption('openaiDefaultModel');
+                        if (model) defaultModelName = model;
+                    } else if (firstProvider === 'anthropic') {
+                        const model = await options.getOption('anthropicDefaultModel');
+                        if (model) defaultModelName = model;
+                    } else if (firstProvider === 'ollama') {
+                        const model = await options.getOption('ollamaDefaultModel');
+                        if (model) defaultModelName = model;
+                    }
+                }
+            }
+
+            log.info(`Selected user's preferred model: ${defaultModelName} from provider: ${defaultProvider}`);
+            return defaultModelName;
+        } catch (error) {
+            log.error(`Error getting user's preferred model: ${error}`);
+            return 'gpt-3.5-turbo'; // Fallback
+        }
+    }
 }

 // Create singleton instance
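
For reference, `getPreferredModel` resolves in this order: a session model other than 'default' is returned as-is; otherwise the first provider in `aiProviderPrecedence` supplies its configured default model, starting from the built-in 'openai' / 'gpt-3.5-turbo' defaults; any thrown error yields 'gpt-3.5-turbo'. A standalone sketch of the precedence parsing, with hypothetical option values. Note the check order: a JSON array containing a comma matches the first branch and is split as a plain comma list, so only single-entry JSON arrays reach `JSON.parse`:

    // Mirrors the parsing branches in getPreferredModel (hypothetical inputs).
    function parsePrecedence(raw: string): string[] {
        if (raw.includes(',')) return raw.split(',').map(p => p.trim());
        if (raw.startsWith('[') && raw.endsWith(']')) return JSON.parse(raw);
        return [raw];
    }

    console.log(parsePrecedence('anthropic, openai')); // ['anthropic', 'openai']
    console.log(parsePrecedence('["ollama"]'));        // ['ollama']
    console.log(parsePrecedence('openai'));            // ['openai']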