Mirror of https://github.com/zadam/trilium.git

commit 5869eaff9a
parent a50575c12c
Author: perf3ct

    move more constants from files into centralized location

@@ -31,5 +31,64 @@ export const PROVIDER_CONSTANTS = {
                 maxTokens: 4096
             }
         ]
+    },
+
+    OPENAI: {
+        BASE_URL: 'https://api.openai.com/v1',
+        DEFAULT_MODEL: 'gpt-3.5-turbo',
+        DEFAULT_EMBEDDING_MODEL: 'text-embedding-ada-002',
+        CONTEXT_WINDOW: 16000,
+        EMBEDDING_DIMENSIONS: {
+            ADA: 1536,
+            DEFAULT: 1536
+        },
+        AVAILABLE_MODELS: [
+            {
+                id: 'gpt-4o',
+                name: 'GPT-4o',
+                description: 'Most capable multimodal model',
+                maxTokens: 8192
+            },
+            {
+                id: 'gpt-4-turbo',
+                name: 'GPT-4 Turbo',
+                description: 'Advanced capabilities with higher token limit',
+                maxTokens: 8192
+            },
+            {
+                id: 'gpt-4',
+                name: 'GPT-4',
+                description: 'Original GPT-4 model',
+                maxTokens: 8192
+            },
+            {
+                id: 'gpt-3.5-turbo',
+                name: 'GPT-3.5 Turbo',
+                description: 'Fast and efficient model for most tasks',
+                maxTokens: 4096
+            }
+        ]
+    },
+
+    OLLAMA: {
+        BASE_URL: 'http://localhost:11434',
+        DEFAULT_MODEL: 'llama2',
+        BATCH_SIZE: 100,
+        CHUNKING: {
+            SIZE: 4000,
+            OVERLAP: 200
+        },
+        MODEL_DIMENSIONS: {
+            default: 4096,
+            llama2: 4096,
+            mixtral: 4096,
+            'mistral': 4096
+        },
+        MODEL_CONTEXT_WINDOWS: {
+            default: 8192,
+            llama2: 4096,
+            mixtral: 8192,
+            'mistral': 8192
+        }
+    }
     }
 } as const;
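
Both new blocks are plain data under the existing `as const` declaration, so every value above is typed as a readonly literal rather than a widened string or number. A minimal consumer sketch (not part of this commit; `ollamaContextWindow` is a hypothetical helper and the import path is illustrative):

    import { PROVIDER_CONSTANTS } from './constants/provider_constants.js';

    // Resolve the context window for an Ollama model, falling back to the
    // provider-wide default for models not listed in MODEL_CONTEXT_WINDOWS.
    function ollamaContextWindow(model: string): number {
        const windows = PROVIDER_CONSTANTS.OLLAMA.MODEL_CONTEXT_WINDOWS;
        return model in windows
            ? windows[model as keyof typeof windows]
            : windows.default;
    }

    ollamaContextWindow('llama2'); // 4096
    ollamaContextWindow('phi3');   // 8192 (not listed, uses default)
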
@@ -1,6 +1,7 @@
 import options from '../../options.js';
 import { BaseAIService } from '../base_ai_service.js';
 import type { ChatCompletionOptions, ChatResponse, Message } from '../ai_interface.js';
+import { PROVIDER_CONSTANTS } from '../constants/provider_constants.js';
 
 export class OllamaService extends BaseAIService {
     constructor() {
@@ -18,8 +19,8 @@ export class OllamaService extends BaseAIService {
             throw new Error('Ollama service is not available. Check Ollama settings.');
         }
 
-        const baseUrl = options.getOption('ollamaBaseUrl') || 'http://localhost:11434';
-        const model = opts.model || options.getOption('ollamaDefaultModel') || 'llama2';
+        const baseUrl = options.getOption('ollamaBaseUrl') || PROVIDER_CONSTANTS.OLLAMA.BASE_URL;
+        const model = opts.model || options.getOption('ollamaDefaultModel') || PROVIDER_CONSTANTS.OLLAMA.DEFAULT_MODEL;
         const temperature = opts.temperature !== undefined
             ? opts.temperature
             : parseFloat(options.getOption('aiTemperature') || '0.7');
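
The fallback chain in OllamaService resolves configuration in priority order: an explicit per-request option, then the user's saved option, then the centralized constant. A standalone sketch of that order (hypothetical names; not code from this commit):

    // Priority: per-request model > saved user option > centralized default.
    // OLLAMA_DEFAULT_MODEL stands in for PROVIDER_CONSTANTS.OLLAMA.DEFAULT_MODEL.
    const OLLAMA_DEFAULT_MODEL = 'llama2';

    function resolveModel(requestModel?: string, savedOption?: string): string {
        return requestModel || savedOption || OLLAMA_DEFAULT_MODEL;
    }

    resolveModel('mixtral', 'llama2'); // 'mixtral' (per-request option wins)
    resolveModel(undefined, '');       // 'llama2' (empty saved option falls through)

Note that `||` treats an empty string as unset, which is why an unconfigured option string falls through to the constant.
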
@@ -1,6 +1,7 @@
 import options from '../../options.js';
 import { BaseAIService } from '../base_ai_service.js';
 import type { ChatCompletionOptions, ChatResponse, Message } from '../ai_interface.js';
+import { PROVIDER_CONSTANTS } from '../constants/provider_constants.js';
 
 export class OpenAIService extends BaseAIService {
     constructor() {
@@ -17,8 +18,8 @@ export class OpenAIService extends BaseAIService {
         }
 
         const apiKey = options.getOption('openaiApiKey');
-        const baseUrl = options.getOption('openaiBaseUrl') || 'https://api.openai.com/v1';
-        const model = opts.model || options.getOption('openaiDefaultModel') || 'gpt-3.5-turbo';
+        const baseUrl = options.getOption('openaiBaseUrl') || PROVIDER_CONSTANTS.OPENAI.BASE_URL;
+        const model = opts.model || options.getOption('openaiDefaultModel') || PROVIDER_CONSTANTS.OPENAI.DEFAULT_MODEL;
         const temperature = opts.temperature !== undefined
             ? opts.temperature
             : parseFloat(options.getOption('aiTemperature') || '0.7');
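
The temperature setting follows the same pattern but parses a stored string. A hedged sketch of that parsing, with a hypothetical NaN guard that the committed code does not include:

    // parseFloat of a malformed stored value yields NaN; the guard below is a
    // hypothetical hardening, not part of this commit.
    function resolveTemperature(optTemp: number | undefined, stored: string | null): number {
        if (optTemp !== undefined) {
            return optTemp; // explicit per-request temperature always wins
        }
        const parsed = parseFloat(stored || '0.7');
        return Number.isNaN(parsed) ? 0.7 : parsed;
    }

    resolveTemperature(undefined, '0.2'); // 0.2
    resolveTemperature(undefined, 'abc'); // 0.7 (guard catches NaN)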