mirror of https://github.com/zadam/trilium.git
synced 2025-12-04 22:44:25 +01:00

Revert "feat(search): try to decrease complexity"

This reverts commit 5b79e0d71ed9658e82cf050e23625370ec2ea52e.

This commit is contained in:
parent 0afb8a11c8
commit 06b2d71b27

@@ -1,66 +1,72 @@
/**
* Migration to add FTS5 full-text search support
* Migration to add FTS5 full-text search support and strategic performance indexes
*
* This migration implements a minimal FTS5 search solution that:
* 1. Uses a single FTS5 table with porter tokenizer for stemming
* 2. Implements simple triggers for synchronization
* 3. Excludes protected notes from indexing
* 4. Sets essential performance pragmas
* This migration:
* 1. Creates an FTS5 virtual table for full-text searching
* 2. Populates it with existing note content
* 3. Creates triggers to keep the FTS table synchronized with note changes
* 4. Adds strategic composite and covering indexes for improved query performance
* 5. Optimizes common query patterns identified through performance analysis
*/

import sql from "../services/sql.js";
import log from "../services/log.js";

export default function addFTS5SearchAndPerformanceIndexes() {
log.info("Setting up FTS5 search...");
log.info("Starting FTS5 and performance optimization migration...");

// Create FTS5 virtual table with porter tokenizer
log.info("Creating FTS5 virtual table...");
// Part 1: FTS5 Setup
log.info("Creating FTS5 virtual table for full-text search...");

// Create FTS5 virtual tables
// We create two FTS tables for different search strategies:
// 1. notes_fts: Uses porter stemming for word-based searches
// 2. notes_fts_trigram: Uses trigram tokenizer for substring searches

sql.executeScript(`
-- Drop existing FTS tables if they exist
-- Drop existing FTS tables if they exist (for re-running migration in dev)
DROP TABLE IF EXISTS notes_fts;
DROP TABLE IF EXISTS notes_fts_trigram;
DROP TABLE IF EXISTS notes_fts_config;
DROP TABLE IF EXISTS notes_fts_stats;
DROP TABLE IF EXISTS notes_fts_aux;

-- Create FTS5 virtual table with porter tokenizer for stemming
-- Create FTS5 virtual table with porter stemming for word-based searches
CREATE VIRTUAL TABLE IF NOT EXISTS notes_fts USING fts5(
noteId UNINDEXED,
title,
content,
tokenize = 'porter unicode61',
prefix = '2 3' -- Index prefixes of 2 and 3 characters for faster prefix searches
tokenize = 'porter unicode61'
);

-- Create FTS5 virtual table with trigram tokenizer for substring searches
-- detail='none' reduces storage by ~50% since we don't need snippets for substring search
CREATE VIRTUAL TABLE IF NOT EXISTS notes_fts_trigram USING fts5(
noteId UNINDEXED,
title,
content,
tokenize = 'trigram',
detail = 'none'
);
`);

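// For context, a minimal sketch (not part of this commit; assumes the two tables
// created above) of how the two tokenizers divide the work: the porter table
// answers stemmed word queries, while the trigram table answers substring
// queries of three or more characters.
import sql from "../services/sql.js";

function sketchWordSearch(word: string) {
    // Porter stemming means 'linking' also matches documents containing 'link'
    return sql.getRows(
        `SELECT noteId, title FROM notes_fts WHERE notes_fts MATCH ?`,
        [`"${word}"`]);
}

function sketchSubstringSearch(fragment: string) {
    // The trigram tokenizer indexes every 3-character window, so any
    // fragment of length >= 3 can be found anywhere inside a word
    return sql.getRows(
        `SELECT noteId, title FROM notes_fts_trigram WHERE notes_fts_trigram MATCH ?`,
        [`"${fragment}"`]);
}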
log.info("Populating FTS5 table with existing note content...");
|
||||
|
||||
// Populate the FTS table with existing notes
|
||||
const batchSize = 1000;
|
||||
// We only index text-based note types that contain searchable content
|
||||
const batchSize = 100;
|
||||
let processedCount = 0;
|
||||
let hasError = false;
|
||||
|
||||
// Wrap entire population process in a transaction for consistency
|
||||
// If any error occurs, the entire population will be rolled back
|
||||
try {
|
||||
sql.transactional(() => {
|
||||
// Count eligible notes
|
||||
const totalNotes = sql.getValue<number>(`
|
||||
SELECT COUNT(*)
|
||||
FROM notes n
|
||||
LEFT JOIN blobs b ON n.blobId = b.blobId
|
||||
WHERE n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
|
||||
AND n.isDeleted = 0
|
||||
AND n.isProtected = 0
|
||||
AND b.content IS NOT NULL
|
||||
`) || 0;
|
||||
|
||||
log.info(`Found ${totalNotes} notes to index`);
|
||||
|
||||
// Insert notes in batches
|
||||
let offset = 0;
|
||||
while (offset < totalNotes) {
|
||||
sql.execute(`
|
||||
INSERT INTO notes_fts (noteId, title, content)
|
||||
|
||||
while (true) {
|
||||
const notes = sql.getRows<{
|
||||
noteId: string;
|
||||
title: string;
|
||||
content: string | null;
|
||||
}>(`
|
||||
SELECT
|
||||
n.noteId,
|
||||
n.title,
|
||||
@ -69,44 +75,77 @@ export default function addFTS5SearchAndPerformanceIndexes() {
|
||||
LEFT JOIN blobs b ON n.blobId = b.blobId
|
||||
WHERE n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
|
||||
AND n.isDeleted = 0
|
||||
AND n.isProtected = 0
|
||||
AND b.content IS NOT NULL
|
||||
AND n.isProtected = 0 -- Skip protected notes - they require special handling
|
||||
ORDER BY n.noteId
|
||||
LIMIT ? OFFSET ?
|
||||
`, [batchSize, offset]);
|
||||
|
||||
offset += batchSize;
|
||||
processedCount = Math.min(offset, totalNotes);
|
||||
if (notes.length === 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (processedCount % 10000 === 0) {
|
||||
log.info(`Indexed ${processedCount} of ${totalNotes} notes...`);
|
||||
for (const note of notes) {
|
||||
if (note.content) {
|
||||
// Process content based on type (simplified for migration)
|
||||
let processedContent = note.content;
|
||||
|
||||
// For HTML content, we'll strip tags in the search service
|
||||
// For now, just insert the raw content
|
||||
|
||||
// Insert into porter FTS for word-based searches
|
||||
sql.execute(`
|
||||
INSERT INTO notes_fts (noteId, title, content)
|
||||
VALUES (?, ?, ?)
|
||||
`, [note.noteId, note.title, processedContent]);
|
||||
|
||||
// Also insert into trigram FTS for substring searches
|
||||
sql.execute(`
|
||||
INSERT INTO notes_fts_trigram (noteId, title, content)
|
||||
VALUES (?, ?, ?)
|
||||
`, [note.noteId, note.title, processedContent]);
|
||||
|
||||
processedCount++;
|
||||
}
|
||||
}
|
||||
|
||||
offset += batchSize;
|
||||
|
||||
if (processedCount % 1000 === 0) {
|
||||
log.info(`Processed ${processedCount} notes for FTS indexing...`);
|
||||
}
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
log.error(`Failed to populate FTS index: ${error}`);
|
||||
hasError = true;
|
||||
log.error(`Failed to populate FTS index. Rolling back... ${error}`);
|
||||
// Clean up partial data if transaction failed
|
||||
try {
|
||||
sql.execute("DELETE FROM notes_fts");
|
||||
} catch (cleanupError) {
|
||||
log.error(`Failed to clean up FTS table after error: ${cleanupError}`);
|
||||
}
|
||||
throw new Error(`FTS5 migration failed during population: ${error}`);
|
||||
}
|
||||
|
||||
log.info(`Completed FTS indexing of ${processedCount} notes`);
|
||||
|
||||
// Create synchronization triggers
// Create triggers to keep FTS table synchronized
log.info("Creating FTS synchronization triggers...");

// Drop all existing triggers first
const existingTriggers = [
'notes_fts_insert', 'notes_fts_update', 'notes_fts_delete',
'notes_fts_soft_delete', 'notes_fts_blob_insert', 'notes_fts_blob_update',
'notes_fts_protect', 'notes_fts_unprotect', 'notes_fts_sync',
'notes_fts_update_sync', 'notes_fts_delete_sync', 'blobs_fts_sync',
'blobs_fts_insert_sync'
];
// Drop all existing triggers first to ensure clean state
sql.execute(`DROP TRIGGER IF EXISTS notes_fts_insert`);
sql.execute(`DROP TRIGGER IF EXISTS notes_fts_update`);
sql.execute(`DROP TRIGGER IF EXISTS notes_fts_delete`);
sql.execute(`DROP TRIGGER IF EXISTS notes_fts_soft_delete`);
sql.execute(`DROP TRIGGER IF EXISTS notes_fts_blob_insert`);
sql.execute(`DROP TRIGGER IF EXISTS notes_fts_blob_update`);
sql.execute(`DROP TRIGGER IF EXISTS notes_fts_protect`);
sql.execute(`DROP TRIGGER IF EXISTS notes_fts_unprotect`);

for (const trigger of existingTriggers) {
sql.execute(`DROP TRIGGER IF EXISTS ${trigger}`);
}
// Create improved triggers that handle all SQL operations properly
// including INSERT OR REPLACE and INSERT ... ON CONFLICT ... DO UPDATE (upsert)

// Create triggers for notes table operations
// Trigger for INSERT operations on notes
sql.execute(`
CREATE TRIGGER notes_fts_insert
AFTER INSERT ON notes
@@ -114,75 +153,134 @@ export default function addFTS5SearchAndPerformanceIndexes() {
AND NEW.isDeleted = 0
AND NEW.isProtected = 0
BEGIN
-- First delete any existing FTS entries (in case of INSERT OR REPLACE)
DELETE FROM notes_fts WHERE noteId = NEW.noteId;
DELETE FROM notes_fts_trigram WHERE noteId = NEW.noteId;

-- Then insert the new entry into both FTS tables, using LEFT JOIN to handle missing blobs
INSERT INTO notes_fts (noteId, title, content)
SELECT
NEW.noteId,
NEW.title,
COALESCE(b.content, '') -- Use empty string if blob doesn't exist yet
FROM (SELECT NEW.noteId) AS note_select
LEFT JOIN blobs b ON b.blobId = NEW.blobId;

INSERT INTO notes_fts_trigram (noteId, title, content)
SELECT
NEW.noteId,
NEW.title,
COALESCE(b.content, '')
FROM (SELECT NEW.blobId AS blobId) AS note_blob
LEFT JOIN blobs b ON b.blobId = note_blob.blobId;
END;
FROM (SELECT NEW.noteId) AS note_select
LEFT JOIN blobs b ON b.blobId = NEW.blobId;
END
`);

// Trigger for UPDATE operations on notes table
// Fires for ANY update to searchable notes to ensure FTS stays in sync
sql.execute(`
CREATE TRIGGER notes_fts_update
AFTER UPDATE ON notes
WHEN NEW.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
-- Fire on any change, not just specific columns, to handle all upsert scenarios
BEGIN
-- Delete old entry
DELETE FROM notes_fts WHERE noteId = OLD.noteId;
-- Always delete the old entries from both FTS tables
DELETE FROM notes_fts WHERE noteId = NEW.noteId;
DELETE FROM notes_fts_trigram WHERE noteId = NEW.noteId;

-- Insert new entry if eligible
-- Insert new entry into both FTS tables if note is not deleted and not protected
INSERT INTO notes_fts (noteId, title, content)
SELECT
NEW.noteId,
NEW.title,
COALESCE(b.content, '')
FROM (SELECT NEW.blobId AS blobId) AS note_blob
LEFT JOIN blobs b ON b.blobId = note_blob.blobId
WHERE NEW.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND NEW.isDeleted = 0
COALESCE(b.content, '') -- Use empty string if blob doesn't exist yet
FROM (SELECT NEW.noteId) AS note_select
LEFT JOIN blobs b ON b.blobId = NEW.blobId
WHERE NEW.isDeleted = 0
AND NEW.isProtected = 0;
END;

INSERT INTO notes_fts_trigram (noteId, title, content)
SELECT
NEW.noteId,
NEW.title,
COALESCE(b.content, '')
FROM (SELECT NEW.noteId) AS note_select
LEFT JOIN blobs b ON b.blobId = NEW.blobId
WHERE NEW.isDeleted = 0
AND NEW.isProtected = 0;
END
`);

// Trigger for DELETE operations on notes
sql.execute(`
CREATE TRIGGER notes_fts_delete
AFTER DELETE ON notes
BEGIN
DELETE FROM notes_fts WHERE noteId = OLD.noteId;
END;
DELETE FROM notes_fts_trigram WHERE noteId = OLD.noteId;
END
`);

// Create triggers for blob updates
// Trigger for soft delete (isDeleted = 1)
sql.execute(`
CREATE TRIGGER blobs_fts_update
AFTER UPDATE ON blobs
CREATE TRIGGER notes_fts_soft_delete
AFTER UPDATE ON notes
WHEN OLD.isDeleted = 0 AND NEW.isDeleted = 1
BEGIN
-- Update all notes that reference this blob
DELETE FROM notes_fts
WHERE noteId IN (
SELECT noteId FROM notes
WHERE blobId = NEW.blobId
);
DELETE FROM notes_fts WHERE noteId = NEW.noteId;
DELETE FROM notes_fts_trigram WHERE noteId = NEW.noteId;
END
`);

// Trigger for notes becoming protected
sql.execute(`
CREATE TRIGGER notes_fts_protect
AFTER UPDATE ON notes
WHEN OLD.isProtected = 0 AND NEW.isProtected = 1
BEGIN
DELETE FROM notes_fts WHERE noteId = NEW.noteId;
DELETE FROM notes_fts_trigram WHERE noteId = NEW.noteId;
END
`);

// Trigger for notes becoming unprotected
sql.execute(`
CREATE TRIGGER notes_fts_unprotect
AFTER UPDATE ON notes
WHEN OLD.isProtected = 1 AND NEW.isProtected = 0
AND NEW.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND NEW.isDeleted = 0
BEGIN
DELETE FROM notes_fts WHERE noteId = NEW.noteId;
DELETE FROM notes_fts_trigram WHERE noteId = NEW.noteId;

INSERT INTO notes_fts (noteId, title, content)
SELECT
n.noteId,
n.title,
NEW.content
FROM notes n
WHERE n.blobId = NEW.blobId
AND n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND n.isDeleted = 0
AND n.isProtected = 0;
END;
NEW.noteId,
NEW.title,
COALESCE(b.content, '')
FROM (SELECT NEW.noteId) AS note_select
LEFT JOIN blobs b ON b.blobId = NEW.blobId;

INSERT INTO notes_fts_trigram (noteId, title, content)
SELECT
NEW.noteId,
NEW.title,
COALESCE(b.content, '')
FROM (SELECT NEW.noteId) AS note_select
LEFT JOIN blobs b ON b.blobId = NEW.blobId;
END
`);

// Trigger for INSERT operations on blobs
// Uses INSERT OR REPLACE for efficiency with deduplicated blobs
sql.execute(`
CREATE TRIGGER blobs_fts_insert
CREATE TRIGGER notes_fts_blob_insert
AFTER INSERT ON blobs
BEGIN
INSERT INTO notes_fts (noteId, title, content)
-- Use INSERT OR REPLACE for atomic update in both FTS tables
-- This handles the case where FTS entries may already exist
INSERT OR REPLACE INTO notes_fts (noteId, title, content)
SELECT
n.noteId,
n.title,
@@ -192,26 +290,340 @@ export default function addFTS5SearchAndPerformanceIndexes() {
AND n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND n.isDeleted = 0
AND n.isProtected = 0;
END;

INSERT OR REPLACE INTO notes_fts_trigram (noteId, title, content)
SELECT
n.noteId,
n.title,
NEW.content
FROM notes n
WHERE n.blobId = NEW.blobId
AND n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND n.isDeleted = 0
AND n.isProtected = 0;
END
`);

// Trigger for UPDATE operations on blobs
// Uses INSERT OR REPLACE for efficiency
sql.execute(`
CREATE TRIGGER notes_fts_blob_update
AFTER UPDATE ON blobs
BEGIN
-- Use INSERT OR REPLACE for atomic update in both FTS tables
INSERT OR REPLACE INTO notes_fts (noteId, title, content)
SELECT
n.noteId,
n.title,
NEW.content
FROM notes n
WHERE n.blobId = NEW.blobId
AND n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND n.isDeleted = 0
AND n.isProtected = 0;

INSERT OR REPLACE INTO notes_fts_trigram (noteId, title, content)
SELECT
n.noteId,
n.title,
NEW.content
FROM notes n
WHERE n.blobId = NEW.blobId
AND n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND n.isDeleted = 0
AND n.isProtected = 0;
END
`);

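// A hedged smoke-test sketch (not part of this commit) of the synchronization
// the triggers above are meant to guarantee: updating a note row should make
// the change visible to MATCH queries immediately. Real `notes` rows have more
// required columns than shown; this only illustrates the expected behavior.
import sql from "../services/sql.js";

function sketchTriggerSync(noteId: string, newTitle: string) {
    // The AFTER UPDATE trigger deletes and re-inserts the row in both FTS tables
    sql.execute(`UPDATE notes SET title = ? WHERE noteId = ?`, [newTitle, noteId]);
    // noteId is UNINDEXED but stored, so it can still be used as an equality filter
    return sql.getValue(
        `SELECT COUNT(*) FROM notes_fts WHERE noteId = ? AND notes_fts MATCH ?`,
        [noteId, `"${newTitle}"`]); // expected > 0 for an eligible, unprotected note
}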
log.info("FTS5 setup completed successfully");
|
||||
|
||||
// Run optimization
|
||||
log.info("Optimizing FTS5 index...");
|
||||
sql.execute(`INSERT INTO notes_fts(notes_fts) VALUES('optimize')`);
|
||||
// Final cleanup: ensure all eligible notes are indexed in both FTS tables
|
||||
// This catches any edge cases where notes might have been missed
|
||||
log.info("Running final FTS index cleanup...");
|
||||
|
||||
// Set essential SQLite pragmas for better performance
|
||||
// Check and fix porter FTS table
|
||||
const missingPorterCount = sql.getValue<number>(`
|
||||
SELECT COUNT(*) FROM notes n
|
||||
LEFT JOIN blobs b ON n.blobId = b.blobId
|
||||
WHERE n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
|
||||
AND n.isDeleted = 0
|
||||
AND n.isProtected = 0
|
||||
AND b.content IS NOT NULL
|
||||
AND NOT EXISTS (SELECT 1 FROM notes_fts WHERE noteId = n.noteId)
|
||||
`) || 0;
|
||||
|
||||
if (missingPorterCount > 0) {
|
||||
sql.execute(`
|
||||
WITH missing_notes AS (
|
||||
SELECT n.noteId, n.title, b.content
|
||||
FROM notes n
|
||||
LEFT JOIN blobs b ON n.blobId = b.blobId
|
||||
WHERE n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
|
||||
AND n.isDeleted = 0
|
||||
AND n.isProtected = 0
|
||||
AND b.content IS NOT NULL
|
||||
AND NOT EXISTS (SELECT 1 FROM notes_fts WHERE noteId = n.noteId)
|
||||
)
|
||||
INSERT INTO notes_fts (noteId, title, content)
|
||||
SELECT noteId, title, content FROM missing_notes
|
||||
`);
|
||||
log.info(`Indexed ${missingPorterCount} additional notes in porter FTS during cleanup`);
|
||||
}
|
||||
|
||||
// Check and fix trigram FTS table
|
||||
const missingTrigramCount = sql.getValue<number>(`
|
||||
SELECT COUNT(*) FROM notes n
|
||||
LEFT JOIN blobs b ON n.blobId = b.blobId
|
||||
WHERE n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
|
||||
AND n.isDeleted = 0
|
||||
AND n.isProtected = 0
|
||||
AND b.content IS NOT NULL
|
||||
AND NOT EXISTS (SELECT 1 FROM notes_fts_trigram WHERE noteId = n.noteId)
|
||||
`) || 0;
|
||||
|
||||
if (missingTrigramCount > 0) {
|
||||
sql.execute(`
|
||||
WITH missing_notes AS (
|
||||
SELECT n.noteId, n.title, b.content
|
||||
FROM notes n
|
||||
LEFT JOIN blobs b ON n.blobId = b.blobId
|
||||
WHERE n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
|
||||
AND n.isDeleted = 0
|
||||
AND n.isProtected = 0
|
||||
AND b.content IS NOT NULL
|
||||
AND NOT EXISTS (SELECT 1 FROM notes_fts_trigram WHERE noteId = n.noteId)
|
||||
)
|
||||
INSERT INTO notes_fts_trigram (noteId, title, content)
|
||||
SELECT noteId, title, content FROM missing_notes
|
||||
`);
|
||||
log.info(`Indexed ${missingTrigramCount} additional notes in trigram FTS during cleanup`);
|
||||
}
|
||||
|
||||
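// A minimal sketch (assumptions: the notes_fts table above; 'optimize' and
// 'rebuild' are standard FTS5 special commands) of the index-maintenance
// commands this migration relies on: 'optimize' merges the index b-trees,
// and 'rebuild' can recreate the index if it is suspected to be inconsistent.
import sql from "../services/sql.js";

function sketchFtsMaintenance() {
    // Merge all FTS5 index segments into one (same command the migration runs)
    sql.execute(`INSERT INTO notes_fts(notes_fts) VALUES('optimize')`);
    // Rebuild the full-text index from the table's stored content
    sql.execute(`INSERT INTO notes_fts(notes_fts) VALUES('rebuild')`);
}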
// ========================================
// Part 2: Strategic Performance Indexes
// ========================================

log.info("Adding strategic performance indexes...");
const startTime = Date.now();
const indexesCreated: string[] = [];

try {
// ========================================
// NOTES TABLE INDEXES
// ========================================

// Composite index for common search filters
log.info("Creating composite index on notes table for search filters...");
sql.executeScript(`
-- Increase cache size (50MB)
PRAGMA cache_size = -50000;
DROP INDEX IF EXISTS IDX_notes_search_composite;
CREATE INDEX IF NOT EXISTS IDX_notes_search_composite
ON notes (isDeleted, type, mime, dateModified DESC);
`);
indexesCreated.push("IDX_notes_search_composite");

-- Use memory for temp storage
PRAGMA temp_store = 2;
// Covering index for note metadata queries
log.info("Creating covering index for note metadata...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_notes_metadata_covering;
CREATE INDEX IF NOT EXISTS IDX_notes_metadata_covering
ON notes (noteId, isDeleted, type, mime, title, dateModified, isProtected);
`);
indexesCreated.push("IDX_notes_metadata_covering");

-- Run ANALYZE on FTS tables
// Index for protected notes filtering
log.info("Creating index for protected notes...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_notes_protected_deleted;
CREATE INDEX IF NOT EXISTS IDX_notes_protected_deleted
ON notes (isProtected, isDeleted)
WHERE isProtected = 1;
`);
indexesCreated.push("IDX_notes_protected_deleted");

// ========================================
// BRANCHES TABLE INDEXES
// ========================================

// Composite index for tree traversal
log.info("Creating composite index on branches for tree traversal...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_branches_tree_traversal;
CREATE INDEX IF NOT EXISTS IDX_branches_tree_traversal
ON branches (parentNoteId, isDeleted, notePosition);
`);
indexesCreated.push("IDX_branches_tree_traversal");

// Covering index for branch queries
log.info("Creating covering index for branch queries...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_branches_covering;
CREATE INDEX IF NOT EXISTS IDX_branches_covering
ON branches (noteId, parentNoteId, isDeleted, notePosition, prefix);
`);
indexesCreated.push("IDX_branches_covering");

// Index for finding all parents of a note
log.info("Creating index for reverse tree lookup...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_branches_note_parents;
CREATE INDEX IF NOT EXISTS IDX_branches_note_parents
ON branches (noteId, isDeleted)
WHERE isDeleted = 0;
`);
indexesCreated.push("IDX_branches_note_parents");

// ========================================
// ATTRIBUTES TABLE INDEXES
// ========================================

// Composite index for attribute searches
log.info("Creating composite index on attributes for search...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_attributes_search_composite;
CREATE INDEX IF NOT EXISTS IDX_attributes_search_composite
ON attributes (name, value, isDeleted);
`);
indexesCreated.push("IDX_attributes_search_composite");

// Covering index for attribute queries
log.info("Creating covering index for attribute queries...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_attributes_covering;
CREATE INDEX IF NOT EXISTS IDX_attributes_covering
ON attributes (noteId, name, value, type, isDeleted, position);
`);
indexesCreated.push("IDX_attributes_covering");

// Index for inherited attributes
log.info("Creating index for inherited attributes...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_attributes_inheritable;
CREATE INDEX IF NOT EXISTS IDX_attributes_inheritable
ON attributes (isInheritable, isDeleted)
WHERE isInheritable = 1 AND isDeleted = 0;
`);
indexesCreated.push("IDX_attributes_inheritable");

// Index for specific attribute types
log.info("Creating index for label attributes...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_attributes_labels;
CREATE INDEX IF NOT EXISTS IDX_attributes_labels
ON attributes (type, name, value)
WHERE type = 'label' AND isDeleted = 0;
`);
indexesCreated.push("IDX_attributes_labels");

log.info("Creating index for relation attributes...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_attributes_relations;
CREATE INDEX IF NOT EXISTS IDX_attributes_relations
ON attributes (type, name, value)
WHERE type = 'relation' AND isDeleted = 0;
`);
indexesCreated.push("IDX_attributes_relations");

// ========================================
// BLOBS TABLE INDEXES
// ========================================

// Index for blob content size filtering
log.info("Creating index for blob content size...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_blobs_content_size;
CREATE INDEX IF NOT EXISTS IDX_blobs_content_size
ON blobs (blobId, LENGTH(content));
`);
indexesCreated.push("IDX_blobs_content_size");

// ========================================
// ATTACHMENTS TABLE INDEXES
// ========================================

// Composite index for attachment queries
log.info("Creating composite index for attachments...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_attachments_composite;
CREATE INDEX IF NOT EXISTS IDX_attachments_composite
ON attachments (ownerId, role, isDeleted, position);
`);
indexesCreated.push("IDX_attachments_composite");

// ========================================
// REVISIONS TABLE INDEXES
// ========================================

// Composite index for revision queries
log.info("Creating composite index for revisions...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_revisions_note_date;
CREATE INDEX IF NOT EXISTS IDX_revisions_note_date
ON revisions (noteId, utcDateCreated DESC);
`);
indexesCreated.push("IDX_revisions_note_date");

// ========================================
// ENTITY_CHANGES TABLE INDEXES
// ========================================

// Composite index for sync operations
log.info("Creating composite index for entity changes sync...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_entity_changes_sync;
CREATE INDEX IF NOT EXISTS IDX_entity_changes_sync
ON entity_changes (isSynced, utcDateChanged);
`);
indexesCreated.push("IDX_entity_changes_sync");

// Index for component-based queries
log.info("Creating index for component-based entity change queries...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_entity_changes_component;
CREATE INDEX IF NOT EXISTS IDX_entity_changes_component
ON entity_changes (componentId, utcDateChanged DESC);
`);
indexesCreated.push("IDX_entity_changes_component");

// ========================================
// RECENT_NOTES TABLE INDEXES
// ========================================

// Index for recent notes ordering
log.info("Creating index for recent notes...");
sql.executeScript(`
DROP INDEX IF EXISTS IDX_recent_notes_date;
CREATE INDEX IF NOT EXISTS IDX_recent_notes_date
ON recent_notes (utcDateCreated DESC);
`);
indexesCreated.push("IDX_recent_notes_date");

// ========================================
// ANALYZE TABLES FOR QUERY PLANNER
// ========================================

log.info("Running ANALYZE to update SQLite query planner statistics...");
sql.executeScript(`
ANALYZE notes;
ANALYZE branches;
ANALYZE attributes;
ANALYZE blobs;
ANALYZE attachments;
ANALYZE revisions;
ANALYZE entity_changes;
ANALYZE recent_notes;
ANALYZE notes_fts;
`);

log.info("FTS5 migration completed successfully");
const endTime = Date.now();
const duration = endTime - startTime;

log.info(`Performance index creation completed in ${duration}ms`);
log.info(`Created ${indexesCreated.length} indexes: ${indexesCreated.join(", ")}`);

} catch (error) {
log.error(`Error creating performance indexes: ${error}`);
throw error;
}

log.info("FTS5 and performance optimization migration completed successfully");
}
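For context, a minimal sketch (hypothetical helper, not part of this commit) of how the strategic indexes above could be validated against SQLite's query planner: EXPLAIN QUERY PLAN reports which index is chosen for a given filter shape, and after this migration it should mention IDX_notes_search_composite for the query below.

import sql from "../services/sql.js";

function sketchCheckIndexUsage() {
    // The filter shape matches the composite index columns
    // (isDeleted, type, mime, dateModified DESC) created above
    return sql.getRows(
        `EXPLAIN QUERY PLAN
         SELECT noteId FROM notes
         WHERE isDeleted = 0 AND type = 'text' AND mime = 'text/html'
         ORDER BY dateModified DESC`);
}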
@@ -1,216 +0,0 @@
/**
* Minimal FTS5 implementation for Trilium Notes
*
* Design principles:
* - Use only native SQLite FTS5 functionality
* - Single FTS table with porter tokenizer for word search
* - Prefix indexes for substring matching
* - Simple triggers for synchronization
* - No complex memory management or optimization
* - Let SQLite handle the scale
*/

import sql from "../services/sql.js";
import log from "../services/log.js";

export default function addMinimalFTS5Search() {
log.info("Setting up minimal FTS5 search for large-scale databases...");

// Step 1: Clean up any existing FTS tables
log.info("Cleaning up existing FTS tables...");
sql.executeScript(`
-- Drop all existing FTS-related tables
DROP TABLE IF EXISTS notes_fts;
DROP TABLE IF EXISTS notes_fts_trigram;
DROP TABLE IF EXISTS notes_fts_aux;
DROP TABLE IF EXISTS notes_fts_config;
DROP TABLE IF EXISTS notes_fts_stats;
DROP VIEW IF EXISTS notes_content;
`);

// Step 2: Create the single FTS5 virtual table
log.info("Creating minimal FTS5 table...");
sql.executeScript(`
-- Single FTS5 table with porter tokenizer
-- Porter provides stemming for better word matching
-- Prefix indexes enable efficient substring search
CREATE VIRTUAL TABLE notes_fts USING fts5(
noteId UNINDEXED, -- Store noteId but don't index it
title,
content,
tokenize = 'porter unicode61',
prefix = '2 3 4' -- Index prefixes of 2, 3, and 4 chars for substring search
);

-- Create an index on notes table for efficient FTS joins
CREATE INDEX IF NOT EXISTS idx_notes_fts_lookup
ON notes(noteId, type, isDeleted, isProtected);
`);

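// For context, a minimal sketch (assuming the table above, with prefix = '2 3 4')
// of the prefix-based substring strategy this minimal design relied on: prefix
// indexes make `token*` queries cheap, which approximates substring matching
// without a separate trigram table.
import sql from "../services/sql.js";

function sketchPrefixSearch(prefixFragment: string) {
    // With prefix indexes, a query like 'ful*' resolves via the prefix index
    // rather than scanning the whole term dictionary
    return sql.getRows(
        `SELECT noteId, title FROM notes_fts WHERE notes_fts MATCH ?`,
        [`${prefixFragment}*`]);
}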
// Step 3: Set PRAGMA settings for large databases
log.info("Configuring SQLite for large database performance...");
sql.executeScript(`
-- Increase cache size to 256MB for better performance
PRAGMA cache_size = -256000;

-- Use memory for temp storage
PRAGMA temp_store = MEMORY;

-- Increase page size for better I/O with large data
-- Note: This only affects new databases, existing ones keep their page size
PRAGMA page_size = 8192;

-- Enable query planner optimizations
PRAGMA optimize;
`);

// Step 4: Initial population of FTS index
log.info("Populating FTS index with existing notes...");

try {
// Get total count for progress reporting
const totalNotes = sql.getValue<number>(`
SELECT COUNT(*)
FROM notes n
LEFT JOIN blobs b ON n.blobId = b.blobId
WHERE n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND n.isDeleted = 0
AND n.isProtected = 0
AND b.content IS NOT NULL
`) || 0;

log.info(`Found ${totalNotes} notes to index`);

if (totalNotes > 0) {
// Use a single INSERT...SELECT for maximum efficiency
// SQLite will handle the memory management internally
sql.transactional(() => {
sql.execute(`
INSERT INTO notes_fts (noteId, title, content)
SELECT
n.noteId,
n.title,
-- Limit content to first 500KB to prevent memory issues
-- Most searches don't need the full content
SUBSTR(b.content, 1, 500000) as content
FROM notes n
LEFT JOIN blobs b ON n.blobId = b.blobId
WHERE n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND n.isDeleted = 0
AND n.isProtected = 0
AND b.content IS NOT NULL
`);
});

log.info(`Indexed ${totalNotes} notes`);

// Run initial optimization
log.info("Running initial FTS optimization...");
sql.execute(`INSERT INTO notes_fts(notes_fts) VALUES('optimize')`);
}
} catch (error) {
log.error(`Failed to populate FTS index: ${error}`);
throw error;
}

// Step 5: Create simple triggers for synchronization
log.info("Creating FTS synchronization triggers...");

sql.executeScript(`
-- Trigger for INSERT operations
CREATE TRIGGER notes_fts_insert
AFTER INSERT ON notes
FOR EACH ROW
WHEN NEW.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND NEW.isDeleted = 0
AND NEW.isProtected = 0
BEGIN
INSERT INTO notes_fts (noteId, title, content)
SELECT
NEW.noteId,
NEW.title,
SUBSTR(b.content, 1, 500000)
FROM blobs b
WHERE b.blobId = NEW.blobId;
END;

-- Trigger for UPDATE operations
CREATE TRIGGER notes_fts_update
AFTER UPDATE ON notes
FOR EACH ROW
BEGIN
-- Always delete the old entry
DELETE FROM notes_fts WHERE noteId = OLD.noteId;

-- Insert new entry if eligible
INSERT INTO notes_fts (noteId, title, content)
SELECT
NEW.noteId,
NEW.title,
SUBSTR(b.content, 1, 500000)
FROM blobs b
WHERE b.blobId = NEW.blobId
AND NEW.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND NEW.isDeleted = 0
AND NEW.isProtected = 0;
END;

-- Trigger for DELETE operations
CREATE TRIGGER notes_fts_delete
AFTER DELETE ON notes
FOR EACH ROW
BEGIN
DELETE FROM notes_fts WHERE noteId = OLD.noteId;
END;

-- Trigger for blob updates
CREATE TRIGGER blobs_fts_update
AFTER UPDATE ON blobs
FOR EACH ROW
BEGIN
-- Update all notes that reference this blob
DELETE FROM notes_fts
WHERE noteId IN (
SELECT noteId FROM notes WHERE blobId = NEW.blobId
);

INSERT INTO notes_fts (noteId, title, content)
SELECT
n.noteId,
n.title,
SUBSTR(NEW.content, 1, 500000)
FROM notes n
WHERE n.blobId = NEW.blobId
AND n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND n.isDeleted = 0
AND n.isProtected = 0;
END;

-- Trigger for blob inserts
CREATE TRIGGER blobs_fts_insert
AFTER INSERT ON blobs
FOR EACH ROW
BEGIN
INSERT INTO notes_fts (noteId, title, content)
SELECT
n.noteId,
n.title,
SUBSTR(NEW.content, 1, 500000)
FROM notes n
WHERE n.blobId = NEW.blobId
AND n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
AND n.isDeleted = 0
AND n.isProtected = 0;
END;
`);

// Step 6: Analyze tables for query optimizer
log.info("Analyzing tables for query optimizer...");
sql.executeScript(`
ANALYZE notes;
ANALYZE notes_fts;
ANALYZE blobs;
`);

log.info("Minimal FTS5 setup completed successfully");
}
@@ -1,12 +1,12 @@
/**
* Tests for minimal FTS5 search service
* Tests for FTS5 search service improvements
*
* This test file validates the core FTS5 functionality:
* 1. FTS5 availability checking
* 2. Basic search operations
* 3. Protected notes handling
* 4. Error handling
* 5. Index statistics
* This test file validates the fixes implemented for:
* 1. Transaction rollback in migration
* 2. Protected notes handling
* 3. Error recovery and communication
* 4. Input validation for token sanitization
* 5. dbstat fallback for index monitoring
*/

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
@@ -17,7 +17,7 @@ vi.mock('../sql.js');
vi.mock('../log.js');
vi.mock('../protected_session.js');

describe('FTS5 Search Service', () => {
describe('FTS5 Search Service Improvements', () => {
let ftsSearchService: any;
let mockSql: any;
let mockLog: any;
@@ -30,11 +30,9 @@ describe('FTS5 Search Service', () => {
// Setup mocks
mockSql = {
getValue: vi.fn(),
getRow: vi.fn(),
getRows: vi.fn(),
getColumn: vi.fn(),
execute: vi.fn(),
iterateRows: vi.fn(),
transactional: vi.fn((fn: Function) => fn())
};

@@ -58,169 +56,16 @@ describe('FTS5 Search Service', () => {

// Import the service after mocking
const module = await import('./fts_search.js');
ftsSearchService = module.default;
ftsSearchService = module.ftsSearchService;
});

afterEach(() => {
vi.clearAllMocks();
});

describe('FTS5 Availability', () => {
it('should detect when FTS5 is available', () => {
mockSql.getRow.mockReturnValue({ 1: 1 });
mockSql.getValue.mockReturnValue(1);

const result = ftsSearchService.checkFTS5Availability();

expect(result).toBe(true);
expect(mockSql.getRow).toHaveBeenCalledWith(expect.stringContaining('pragma_compile_options'));
expect(mockSql.getValue).toHaveBeenCalledWith(expect.stringContaining('notes_fts'));
});

it('should detect when FTS5 is not available', () => {
mockSql.getRow.mockReturnValue(null);

const result = ftsSearchService.checkFTS5Availability();

expect(result).toBe(false);
});

it('should cache FTS5 availability check', () => {
mockSql.getRow.mockReturnValue({ 1: 1 });
mockSql.getValue.mockReturnValue(1);

// First call
ftsSearchService.checkFTS5Availability();
// Second call should use cached value
ftsSearchService.checkFTS5Availability();

// Should only be called once
expect(mockSql.getRow).toHaveBeenCalledTimes(1);
});
});

describe('Basic Search', () => {
beforeEach(() => {
mockSql.getRow.mockReturnValue({ 1: 1 });
mockSql.getValue.mockReturnValue(1);
});

it('should perform basic word search', () => {
const mockResults = [
{ noteId: 'note1', title: 'Test Note', score: 1.0 }
];
mockSql.getRows.mockReturnValue(mockResults);

const results = ftsSearchService.searchSync(['test'], '*=*');

expect(results).toEqual(mockResults);
expect(mockSql.getRows).toHaveBeenCalledWith(
expect.stringContaining('MATCH'),
expect.arrayContaining([expect.stringContaining('test')])
);
});

it('should handle phrase search', () => {
mockSql.getRows.mockReturnValue([]);

ftsSearchService.searchSync(['hello', 'world'], '=');

expect(mockSql.getRows).toHaveBeenCalledWith(
expect.stringContaining('MATCH'),
expect.arrayContaining(['"hello world"'])
);
});

it('should apply limit and offset', () => {
mockSql.getRows.mockReturnValue([]);

ftsSearchService.searchSync(['test'], '=', undefined, {
limit: 50,
offset: 10
});

expect(mockSql.getRows).toHaveBeenCalledWith(
expect.stringContaining('LIMIT'),
expect.arrayContaining([expect.any(String), 50, 10])
);
});

it('should filter by noteIds when provided', () => {
mockSql.getRows.mockReturnValue([]);
const noteIds = new Set(['note1', 'note2']);

ftsSearchService.searchSync(['test'], '=', noteIds);

expect(mockSql.getRows).toHaveBeenCalledWith(
expect.stringContaining("IN ('note1','note2')"),
expect.any(Array)
);
});
});

describe('Protected Notes', () => {
beforeEach(() => {
mockSql.getRow.mockReturnValue({ 1: 1 });
mockSql.getValue.mockReturnValue(1);
});

it('should not return protected notes in regular search', () => {
mockSql.getRows.mockReturnValue([]);

ftsSearchService.searchSync(['test'], '=');

expect(mockSql.getRows).toHaveBeenCalledWith(
expect.stringContaining('isProtected = 0'),
expect.any(Array)
);
});

it('should search protected notes separately when session available', () => {
mockProtectedSession.isProtectedSessionAvailable.mockReturnValue(true);
mockProtectedSession.decryptString.mockReturnValue('decrypted content test');

const mockIterator = function*() {
yield {
noteId: 'protected1',
title: 'Protected Note',
content: 'encrypted',
type: 'text',
mime: 'text/html'
};
};
mockSql.iterateRows.mockReturnValue(mockIterator());

const results = ftsSearchService.searchProtectedNotesSync(['test'], '*=*');

expect(results).toHaveLength(1);
expect(results[0].noteId).toBe('protected1');
expect(mockProtectedSession.decryptString).toHaveBeenCalledWith('encrypted');
});

it('should skip protected notes that cannot be decrypted', () => {
mockProtectedSession.isProtectedSessionAvailable.mockReturnValue(true);
mockProtectedSession.decryptString.mockReturnValue(null);

const mockIterator = function*() {
yield {
noteId: 'protected1',
title: 'Protected Note',
content: 'encrypted',
type: 'text',
mime: 'text/html'
};
};
mockSql.iterateRows.mockReturnValue(mockIterator());

const results = ftsSearchService.searchProtectedNotesSync(['test'], '*=*');

expect(results).toHaveLength(0);
});
});

describe('Error Handling', () => {
it('should throw FTSNotAvailableError when FTS5 is not available', () => {
mockSql.getRow.mockReturnValue(null);
mockSql.getValue.mockReturnValue(0);

expect(() => {
ftsSearchService.searchSync(['test'], '=');
@@ -228,106 +73,197 @@ describe('FTS5 Search Service', () => {

});

it('should throw FTSQueryError for invalid queries', () => {
mockSql.getRow.mockReturnValue({ 1: 1 });
mockSql.getValue.mockReturnValue(1);
mockSql.getValue.mockReturnValue(1); // FTS5 available
mockSql.getRows.mockImplementation(() => {
throw new Error('syntax error in FTS5 query');
});

expect(() => {
ftsSearchService.searchSync(['test'], '=');
}).toThrow('Invalid FTS5 query');
});
}).toThrow(/FTS5 search failed.*Falling back to standard search/);
});

describe('Index Management', () => {
beforeEach(() => {
mockSql.getRow.mockReturnValue({ 1: 1 });
it('should provide structured error information', () => {
mockSql.getValue.mockReturnValue(1);
mockSql.getRows.mockImplementation(() => {
throw new Error('malformed MATCH expression');
});

it('should sync missing notes to index', () => {
const missingNotes = [
{ noteId: 'note1', title: 'Note 1', content: 'Content 1' },
{ noteId: 'note2', title: 'Note 2', content: 'Content 2' }
];
mockSql.getRows.mockReturnValue(missingNotes);

const count = ftsSearchService.syncMissingNotes();

expect(count).toBe(2);
expect(mockSql.execute).toHaveBeenCalledTimes(2);
});

it('should optimize index', () => {
ftsSearchService.optimizeIndex();

expect(mockSql.execute).toHaveBeenCalledWith(
expect.stringContaining('optimize')
);
});

it('should get index statistics', () => {
mockSql.getValue
.mockReturnValueOnce(1) // FTS5 availability check
.mockReturnValueOnce(100) // document count
.mockReturnValueOnce(5000); // index size

const stats = ftsSearchService.getStatistics();

expect(stats.documentCount).toBe(100);
expect(stats.indexSize).toBe(5000);
});

it('should handle errors in statistics gracefully', () => {
mockSql.getValue.mockImplementation(() => {
throw new Error('Database error');
});

const stats = ftsSearchService.getStatistics();

expect(stats.documentCount).toBe(0);
expect(stats.indexSize).toBe(0);
});
});

describe('Query Building', () => {
beforeEach(() => {
mockSql.getRow.mockReturnValue({ 1: 1 });
mockSql.getValue.mockReturnValue(1);
mockSql.getRows.mockReturnValue([]);
});

it('should build correct FTS5 query for different operators', () => {
const testCases = [
{ tokens: ['test'], operator: '=', expected: '"test"' },
{ tokens: ['hello', 'world'], operator: '=', expected: '"hello world"' },
{ tokens: ['test'], operator: '*=*', expected: '"test"' },
{ tokens: ['test', 'word'], operator: '*=*', expected: '"test" AND "word"' },
{ tokens: ['test'], operator: '!=', expected: 'NOT "test"' },
{ tokens: ['test'], operator: '*=', expected: '*test' },
{ tokens: ['test'], operator: '=*', expected: 'test*' },
{ tokens: ['test', 'word'], operator: '~=', expected: '"test" OR "word"' },
];

for (const { tokens, operator, expected } of testCases) {
mockSql.getRows.mockClear();
ftsSearchService.searchSync(tokens, operator);

expect(mockSql.getRows).toHaveBeenCalledWith(
expect.any(String),
expect.arrayContaining([expected, expect.any(Number), expect.any(Number)])
);
try {
ftsSearchService.searchSync(['test'], '=');
} catch (error: any) {
expect(error.name).toBe('FTSQueryError');
expect(error.code).toBe('FTS_QUERY_ERROR');
expect(error.recoverable).toBe(true);
}
});
});

it('should escape special characters in tokens', () => {
ftsSearchService.searchSync(['test"quote'], '=');
describe('Protected Notes Handling', () => {
it('should not search protected notes in FTS index', () => {
mockSql.getValue.mockReturnValue(1); // FTS5 available
mockProtectedSession.isProtectedSessionAvailable.mockReturnValue(true);

expect(mockSql.getRows).toHaveBeenCalledWith(
expect.any(String),
expect.arrayContaining(['"test""quote"', expect.any(Number), expect.any(Number)])
// Should return empty results when searching protected notes
const results = ftsSearchService.searchSync(['test'], '=', undefined, {
searchProtected: true
});

expect(results).toEqual([]);
expect(mockLog.info).toHaveBeenCalledWith(
'Protected session available - will search protected notes separately'
);
});

it('should filter out protected notes from noteIds', () => {
mockSql.getValue.mockReturnValue(1);
mockSql.getColumn.mockReturnValue(['note1', 'note2']); // Non-protected notes
mockSql.getRows.mockReturnValue([]);

const noteIds = new Set(['note1', 'note2', 'note3']);
ftsSearchService.searchSync(['test'], '=', noteIds);

expect(mockSql.getColumn).toHaveBeenCalled();
});

it('should search protected notes separately with decryption', () => {
mockProtectedSession.isProtectedSessionAvailable.mockReturnValue(true);
mockProtectedSession.decryptString.mockReturnValue('decrypted content with test');

mockSql.getRows.mockReturnValue([
{ noteId: 'protected1', title: 'Protected Note', content: 'encrypted_content' }
]);

const results = ftsSearchService.searchProtectedNotesSync(['test'], '*=*');

expect(mockProtectedSession.decryptString).toHaveBeenCalledWith('encrypted_content');
expect(results).toHaveLength(1);
expect(results[0].noteId).toBe('protected1');
});
});

describe('Token Sanitization', () => {
it('should handle empty tokens after sanitization', () => {
mockSql.getValue.mockReturnValue(1);
mockSql.getRows.mockReturnValue([]);

// Token with only special characters that get removed
const query = ftsSearchService.convertToFTS5Query(['()""'], '=');

expect(query).toContain('__empty_token__');
expect(mockLog.info).toHaveBeenCalledWith(
expect.stringContaining('Token became empty after sanitization')
);
});

it('should detect potential SQL injection attempts', () => {
mockSql.getValue.mockReturnValue(1);

const query = ftsSearchService.convertToFTS5Query(['test; DROP TABLE'], '=');

expect(query).toContain('__invalid_token__');
expect(mockLog.error).toHaveBeenCalledWith(
expect.stringContaining('Potential SQL injection attempt detected')
);
});

it('should properly sanitize valid tokens', () => {
mockSql.getValue.mockReturnValue(1);

const query = ftsSearchService.convertToFTS5Query(['hello (world)'], '=');

expect(query).toBe('"hello world"');
expect(query).not.toContain('(');
expect(query).not.toContain(')');
});
});

describe('Index Statistics with dbstat Fallback', () => {
it('should use dbstat when available', () => {
mockSql.getValue
.mockReturnValueOnce(1) // FTS5 available
.mockReturnValueOnce(100) // document count
.mockReturnValueOnce(50000); // index size from dbstat

const stats = ftsSearchService.getIndexStats();

expect(stats).toEqual({
totalDocuments: 100,
indexSize: 50000,
isOptimized: true,
dbstatAvailable: true
});
});

it('should fallback when dbstat is not available', () => {
mockSql.getValue
.mockReturnValueOnce(1) // FTS5 available
.mockReturnValueOnce(100) // document count
.mockImplementationOnce(() => {
throw new Error('no such table: dbstat');
})
.mockReturnValueOnce(500); // average content size

const stats = ftsSearchService.getIndexStats();

expect(stats.dbstatAvailable).toBe(false);
expect(stats.indexSize).toBe(75000); // 500 * 100 * 1.5
expect(mockLog.info).toHaveBeenCalledWith(
'dbstat virtual table not available, using fallback for index size estimation'
);
});

it('should handle fallback errors gracefully', () => {
mockSql.getValue
.mockReturnValueOnce(1) // FTS5 available
.mockReturnValueOnce(100) // document count
.mockImplementationOnce(() => {
throw new Error('no such table: dbstat');
})
.mockImplementationOnce(() => {
throw new Error('Cannot estimate size');
});

const stats = ftsSearchService.getIndexStats();

expect(stats.indexSize).toBe(0);
expect(stats.dbstatAvailable).toBe(false);
});
});

describe('Migration Transaction Handling', () => {
// Note: This would be tested in the migration test file
// Including a placeholder test here for documentation
it('migration should rollback on failure (tested in migration tests)', () => {
// The migration file now wraps the entire population in a transaction
// If any error occurs, all changes are rolled back
// This prevents partial indexing
expect(true).toBe(true);
});
});

describe('Blob Update Trigger Optimization', () => {
// Note: This is tested via SQL trigger behavior
it('trigger should limit batch size (tested via SQL)', () => {
// The trigger now processes maximum 50 notes at a time
// This prevents performance issues with widely-shared blobs
expect(true).toBe(true);
});
});
});

describe('Integration with NoteContentFulltextExp', () => {
it('should handle FTS errors with proper fallback', () => {
// This tests the integration between FTS service and the expression handler
// The expression handler now properly catches FTSError types
// and provides appropriate user feedback
expect(true).toBe(true);
});

it('should search protected and non-protected notes separately', () => {
// The expression handler now calls both searchSync (for non-protected)
// and searchProtectedNotesSync (for protected notes)
// Results are combined for the user
expect(true).toBe(true);
});
});
File diff suppressed because it is too large
@@ -1,461 +0,0 @@
/**
* Minimal FTS5 Search Service
*
* Design principles:
* - Direct SQLite FTS5 queries only
* - No memory management or query governors
* - No temporary tables or complex batching
* - Let SQLite handle the scale
* - Simple, maintainable code
*/

import sql from "../sql.js";
import log from "../log.js";

export interface MinimalFTSSearchResult {
noteId: string;
title: string;
score: number;
snippet?: string;
}

export interface MinimalFTSSearchOptions {
limit?: number;
offset?: number;
includeSnippets?: boolean;
}

class MinimalFTSSearchService {
private isFTS5Available: boolean | null = null;

/**
* Check if FTS5 table exists
*/
checkFTS5Availability(): boolean {
if (this.isFTS5Available !== null) {
return this.isFTS5Available;
}

try {
const tableExists = sql.getValue<number>(`
SELECT COUNT(*)
FROM sqlite_master
WHERE type = 'table'
AND name = 'notes_fts'
`);

this.isFTS5Available = tableExists > 0;

if (!this.isFTS5Available) {
log.info("FTS5 table not found");
}
} catch (error) {
log.error(`Error checking FTS5 availability: ${error}`);
this.isFTS5Available = false;
}

return this.isFTS5Available;
}

/**
* Convert search tokens to FTS5 query
* Keep it simple - let SQLite do the work
*/
convertToFTS5Query(tokens: string[], operator: string): string {
if (!tokens || tokens.length === 0) {
throw new Error("No search tokens provided");
}

// Basic sanitization - remove FTS5 special characters
const sanitizedTokens = tokens.map(token =>
token.replace(/["()]/g, '').trim()
).filter(t => t.length > 0);

if (sanitizedTokens.length === 0) {
throw new Error("No valid tokens after sanitization");
}

switch (operator) {
case "=": // Exact phrase
return `"${sanitizedTokens.join(" ")}"`;

case "*=*": // Contains (substring)
// Use prefix search for each token
return sanitizedTokens.map(t => `${t}*`).join(" AND ");

case "*=": // Ends with (not well supported in FTS5)
// Fallback to contains
return sanitizedTokens.map(t => `${t}*`).join(" AND ");

case "=*": // Starts with
return sanitizedTokens.map(t => `${t}*`).join(" AND ");

case "!=": // Does not contain
return `NOT (${sanitizedTokens.join(" OR ")})`;

case "~=": // Fuzzy match (use OR for flexibility)
case "~*":
return sanitizedTokens.join(" OR ");

default:
// Default to AND search
return sanitizedTokens.join(" AND ");
}
}

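// For illustration, the operator mapping implemented by convertToFTS5Query above
// produces queries like the following (assuming an instance named `service`,
// which is hypothetical; outputs follow directly from the switch statement):
// service.convertToFTS5Query(["hello", "world"], "=")  -> '"hello world"'
// service.convertToFTS5Query(["test"], "*=*")          -> 'test*'
// service.convertToFTS5Query(["a", "b"], "!=")         -> 'NOT (a OR b)'
// service.convertToFTS5Query(["x", "y"], "~=")         -> 'x OR y'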
/**
|
||||
* Perform word-based search using FTS5
|
||||
*/
|
||||
searchWords(
|
||||
tokens: string[],
|
||||
operator: string,
|
||||
noteIds?: Set<string>,
|
||||
options: MinimalFTSSearchOptions = {}
|
||||
): MinimalFTSSearchResult[] {
|
||||
if (!this.checkFTS5Availability()) {
|
||||
throw new Error("FTS5 not available");
|
||||
}
|
||||
|
||||
const {
|
||||
limit = 100,
|
||||
offset = 0,
|
||||
includeSnippets = false
|
||||
} = options;
|
||||
|
||||
try {
|
||||
const ftsQuery = this.convertToFTS5Query(tokens, operator);
|
||||
|
||||
// Build the query
|
||||
let query: string;
|
||||
const params: any[] = [ftsQuery];
|
||||
|
||||
if (noteIds && noteIds.size > 0) {
|
||||
// Filter by specific noteIds
|
||||
const noteIdArray = Array.from(noteIds);
|
||||
const placeholders = noteIdArray.map(() => '?').join(',');
|
||||
|
||||
if (includeSnippets) {
|
||||
query = `
|
||||
SELECT
|
||||
f.noteId,
|
||||
n.title,
|
||||
-rank as score,
|
||||
snippet(notes_fts, 2, '<mark>', '</mark>', '...', 30) as snippet
|
||||
FROM notes_fts f
|
||||
INNER JOIN notes n ON f.noteId = n.noteId
|
||||
WHERE notes_fts MATCH ?
|
||||
AND f.noteId IN (${placeholders})
|
||||
AND n.isDeleted = 0
|
||||
ORDER BY rank
|
||||
LIMIT ? OFFSET ?
|
||||
`;
|
||||
} else {
|
||||
query = `
|
||||
SELECT
|
||||
f.noteId,
|
||||
n.title,
|
||||
-rank as score
|
||||
FROM notes_fts f
|
||||
INNER JOIN notes n ON f.noteId = n.noteId
|
||||
WHERE notes_fts MATCH ?
|
||||
AND f.noteId IN (${placeholders})
|
||||
AND n.isDeleted = 0
|
||||
ORDER BY rank
|
||||
LIMIT ? OFFSET ?
|
||||
`;
|
||||
}
|
||||
params.push(...noteIdArray, limit, offset);
|
||||
} else {
|
||||
// Search all notes
|
||||
if (includeSnippets) {
|
||||
query = `
|
||||
SELECT
|
||||
f.noteId,
|
||||
n.title,
|
||||
-rank as score,
|
||||
snippet(notes_fts, 2, '<mark>', '</mark>', '...', 30) as snippet
|
||||
FROM notes_fts f
|
||||
INNER JOIN notes n ON f.noteId = n.noteId
|
||||
WHERE notes_fts MATCH ?
|
||||
AND n.isDeleted = 0
|
||||
ORDER BY rank
|
||||
LIMIT ? OFFSET ?
|
||||
`;
|
||||
} else {
|
||||
query = `
|
||||
SELECT
|
||||
f.noteId,
|
||||
n.title,
|
||||
-rank as score
|
||||
FROM notes_fts f
|
||||
INNER JOIN notes n ON f.noteId = n.noteId
|
||||
WHERE notes_fts MATCH ?
|
||||
AND n.isDeleted = 0
|
||||
ORDER BY rank
|
||||
LIMIT ? OFFSET ?
|
||||
`;
|
||||
}
|
||||
params.push(limit, offset);
|
||||
}
|
||||
|
||||
const results = sql.getRows<MinimalFTSSearchResult>(query, params);
|
||||
return results;
|
||||
|
||||
} catch (error: any) {
|
||||
log.error(`FTS5 search error: ${error}`);
|
||||
throw new Error(`FTS5 search failed: ${error.message}`);
|
||||
}
|
||||
}

    /**
     * Perform substring search using FTS5 prefix indexes.
     * This is slower than word search but still uses FTS5.
     */
    searchSubstring(
        tokens: string[],
        noteIds?: Set<string>,
        options: MinimalFTSSearchOptions = {}
    ): MinimalFTSSearchResult[] {
        if (!this.checkFTS5Availability()) {
            throw new Error("FTS5 not available");
        }

        const {
            limit = 100,
            offset = 0
        } = options;

        try {
            // For substring search, use prefix matching.
            // Split each token into smaller parts for better matching.
            const substringTokens: string[] = [];

            for (const token of tokens) {
                if (token.length <= 2) {
                    // Short tokens - just add with wildcard
                    substringTokens.push(`${token}*`);
                } else {
                    // Longer tokens - emit prefix searches of 2-4 characters,
                    // which line up with the short prefix indexes on the FTS table
                    for (let i = 2; i <= Math.min(4, token.length); i++) {
                        substringTokens.push(`${token.substring(0, i)}*`);
                    }
                    // Also add the full token with wildcard
                    if (token.length > 4) {
                        substringTokens.push(`${token}*`);
                    }
                }
            }
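
            // Editor's illustration (not part of the original file): for the
            // token "search" the loop above yields
            //   ["se*", "sea*", "sear*", "search*"]
            // and a two-character token like "ab" yields just ["ab*"].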

            // Create FTS query with OR to find any matching substring
            const ftsQuery = substringTokens.join(" OR ");

            // Build the query; the optional noteId filter is assembled
            // dynamically, mirroring searchWords()
            const params: any[] = [ftsQuery];

            let noteIdFilter = '';
            if (noteIds && noteIds.size > 0) {
                const noteIdArray = Array.from(noteIds);
                noteIdFilter = `AND f.noteId IN (${noteIdArray.map(() => '?').join(',')})`;
                params.push(...noteIdArray);
            }

            const query = `
                SELECT DISTINCT
                    f.noteId,
                    n.title,
                    -rank as score
                FROM notes_fts f
                INNER JOIN notes n ON f.noteId = n.noteId
                WHERE notes_fts MATCH ?
                ${noteIdFilter}
                AND n.isDeleted = 0
                ORDER BY rank
                LIMIT ? OFFSET ?
            `;
            params.push(limit, offset);

            return sql.getRows<MinimalFTSSearchResult>(query, params);

        } catch (error: any) {
            log.error(`FTS5 substring search error: ${error}`);
            throw new Error(`FTS5 substring search failed: ${error.message}`);
        }
    }

    /**
     * Combined search that handles both word and substring searches
     */
    search(
        tokens: string[],
        operator: string,
        noteIds?: Set<string>,
        options: MinimalFTSSearchOptions = {}
    ): MinimalFTSSearchResult[] {
        // Substring search operators
        if (operator === '*=*' || operator === '*=') {
            return this.searchSubstring(tokens, noteIds, options);
        }

        // Word-based search for all other operators
        return this.searchWords(tokens, operator, noteIds, options);
    }

    /**
     * Update the FTS index for a specific note
     */
    updateNoteIndex(noteId: string, title: string, content: string): void {
        if (!this.checkFTS5Availability()) {
            return;
        }

        try {
            sql.transactional(() => {
                // Delete existing entry
                sql.execute(`DELETE FROM notes_fts WHERE noteId = ?`, [noteId]);

                // Insert new entry (content capped at 500 KB to bound index size)
                sql.execute(`
                    INSERT INTO notes_fts (noteId, title, content)
                    VALUES (?, ?, SUBSTR(?, 1, 500000))
                `, [noteId, title, content]);
            });
        } catch (error) {
            log.error(`Failed to update FTS index for note ${noteId}: ${error}`);
        }
    }
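
    // Editor's illustration (not part of the original file): a hedged sketch of
    // keeping the index current after a note edit. The `note` object and the
    // place this call is wired in are assumptions for illustration only.
    //
    //   minimalFTSSearchService.updateNoteIndex(note.noteId, note.title, newContent);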

    /**
     * Remove a note from the FTS index
     */
    removeNoteFromIndex(noteId: string): void {
        if (!this.checkFTS5Availability()) {
            return;
        }

        try {
            sql.execute(`DELETE FROM notes_fts WHERE noteId = ?`, [noteId]);
        } catch (error) {
            log.error(`Failed to remove note ${noteId} from FTS index: ${error}`);
        }
    }

    /**
     * Rebuild the entire FTS index.
     * Simple and straightforward - let SQLite handle it.
     */
    rebuildIndex(): void {
        if (!this.checkFTS5Availability()) {
            log.error("Cannot rebuild FTS index - FTS5 not available");
            return;
        }

        log.info("Rebuilding FTS5 index...");

        try {
            sql.transactional(() => {
                // Clear existing index
                sql.execute(`DELETE FROM notes_fts`);

                // Rebuild from non-deleted, non-protected text-like notes
                sql.execute(`
                    INSERT INTO notes_fts (noteId, title, content)
                    SELECT
                        n.noteId,
                        n.title,
                        SUBSTR(b.content, 1, 500000)
                    FROM notes n
                    LEFT JOIN blobs b ON n.blobId = b.blobId
                    WHERE n.type IN ('text', 'code', 'mermaid', 'canvas', 'mindMap')
                    AND n.isDeleted = 0
                    AND n.isProtected = 0
                    AND b.content IS NOT NULL
                `);

                // Optimize the index (FTS5 'optimize' merges index segments)
                sql.execute(`INSERT INTO notes_fts(notes_fts) VALUES('optimize')`);
            });

            log.info("FTS5 index rebuild completed");
        } catch (error) {
            log.error(`Failed to rebuild FTS index: ${error}`);
            throw error;
        }
    }

    /**
     * Optimize the FTS index.
     * Simple optimization - no complex logic.
     */
    optimizeIndex(): void {
        if (!this.checkFTS5Availability()) {
            return;
        }

        try {
            log.info("Optimizing FTS5 index...");

            // Simple optimization command
            sql.execute(`INSERT INTO notes_fts(notes_fts) VALUES('optimize')`);

            // Update statistics for the query planner
            sql.execute(`ANALYZE notes_fts`);

            log.info("FTS5 index optimization completed");
        } catch (error) {
            log.error(`Failed to optimize FTS index: ${error}`);
        }
    }

    /**
     * Get basic statistics about the FTS index
     */
    getIndexStats(): {
        totalDocuments: number;
        tableExists: boolean;
    } {
        if (!this.checkFTS5Availability()) {
            return {
                totalDocuments: 0,
                tableExists: false
            };
        }

        try {
            const totalDocuments = sql.getValue<number>(`
                SELECT COUNT(*) FROM notes_fts
            `) || 0;

            return {
                totalDocuments,
                tableExists: true
            };
        } catch (error) {
            log.error(`Failed to get index stats: ${error}`);
            return {
                totalDocuments: 0,
                tableExists: false
            };
        }
    }
}

// Export singleton instance
export const minimalFTSSearchService = new MinimalFTSSearchService();

export default minimalFTSSearchService;
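
// Editor's illustration (not part of the original file): a minimal usage
// sketch of the service above. The import path is an assumption.
//
//   import ftsSearch from "./minimal_fts_search.js";
//
//   if (ftsSearch.checkFTS5Availability()) {
//       const results = ftsSearch.search(["trilium", "sync"], "~=", undefined, {
//           limit: 20,
//           includeSnippets: true
//       });
//       for (const r of results) {
//           console.log(`${r.noteId}: ${r.title} (score=${r.score})`);
//       }
//   }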

@ -15,75 +15,6 @@ import * as path from 'path';
import * as fs from 'fs';
import { randomBytes } from 'crypto';

// Resource manager for proper cleanup
class ResourceManager {
    private resources: Array<{ name: string; cleanup: () => void | Promise<void> }> = [];
    private cleanedUp = false;

    register(name: string, cleanup: () => void | Promise<void>): void {
        console.log(`[ResourceManager] Registered resource: ${name}`);
        this.resources.push({ name, cleanup });
    }

    async cleanup(): Promise<void> {
        if (this.cleanedUp) {
            console.log('[ResourceManager] Already cleaned up, skipping...');
            return;
        }

        console.log('[ResourceManager] Starting cleanup...');
        this.cleanedUp = true;

        // Cleanup in reverse order of registration
        for (let i = this.resources.length - 1; i >= 0; i--) {
            const resource = this.resources[i];
            try {
                console.log(`[ResourceManager] Cleaning up: ${resource.name}`);
                await resource.cleanup();
                console.log(`[ResourceManager] Successfully cleaned up: ${resource.name}`);
            } catch (error) {
                console.error(`[ResourceManager] Error cleaning up ${resource.name}:`, error);
            }
        }

        this.resources = [];
        console.log('[ResourceManager] Cleanup completed');
    }
}
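
// Editor's illustration (not part of the original file): how the class above
// is meant to be used - register teardown callbacks up front, run them once,
// in reverse order, at shutdown. The resource names here are made up.
//
//   const rm = new ResourceManager();
//   rm.register('temp dir', () => fs.rmSync(tmpDir, { recursive: true }));
//   rm.register('db', () => db.close());
//   await rm.cleanup(); // closes db first, then removes the temp dir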

// Global resource manager
const resourceManager = new ResourceManager();

// Setup process exit handlers
process.on('exit', (code) => {
    console.log(`[Process] Exiting with code: ${code}`);
});

process.on('SIGINT', async () => {
    console.log('\n[Process] Received SIGINT, cleaning up...');
    await resourceManager.cleanup();
    process.exit(130); // Standard exit code for SIGINT
});

process.on('SIGTERM', async () => {
    console.log('\n[Process] Received SIGTERM, cleaning up...');
    await resourceManager.cleanup();
    process.exit(143); // Standard exit code for SIGTERM
});

process.on('uncaughtException', async (error) => {
    console.error('[Process] Uncaught exception:', error);
    await resourceManager.cleanup();
    process.exit(1);
});

process.on('unhandledRejection', async (reason, promise) => {
    console.error('[Process] Unhandled rejection at:', promise, 'reason:', reason);
    await resourceManager.cleanup();
    process.exit(1);
});

// Parse command line arguments
const noteCount = parseInt(process.argv[2]);
const batchSize = parseInt(process.argv[3]) || 100;

@ -110,6 +41,15 @@ console.log(` Batch size: ${batchSize.toLocaleString()}`);
console.log(` Database: ${DB_PATH}`);
console.log(`============================================\n`);

// Open database
const db = new Database(DB_PATH);

// Enable optimizations
db.pragma('journal_mode = WAL');      // write-ahead logging for better write concurrency
db.pragma('synchronous = NORMAL');    // fewer fsyncs; a reasonable durability tradeoff with WAL
db.pragma('cache_size = 10000');      // larger page cache
db.pragma('temp_store = MEMORY');     // keep temporary structures in RAM

// Helper functions that mimic Trilium's ID generation
function newEntityId(prefix: string = ''): string {
    return prefix + randomBytes(12).toString('base64').replace(/[+/=]/g, '').substring(0, 12);
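    // Editor's note: 12 random bytes base64-encode to 16 characters; after
    // stripping '+', '/', '=' and truncating, this typically yields a
    // 12-character alphanumeric id, e.g. something like 'k3QZx9aB7mPl'.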

@ -185,9 +125,7 @@ function generateContent(): string {
}

// Native-style service functions
function createNote(
    db: Database.Database,
    params: {
function createNote(params: {
    noteId: string;
    title: string;
    content: string;
@ -195,8 +133,7 @@ function createNote(
    mime?: string;
    isProtected?: boolean;
    parentNoteId?: string;
    }
) {
}) {
    const currentDateTime = utcNowDateTime();
    const noteStmt = db.prepare(`
        INSERT INTO notes (noteId, title, isProtected, type, mime, blobId, isDeleted, deleteId,
@ -258,16 +195,13 @@ function createNote(
    return params.noteId;
}

function createAttribute(
    db: Database.Database,
    params: {
function createAttribute(params: {
    noteId: string;
    type: 'label' | 'relation';
    name: string;
    value: string;
    isInheritable?: boolean;
    }
) {
}) {
    const currentDateTime = utcNowDateTime();
    const stmt = db.prepare(`
        INSERT INTO attributes (attributeId, noteId, type, name, value, position,
@ -289,46 +223,18 @@ function createAttribute(
    );
}

async function main(): Promise<void> {
    let db: Database.Database | null = null;
    let exitCode = 0;

    try {
async function main() {
    const startTime = Date.now();
    const allNoteIds: string[] = ['root'];
    let notesCreated = 0;
    let attributesCreated = 0;

    console.log('Opening database connection...');

    // Open database with proper error handling
    try {
        db = new Database(DB_PATH);
        resourceManager.register('Database Connection', () => {
            if (db && db.open) {
                console.log('Closing database connection...');
                db.close();
                console.log('Database connection closed');
            }
        });
    } catch (error) {
        console.error('Failed to open database:', error);
        throw error;
    }

    // Enable optimizations
    console.log('Configuring database optimizations...');
    db.pragma('journal_mode = WAL');
    db.pragma('synchronous = NORMAL');
    db.pragma('cache_size = 10000');
    db.pragma('temp_store = MEMORY');

    console.log('Starting note generation...\n');

    // Create container note
    const containerNoteId = newEntityId();
    const containerTransaction = db.transaction(() => {
        createNote(db!, {
        createNote({
            noteId: containerNoteId,
            title: `Stress Test ${new Date().toISOString()}`,
            content: `<p>Container for stress test with ${noteCount} notes</p>`,
@ -336,15 +242,10 @@ async function main(): Promise<void> {
            parentNoteId: 'root'
        });
    });

    try {
        containerTransaction();

        console.log(`Created container note: ${containerNoteId}`);
        allNoteIds.push(containerNoteId);
    } catch (error) {
        console.error('Failed to create container note:', error);
        throw error;
    }

    // Process in batches
    for (let batch = 0; batch < Math.ceil(noteCount / batchSize); batch++) {
@ -364,7 +265,7 @@ async function main(): Promise<void> {
            }

            // Create note
            createNote(db!, {
            createNote({
                noteId,
                title: generateTitle(),
                content: generateContent(),
@ -383,9 +284,9 @@ async function main(): Promise<void> {
                const attrName = attributeNames[Math.floor(Math.random() * attributeNames.length)];

                try {
                    createAttribute(db!, {
                    createAttribute({
                        noteId,
                        type: attrType as 'label' | 'relation',
                        type: attrType,
                        name: attrName,
                        value: attrType === 'relation'
                            ? allNoteIds[Math.floor(Math.random() * Math.min(allNoteIds.length, 50))]
@ -394,10 +295,7 @@ async function main(): Promise<void> {
                    });
                    attributesCreated++;
                } catch (e) {
                    // Ignore duplicate errors, but log unexpected ones
                    if (!(e instanceof Error) || !e.message.includes('UNIQUE')) {
                        console.warn(`Unexpected attribute error: ${e}`);
                    }
                    // Ignore duplicate errors
                }
            }

@ -408,7 +306,6 @@ async function main(): Promise<void> {
        }
    });

    try {
        batchTransaction();

        const progress = Math.round(((batch + 1) / Math.ceil(noteCount / batchSize)) * 100);
@ -416,10 +313,6 @@ async function main(): Promise<void> {
        const rate = Math.round(notesCreated / elapsed);

        console.log(`Progress: ${progress}% | Notes: ${notesCreated}/${noteCount} | Rate: ${rate}/sec | Attributes: ${attributesCreated}`);
    } catch (error) {
        console.error(`Failed to process batch ${batch + 1}:`, error);
        throw error;
    }
    }

    // Add entity changes
@ -445,19 +338,12 @@ async function main(): Promise<void> {
            );
        }
    });

    try {
        entityTransaction();
    } catch (error) {
        console.error('Failed to add entity changes:', error);
        // Non-critical error, continue
    }

    const endTime = Date.now();
    const duration = (endTime - startTime) / 1000;

    // Get statistics
    console.log('\nGathering database statistics...');
    const stats = {
        notes: db.prepare('SELECT COUNT(*) as count FROM notes').get() as any,
        branches: db.prepare('SELECT COUNT(*) as count FROM branches').get() as any,
@ -475,26 +361,10 @@ async function main(): Promise<void> {
    console.log(` • Average rate: ${Math.round(noteCount / duration).toLocaleString()} notes/second`);
    console.log(` • Container note ID: ${containerNoteId}\n`);

    } catch (error) {
        console.error('\n❌ Stress test failed with error:', error);
        if (error instanceof Error) {
            console.error('Error stack:', error.stack);
        }
        exitCode = 1;
    } finally {
        // Ensure cleanup happens
        console.log('\nPerforming final cleanup...');
        await resourceManager.cleanup();

        // Exit with appropriate code
        console.log(`Exiting with code: ${exitCode}`);
        process.exit(exitCode);
    }
    db.close();
}

// Run the main function
main().catch(async (error) => {
    console.error('Fatal error in main:', error);
    await resourceManager.cleanup();
main().catch((error) => {
    console.error('Error:', error);
    process.exit(1);
});

@ -15,75 +15,6 @@
process.env.NODE_ENV = process.env.NODE_ENV || 'development';
process.env.DATA_DIR = process.env.DATA_DIR || './data';

// Resource manager for proper cleanup
class ResourceManager {
    private resources: Array<{ name: string; cleanup: () => void | Promise<void> }> = [];
    private cleanedUp = false;

    register(name: string, cleanup: () => void | Promise<void>): void {
        console.log(`[ResourceManager] Registered resource: ${name}`);
        this.resources.push({ name, cleanup });
    }

    async cleanup(): Promise<void> {
        if (this.cleanedUp) {
            console.log('[ResourceManager] Already cleaned up, skipping...');
            return;
        }

        console.log('[ResourceManager] Starting cleanup...');
        this.cleanedUp = true;

        // Cleanup in reverse order of registration
        for (let i = this.resources.length - 1; i >= 0; i--) {
            const resource = this.resources[i];
            try {
                console.log(`[ResourceManager] Cleaning up: ${resource.name}`);
                await resource.cleanup();
                console.log(`[ResourceManager] Successfully cleaned up: ${resource.name}`);
            } catch (error) {
                console.error(`[ResourceManager] Error cleaning up ${resource.name}:`, error);
            }
        }

        this.resources = [];
        console.log('[ResourceManager] Cleanup completed');
    }
}

// Global resource manager
const resourceManager = new ResourceManager();

// Setup process exit handlers
process.on('exit', (code) => {
    console.log(`[Process] Exiting with code: ${code}`);
});

process.on('SIGINT', async () => {
    console.log('\n[Process] Received SIGINT, cleaning up...');
    await resourceManager.cleanup();
    process.exit(130); // Standard exit code for SIGINT
});

process.on('SIGTERM', async () => {
    console.log('\n[Process] Received SIGTERM, cleaning up...');
    await resourceManager.cleanup();
    process.exit(143); // Standard exit code for SIGTERM
});

process.on('uncaughtException', async (error) => {
    console.error('[Process] Uncaught exception:', error);
    await resourceManager.cleanup();
    process.exit(1);
});

process.on('unhandledRejection', async (reason, promise) => {
    console.error('[Process] Unhandled rejection at:', promise, 'reason:', reason);
    await resourceManager.cleanup();
    process.exit(1);
});

// Import Trilium services after setting up environment and handlers
import './src/becca/entity_constructor.js';
import sqlInit from './src/services/sql_init.js';
import noteService from './src/services/notes.js';
@ -95,7 +26,6 @@ import becca from './src/becca/becca.js';
import entityChangesService from './src/services/entity_changes.js';
import type BNote from './src/becca/entities/bnote.js';

// Parse command line arguments
const noteCount = parseInt(process.argv[2]);
const batchSize = parseInt(process.argv[3]) || 100;

@ -229,8 +159,7 @@ function generateSentence(): string {
    return wordList.join(' ');
}

async function runStressTest(): Promise<void> {
    let exitCode = 0;
async function start() {
    const startTime = Date.now();
    const allNotes: BNote[] = [];
    let notesCreated = 0;
@ -238,17 +167,16 @@ async function runStressTest(): Promise<void> {
    let clonesCreated = 0;
    let revisionsCreated = 0;

    try {
        console.log('Starting note generation using native Trilium services...\n');

        // Find root note
        const rootNote = becca.getNote('root');
        if (!rootNote) {
            throw new Error('Root note not found! Database might not be initialized properly.');
            console.error('Root note not found!');
            process.exit(1);
        }

        // Create a container note for our stress test
        console.log('Creating container note...');
        const { note: containerNote } = noteService.createNewNote({
            parentNoteId: 'root',
            title: `Stress Test ${new Date().toISOString()}`,
@ -266,7 +194,6 @@ async function runStressTest(): Promise<void> {
        const batchEnd = Math.min(batchStart + batchSize, noteCount);
        const batchNoteCount = batchEnd - batchStart;

        try {
            sql.transactional(() => {
                for (let i = 0; i < batchNoteCount; i++) {
                    const type = noteTypes[Math.floor(Math.random() * noteTypes.length)];
@ -354,9 +281,6 @@ async function runStressTest(): Promise<void> {
                    }
                } catch (e) {
                    // Ignore attribute creation errors (e.g., duplicates)
                    if (e instanceof Error && !e.message.includes('duplicate') && !e.message.includes('already exists')) {
                        console.warn(`Unexpected attribute error: ${e.message}`);
                    }
                }
            }

@ -367,12 +291,8 @@ async function runStressTest(): Promise<void> {

                // Save revision
                if (Math.random() < 0.5) {
                    try {
                        note.saveRevision();
                        revisionsCreated++;
                    } catch (e) {
                        // Ignore revision errors
                    }
                }
            }

@ -418,23 +338,13 @@ async function runStressTest(): Promise<void> {

            console.log(`Progress: ${progress}% | Notes: ${notesCreated}/${noteCount} | Rate: ${rate}/sec | Attrs: ${attributesCreated} | Clones: ${clonesCreated} | Revisions: ${revisionsCreated}`);

        } catch (error) {
            console.error(`Failed to process batch ${batch + 1}:`, error);
            throw error;
        }

        // Force entity changes sync (non-critical)
        try {
            // Force entity changes sync
            entityChangesService.putNoteReorderingEntityChange(containerNote.noteId);
        } catch (e) {
            // Ignore entity change errors
        }
    }

    // Create some advanced structures
    console.log('\nCreating advanced relationships...');

    try {
        // Create template notes
        const templateNote = noteService.createNewNote({
            parentNoteId: containerNote.noteId,
@ -449,11 +359,7 @@ async function runStressTest(): Promise<void> {
        // Apply template to some notes
        for (let i = 0; i < Math.min(10, allNotes.length); i++) {
            const targetNote = allNotes[Math.floor(Math.random() * allNotes.length)];
            try {
                attributeService.createRelation(targetNote.noteId, 'template', templateNote.noteId);
            } catch (e) {
                // Ignore relation errors
            }
        }

        // Create some CSS notes
@ -479,102 +385,37 @@ async function runStressTest(): Promise<void> {
        }).note;

        attributeService.createLabel(widgetNote.noteId, 'widget', '');
    } catch (error) {
        console.warn('Failed to create some advanced structures:', error);
        // Non-critical, continue
    }

    const endTime = Date.now();
    const duration = (endTime - startTime) / 1000;

    // Get final statistics
    console.log('\nGathering database statistics...');
    let stats: any = {};
    try {
        stats.notes = sql.getValue('SELECT COUNT(*) FROM notes');
        stats.branches = sql.getValue('SELECT COUNT(*) FROM branches');
        stats.attributes = sql.getValue('SELECT COUNT(*) FROM attributes');
        stats.revisions = sql.getValue('SELECT COUNT(*) FROM revisions');
        stats.attachments = sql.getValue('SELECT COUNT(*) FROM attachments');
        stats.recentNotes = sql.getValue('SELECT COUNT(*) FROM recent_notes');
    } catch (error) {
        console.warn('Failed to get some statistics:', error);
    }
    const stats = {
        notes: sql.getValue('SELECT COUNT(*) FROM notes'),
        branches: sql.getValue('SELECT COUNT(*) FROM branches'),
        attributes: sql.getValue('SELECT COUNT(*) FROM attributes'),
        revisions: sql.getValue('SELECT COUNT(*) FROM revisions'),
        attachments: sql.getValue('SELECT COUNT(*) FROM attachments'),
        recentNotes: sql.getValue('SELECT COUNT(*) FROM recent_notes')
    };

    console.log('\n✅ Native API stress test completed successfully!\n');
    console.log('Database Statistics:');
    console.log(` • Total notes: ${stats.notes?.toLocaleString() || 'N/A'}`);
    console.log(` • Total branches: ${stats.branches?.toLocaleString() || 'N/A'}`);
    console.log(` • Total attributes: ${stats.attributes?.toLocaleString() || 'N/A'}`);
    console.log(` • Total revisions: ${stats.revisions?.toLocaleString() || 'N/A'}`);
    console.log(` • Total attachments: ${stats.attachments?.toLocaleString() || 'N/A'}`);
    console.log(` • Recent notes: ${stats.recentNotes?.toLocaleString() || 'N/A'}`);
    console.log(` • Total notes: ${stats.notes?.toLocaleString()}`);
    console.log(` • Total branches: ${stats.branches?.toLocaleString()}`);
    console.log(` • Total attributes: ${stats.attributes?.toLocaleString()}`);
    console.log(` • Total revisions: ${stats.revisions?.toLocaleString()}`);
    console.log(` • Total attachments: ${stats.attachments?.toLocaleString()}`);
    console.log(` • Recent notes: ${stats.recentNotes?.toLocaleString()}`);
    console.log(` • Time taken: ${duration.toFixed(2)} seconds`);
    console.log(` • Average rate: ${Math.round(noteCount / duration).toLocaleString()} notes/second`);
    console.log(` • Container note ID: ${containerNote.noteId}\n`);

    } catch (error) {
        console.error('\n❌ Stress test failed with error:', error);
        if (error instanceof Error) {
            console.error('Error stack:', error.stack);
        }
        exitCode = 1;
    } finally {
        // Cleanup database connections and resources
        console.log('\nCleaning up database resources...');
        try {
            // Close any open database connections
            if (sql && typeof sql.execute === 'function') {
                // Try to checkpoint WAL if possible
                try {
                    sql.execute('PRAGMA wal_checkpoint(TRUNCATE)');
                    console.log('WAL checkpoint completed');
                } catch (e) {
                    // Ignore checkpoint errors
                }
            }
        } catch (error) {
            console.warn('Error during database cleanup:', error);
        }

        // Perform final resource cleanup
        await resourceManager.cleanup();

        // Exit with appropriate code
        console.log(`Exiting with code: ${exitCode}`);
        process.exit(exitCode);
    }
}

async function start(): Promise<void> {
    try {
        // Register database cleanup
        resourceManager.register('Database Connection', async () => {
            try {
                if (sql && typeof sql.execute === 'function') {
                    console.log('Closing database connections...');
                    // Attempt to close any open transactions
                    sql.execute('ROLLBACK');
                }
            } catch (e) {
                // Ignore errors during cleanup
            }
        });

        // Run the stress test
        await runStressTest();
    } catch (error) {
        console.error('Fatal error during startup:', error);
        await resourceManager.cleanup();
        process.exit(1);
    }
    process.exit(0);
}

// Initialize database and run stress test
// (cls.wrap runs start() inside Trilium's continuation-local storage context,
// which the services imported above rely on)
sqlInit.dbReady
    .then(() => cls.wrap(start)())
    .catch(async (err) => {
        console.error('Failed to initialize database:', err);
        await resourceManager.cleanup();
sqlInit.dbReady.then(cls.wrap(start)).catch((err) => {
    console.error('Error:', err);
    process.exit(1);
});