don't log known slow queries

zadam 2023-10-20 09:36:57 +02:00
parent 5c10fc26be
commit 4773f9392b
4 changed files with 65 additions and 33 deletions

src/becca/becca_loader.js

@@ -15,11 +15,13 @@ const entityConstructor = require("../becca/entity_constructor");
 const beccaLoaded = new Promise((res, rej) => {
     sqlInit.dbReady.then(() => {
-        load();
+        cls.init(() => {
+            load();
 
-        cls.init(() => require('../services/options_init').initStartupOptions());
+            require('../services/options_init').initStartupOptions();
 
-        res();
+            res();
+        });
     });
 });
@@ -27,32 +29,35 @@ function load() {
     const start = Date.now();
     becca.reset();
 
-    // using a raw query and passing arrays to avoid allocating new objects,
-    // this is worth it for the becca load since it happens every run and blocks the app until finished
+    // we know this is slow and the total becca load time is logged
+    sql.disableSlowQueryLogging(() => {
+        // using a raw query and passing arrays to avoid allocating new objects,
+        // this is worth it for the becca load since it happens every run and blocks the app until finished
         for (const row of sql.getRawRows(`SELECT noteId, title, type, mime, isProtected, blobId, dateCreated, dateModified, utcDateCreated, utcDateModified FROM notes WHERE isDeleted = 0`)) {
             new BNote().update(row).init();
         }
 
         const branchRows = sql.getRawRows(`SELECT branchId, noteId, parentNoteId, prefix, notePosition, isExpanded, utcDateModified FROM branches WHERE isDeleted = 0`);
         // in-memory sort is faster than in the DB
         branchRows.sort((a, b) => a.notePosition - b.notePosition);
 
         for (const row of branchRows) {
             new BBranch().update(row).init();
         }
 
         for (const row of sql.getRawRows(`SELECT attributeId, noteId, type, name, value, isInheritable, position, utcDateModified FROM attributes WHERE isDeleted = 0`)) {
             new BAttribute().update(row).init();
         }
 
         for (const row of sql.getRows(`SELECT name, value, isSynced, utcDateModified FROM options`)) {
             new BOption(row);
         }
 
         for (const row of sql.getRows(`SELECT etapiTokenId, name, tokenHash, utcDateCreated, utcDateModified FROM etapi_tokens WHERE isDeleted = 0`)) {
             new BEtapiToken(row);
         }
+    });
 
     for (const noteId in becca.notes) {
         becca.notes[noteId].sortParents();
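
Why load() now runs inside cls.init() (a reading of the change, not stated in the commit): the slow-query flag introduced below is kept in continuation-local storage, and cls-hooked, the package that Trilium's cls service wraps, only allows namespace.set() while a context is active, which cls.init() establishes. A minimal sketch of that constraint, assuming cls-hooked semantics; the namespace name is illustrative:

    const { createNamespace } = require('cls-hooked');
    const ns = createNamespace('demo');

    // Outside an active context, ns.set() throws "No context available",
    // while ns.get() merely returns undefined.
    ns.run(() => {
        ns.set('disableSlowQueryLogging', true);          // ok inside run()
        console.log(!!ns.get('disableSlowQueryLogging')); // true
    });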

src/services/cls.js

@@ -48,6 +48,14 @@ function isEntityEventsDisabled() {
     return !!namespace.get('disableEntityEvents');
 }
 
+function disableSlowQueryLogging(disable) {
+    namespace.set('disableSlowQueryLogging', disable);
+}
+
+function isSlowQueryLoggingDisabled() {
+    return !!namespace.get('disableSlowQueryLogging');
+}
+
 function getAndClearEntityChangeIds() {
     const entityChangeIds = namespace.get('entityChangeIds') || [];
@@ -93,4 +101,6 @@ module.exports = {
     getAndClearEntityChangeIds,
     putEntityChange,
     ignoreEntityChangeIds,
+    disableSlowQueryLogging,
+    isSlowQueryLoggingDisabled
 };

src/services/content_hash.js

@@ -11,14 +11,17 @@ function getEntityHashes() {
     const startTime = new Date();
 
-    const hashRows = sql.getRawRows(`
-        SELECT entityName,
-               entityId,
-               hash,
-               isErased
-        FROM entity_changes
-        WHERE isSynced = 1
-          AND entityName != 'note_reordering'`);
+    // we know this is slow and the total content hash calculation time is logged
+    const hashRows = sql.disableSlowQueryLogging(
+        () => sql.getRawRows(`
+            SELECT entityName,
+                   entityId,
+                   hash,
+                   isErased
+            FROM entity_changes
+            WHERE isSynced = 1
+              AND entityName != 'note_reordering'`)
+    );
 
     // sorting is faster in memory
     // sorting by entityId is enough, hashes will be segmented by entityName later on anyway
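
The call above uses the wrapper as an expression, which works because sql.disableSlowQueryLogging() returns the callback's result (the return cb() in the sql.js hunk below). A hedged mini-example of the same shape; the query is illustrative, not from the commit:

    // the callback's return value passes straight through the wrapper
    const count = sql.disableSlowQueryLogging(
        () => sql.getValue(`SELECT COUNT(*) FROM entity_changes`)
    );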

src/services/sql.js

@@ -225,7 +225,7 @@ function wrap(query, func) {
     const milliseconds = Date.now() - startTimestamp;
 
-    if (milliseconds >= 20) {
+    if (milliseconds >= 20 && !cls.isSlowQueryLoggingDisabled()) {
         if (query.includes("WITH RECURSIVE")) {
             log.info(`Slow recursive query took ${milliseconds}ms.`);
         }
@@ -295,6 +295,19 @@ async function copyDatabase(targetFilePath) {
     await dbConnection.backup(targetFilePath);
 }
 
+function disableSlowQueryLogging(cb) {
+    const orig = cls.isSlowQueryLoggingDisabled();
+
+    try {
+        cls.disableSlowQueryLogging(true);
+
+        return cb();
+    }
+    finally {
+        cls.disableSlowQueryLogging(orig);
+    }
+}
+
 module.exports = {
     dbConnection,
     insert,
@@ -367,5 +380,6 @@ module.exports = {
     transactional,
     upsert,
     fillParamList,
-    copyDatabase
+    copyDatabase,
+    disableSlowQueryLogging
 };
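
A usage sketch of the new export, as assumed caller code rather than anything in this commit: queries inside the callback are still timed by wrap(), but the `Slow query took ...ms` line is suppressed. Because the helper restores the previous flag in a finally block, nested calls unwind correctly and an exception thrown by the callback cannot leave logging disabled for unrelated queries.

    const sql = require('./services/sql');  // hypothetical caller inside the Trilium codebase

    const rows = sql.disableSlowQueryLogging(() => {
        // a known-slow scan: timing still happens, only the log line is skipped
        return sql.getRawRows(`SELECT noteId, title FROM notes WHERE isDeleted = 0`);
    });
    // outside the callback, slow-query logging behaves as before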