bugfixes to sync

zadam 2023-12-30 00:34:46 +01:00
parent 8dbc592563
commit f704cacdee
8 changed files with 38 additions and 10 deletions

View File

@@ -0,0 +1,14 @@
+UPDATE blobs SET blobId = REPLACE(blobId, '+', 'X');
+UPDATE blobs SET blobId = REPLACE(blobId, '/', 'Y');
+UPDATE notes SET blobId = REPLACE(blobId, '+', 'X');
+UPDATE notes SET blobId = REPLACE(blobId, '/', 'Y');
+UPDATE attachments SET blobId = REPLACE(blobId, '+', 'X');
+UPDATE attachments SET blobId = REPLACE(blobId, '/', 'Y');
+UPDATE revisions SET blobId = REPLACE(blobId, '+', 'X');
+UPDATE revisions SET blobId = REPLACE(blobId, '/', 'Y');
+UPDATE entity_changes SET entityId = REPLACE(entityId, '+', 'X') WHERE entityName = 'blobs';
+UPDATE entity_changes SET entityId = REPLACE(entityId, '/', 'Y') WHERE entityName = 'blobs';
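
Note: this migration backfills IDs generated before the `replaceAll` fix at the bottom of this commit; with the old single-occurrence `replace`, a blobId could still carry `+` or `/` from its base64 source. SQL's `REPLACE` already substitutes every occurrence, so the statements above apply the corrected mapping to existing rows. A minimal sketch of that mapping (the helper name is hypothetical):

```js
// Hypothetical helper mirroring the SQL above: every '+' becomes 'X',
// every '/' becomes 'Y' — all occurrences, not just the first.
function normalizeBlobId(blobId) {
    return blobId.replaceAll('+', 'X').replaceAll('/', 'Y');
}

console.log(normalizeBlobId('ab+cd/ef+gh')); // 'abXcdYefXgh'
```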

View File

@@ -4,8 +4,8 @@ const build = require('./build.js');
 const packageJson = require('../../package.json');
 const {TRILIUM_DATA_DIR} = require('./data_dir.js');
-const APP_DB_VERSION = 227;
-const SYNC_VERSION = 31;
+const APP_DB_VERSION = 228;
+const SYNC_VERSION = 32;
 const CLIPPER_PROTOCOL_VERSION = "1.0";
 module.exports = {
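
The two constants are bumped together: raising APP_DB_VERSION to 228 makes the SQL migration above run on startup, while raising SYNC_VERSION to 32 keeps instances that still generate old-style blobIds from syncing with patched ones. A rough sketch of the kind of guard this enables (illustrative only, not Trilium's actual handshake code):

```js
// Illustrative guard: refuse to sync with an instance speaking a different
// sync protocol version (the real check lives in Trilium's sync handshake).
const SYNC_VERSION = 32;

function assertSyncVersion(remoteSyncVersion) {
    if (remoteSyncVersion !== SYNC_VERSION) {
        throw new Error(`sync version mismatch: local ${SYNC_VERSION}, remote ${remoteSyncVersion}`);
    }
}
```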

View File

@@ -5,12 +5,14 @@ const utils = require('./utils.js');
 function getBlobPojo(entityName, entityId) {
     const entity = becca.getEntity(entityName, entityId);
     if (!entity) {
         throw new NotFoundError(`Entity ${entityName} '${entityId}' was not found.`);
     }
     const blob = becca.getBlob(entity);
+    if (!blob) {
+        throw new NotFoundError(`Blob ${entity.blobId} for ${entityName} '${entityId}' was not found.`);
+    }
     const pojo = blob.getPojo();

View File

@@ -17,6 +17,9 @@ const {sanitizeAttributeName} = require('./sanitize_attribute_name.js');
 const noteTypes = require('../services/note_types.js').getNoteTypeNames();
 class ConsistencyChecks {
+    /**
+     * @param autoFix - automatically fix all encountered problems. False is only for debugging during development (fail fast)
+     */
     constructor(autoFix) {
         this.autoFix = autoFix;
         this.unrecoveredConsistencyErrors = false;

View File

@@ -37,6 +37,8 @@ function eraseNotes(noteIdsToErase) {
 function setEntityChangesAsErased(entityChanges) {
     for (const ec of entityChanges) {
         ec.isErased = true;
+        // we're not changing hash here, not sure if good or not
+        // content hash check takes isErased flag into account, though
         ec.utcDateChanged = dateUtils.utcNowDateTime();
         entityChangesService.putEntityChangeWithForcedChange(ec);
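
The new comment records a deliberate trade-off: `hash` is left untouched when flagging changes as erased, on the assumption that the periodic content-hash comparison folds `isErased` in separately. A sketch of that idea (illustrative field handling, not the actual content_hash implementation):

```js
const crypto = require('crypto');

// Illustrative: include isErased alongside the stored hash so two instances
// that agree on content but disagree on erasure still produce different
// rollup checksums and trigger a resync.
function rollupChecksum(entityChanges) {
    const hash = crypto.createHash('sha1');
    for (const ec of entityChanges) {
        hash.update(`${ec.entityId}|${ec.hash}|${ec.isErased ? 1 : 0}`);
    }
    return hash.digest('base64');
}
```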

View File

@@ -73,9 +73,12 @@ async function migrate() {
         }
     });
+    if (currentDbVersion === 214) {
+        // special VACUUM after the big migration
+        log.info("VACUUMing database, this might take a while ...");
+        sql.execute("VACUUM");
+    }
 }
 function executeMigration(mig) {
     if (mig.type === 'sql') {
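
`VACUUM` rewrites the whole database file to reclaim the space freed by the large version-214 migration; it cannot run inside a transaction, which is presumably why it sits after the transactional block. A standalone sketch with better-sqlite3, the SQLite driver Trilium uses (the file path here is hypothetical):

```js
const Database = require('better-sqlite3');

const db = new Database('/tmp/example.db'); // hypothetical path
// VACUUM must run outside any open transaction; it rewrites the entire
// file, which is why it "might take a while" on large documents.
db.exec('VACUUM');
db.close();
```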

View File

@@ -91,12 +91,16 @@ function updateNormalEntity(remoteEC, remoteEntityRow, instanceId, updateContext
             updateContext.updated[remoteEC.entityName].push(remoteEC.entityId);
         }
-        if (!localEC || localEC.utcDateChanged < remoteEC.utcDateChanged || localEC.hash !== remoteEC.hash) {
+        if (!localEC || localEC.utcDateChanged < remoteEC.utcDateChanged
+            || localEC.hash !== remoteEC.hash
+            || localEC.isErased !== remoteEC.isErased
+        ) {
             entityChangesService.putEntityChangeWithInstanceId(remoteEC, instanceId);
         }
         return true;
-    } else if (localEC.hash !== remoteEC.hash && localEC.utcDateChanged > remoteEC.utcDateChanged) {
+    } else if ((localEC.hash !== remoteEC.hash || localEC.isErased !== remoteEC.isErased)
+            && localEC.utcDateChanged > remoteEC.utcDateChanged) {
         // the change on our side is newer than on the other side, so the other side should update
         entityChangesService.putEntityChangeForOtherInstances(localEC);
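
The widened conditions are the core sync fix: previously, two instances whose entity changes matched on hash and timestamp but differed on `isErased` would never reconcile, so erasures could fail to propagate. Condensed into standalone predicates (a paraphrase of the logic above, not the actual function signatures):

```js
// Accept and store the remote change when we have nothing local, the remote
// side is newer, or the two sides disagree on content or erasure status.
function shouldAcceptRemote(localEC, remoteEC) {
    return !localEC
        || localEC.utcDateChanged < remoteEC.utcDateChanged
        || localEC.hash !== remoteEC.hash
        || localEC.isErased !== remoteEC.isErased;
}

// Push the local change back out when it differs in content or erasure
// status and is strictly newer than the remote one.
function shouldPushLocal(localEC, remoteEC) {
    return !!localEC
        && (localEC.hash !== remoteEC.hash || localEC.isErased !== remoteEC.isErased)
        && localEC.utcDateChanged > remoteEC.utcDateChanged;
}
```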
@@ -148,7 +152,7 @@ function eraseEntity(entityChange) {
     ];
     if (!entityNames.includes(entityName)) {
-        log.error(`Cannot erase entity '${entityName}', id '${entityId}'.`);
+        log.error(`Cannot erase ${entityName} '${entityId}'.`);
         return;
}

View File

@@ -34,8 +34,8 @@ function hashedBlobId(content) {
     // we don't want such + and / in the IDs
     const kindaBase62Hash = base64Hash
-        .replace('+', 'X')
-        .replace('/', 'Y');
+        .replaceAll('+', 'X')
+        .replaceAll('/', 'Y');
     // 20 characters of base62 gives us ~120 bit of entropy which is plenty enough
     return kindaBase62Hash.substr(0, 20);
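
This is the root cause of the commit: with a string pattern, `String.prototype.replace` substitutes only the first occurrence (only a `/g` regex or `replaceAll` hits them all), so any base64 hash containing more than one `+` or `/` produced a blobId that still carried base64 characters — hence the cleanup migration at the top. A quick demonstration:

```js
const base64Hash = 'Qx+9/ab+cd/EfGhIjKlM';

// A string pattern only replaces the first match:
console.log(base64Hash.replace('+', 'X').replace('/', 'Y'));
// -> 'QxX9Yab+cd/EfGhIjKlM'   (later '+' and '/' survive)

// replaceAll fixes every occurrence:
console.log(base64Hash.replaceAll('+', 'X').replaceAll('/', 'Y'));
// -> 'QxX9YabXcdYEfGhIjKlM'
```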