diff --git a/src/server/handler/akappend.cpp b/src/server/handler/akappend.cpp
--- a/src/server/handler/akappend.cpp
+++ b/src/server/handler/akappend.cpp
@@ -436,16 +436,33 @@
         }
         storageTrx.commit();
     } else {
-        qCDebug(AKONADISERVER_LOG) << "Multiple merge candidates:";
+        qCWarning(AKONADISERVER_LOG) << "Multiple merge candidates:";
         for (const PimItem &item : result) {
-            qCDebug(AKONADISERVER_LOG) << "\tID:" << item.id() << ", RID:" << item.remoteId()
+            qCWarning(AKONADISERVER_LOG) << "\tID:" << item.id() << ", RID:" << item.remoteId()
                 << ", GID:" << item.gid()
+                << ", Remote rev:" << item.remoteRevision()
                 << ", Collection:" << item.collection().name() << "(" << item.collectionId() << ")"
                 << ", Resource:" << item.collection().resource().name() << "(" << item.collection().resourceId() << ")";
         }
-        // Nor GID or RID are guaranteed to be unique, so make sure we don't merge
-        // something we don't want
-        return failureResponse(QStringLiteral("Multiple merge candidates, aborting"));
+        qCWarning(AKONADISERVER_LOG) << "for:\tUpdate:" << item;
+        if (cmd.mergeModes() & Protocol::CreateItemCommand::RemoteID) {
+            // When merging by remote-id we must update every copy matching it, since the remote-id is how
+            // the remote host identifies the item. Duplicate RIDs are an inconsistency that should be fixed
+            // elsewhere in Akonadi, but the only alternative is to drop the entire cache and redownload it.
+            // Iterate by reference: PimItem is a DB record object, no need to copy it per iteration.
+            for (PimItem &existingItem : result) {
+                if (!mergeItem(cmd, item, existingItem, parentCol)) {
+                    return false;
+                }
+            }
+            if (!transaction.commit()) {
+                return failureResponse(QStringLiteral("Failed to commit transaction"));
+            }
+            storageTrx.commit();
+        } else {
+            // GID is not guaranteed to be unique, so make sure we don't merge
+            // something we don't want
+            return failureResponse(QStringLiteral("Multiple merge candidates, aborting"));
+        }
     }
 }
 