diff --git a/libs/image/3rdparty/lock_free_map/qsbr.h b/libs/image/3rdparty/lock_free_map/qsbr.h
index 6acc6b8440..502356526a 100644
--- a/libs/image/3rdparty/lock_free_map/qsbr.h
+++ b/libs/image/3rdparty/lock_free_map/qsbr.h
@@ -1,130 +1,123 @@
 /*------------------------------------------------------------------------
   Junction: Concurrent data structures in C++
   Copyright (c) 2016 Jeff Preshing
   Distributed under the Simplified BSD License.
   Original location: https://github.com/preshing/junction
   This software is distributed WITHOUT ANY WARRANTY; without even the
   implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
   PURPOSE. See the LICENSE file for more information.
 ------------------------------------------------------------------------*/
 
 #ifndef QSBR_H
 #define QSBR_H
 
 #include <atomic>
 #include <QAtomicInt>
 #include <kis_assert.h>
 #include <tiles3/kis_lockless_stack.h>
 
 #define CALL_MEMBER(obj, pmf) ((obj).*(pmf))
 
 class QSBR
 {
 private:
     struct Action {
         void (*func)(void*);
         quint64 param[4]; // Size limit found experimentally. Verified by assert below.
 
         Action() = default;
 
         Action(void (*f)(void*), void* p, quint64 paramSize) : func(f)
         {
             KIS_ASSERT(paramSize <= sizeof(param)); // Verify size limit.
             memcpy(&param, p, paramSize);
         }
 
         void operator()()
         {
             func(&param);
         }
     };
 
     QAtomicInt m_rawPointerUsers;
     KisLocklessStack<Action> m_pendingActions;
     KisLocklessStack<Action> m_migrationReclaimActions;
     std::atomic_flag m_isProcessing = ATOMIC_FLAG_INIT;
 
-    void releasePoolSafely(KisLocklessStack<Action> *pool) {
-        Action action;
+    void releasePoolSafely(KisLocklessStack<Action> *pool, bool force = false) {
+        KisLocklessStack<Action> tmp;
+        tmp.mergeFrom(*pool);
+        if (tmp.isEmpty()) return;
 
-        while (pool->pop(action)) {
-            action();
+        if (force || tmp.size() > 4096) {
+            while (m_rawPointerUsers.loadAcquire());
+
+            Action action;
+            while (tmp.pop(action)) {
+                action();
+            }
+        } else {
+            if (!m_rawPointerUsers.loadAcquire()) {
+                Action action;
+                while (tmp.pop(action)) {
+                    action();
+                }
+            } else {
+                // push elements back to the source
+                pool->mergeFrom(tmp);
+            }
         }
     }
 
 public:
     template <class T>
     void enqueue(void (T::*pmf)(), T* target, bool migration = false)
     {
         struct Closure {
             void (T::*pmf)();
             T* target;
 
             static void thunk(void* param)
             {
                 Closure* self = (Closure*) param;
                 CALL_MEMBER(*self->target, self->pmf)();
             }
         };
 
         Closure closure = {pmf, target};
 
         if (migration) {
             m_migrationReclaimActions.push(Action(Closure::thunk, &closure, sizeof(closure)));
         } else {
             m_pendingActions.push(Action(Closure::thunk, &closure, sizeof(closure)));
         }
     }
 
     void update(bool migrationInProgress)
     {
-        if (m_rawPointerUsers.testAndSetAcquire(0, 1)) {
-
-            /// TODO: theoretically, there is a race condition:
-            /// if the user's code enters critical section and
-            /// releases an object *after* we started purging
-            /// garbage. In such a case we can free still used
-            /// tile. I didn't check this hypothesis yet though.
-
-            releasePoolSafely(&m_pendingActions);
-
-            if (!migrationInProgress) {
-                releasePoolSafely(&m_migrationReclaimActions);
-            }
-
-            m_rawPointerUsers.deref();
-
-        } else if (m_pendingActions.size() > 4098) {
-            // TODO: make pool size limit configurable!
-
-            while (!m_rawPointerUsers.testAndSetAcquire(0, 1));
-
-            releasePoolSafely(&m_pendingActions);
+        releasePoolSafely(&m_pendingActions);
 
-            m_rawPointerUsers.deref();
+        if (!migrationInProgress) {
+            releasePoolSafely(&m_migrationReclaimActions);
         }
     }
 
     void flush()
     {
-        while (!m_rawPointerUsers.testAndSetAcquire(0, 1));
-
-        releasePoolSafely(&m_pendingActions);
-        releasePoolSafely(&m_migrationReclaimActions);
-
-        m_rawPointerUsers.deref();
+        releasePoolSafely(&m_pendingActions, true);
+        releasePoolSafely(&m_migrationReclaimActions, true);
     }
 
     void lockRawPointerAccess()
     {
         m_rawPointerUsers.ref();
     }
 
     void unlockRawPointerAccess()
     {
         m_rawPointerUsers.deref();
     }
 };
 
 #endif // QSBR_H
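For context, here is how the reworked QSBR API is meant to be driven. This is a hypothetical usage sketch, not part of the patch: the Tile type and both call sites are invented for illustration, and it assumes the Krita/Qt build environment that provides qsbr.h and its dependencies.

    #include "qsbr.h"

    struct Tile {
        void notifyDead() { delete this; } // deferred reclamation action
    };

    QSBR qsbr;

    void readerPath(Tile *sharedTile)
    {
        qsbr.lockRawPointerAccess();   // pin: a non-forced releasePoolSafely()
                                       // sees m_rawPointerUsers != 0 and defers
        // ... dereference sharedTile safely here ...
        qsbr.unlockRawPointerAccess(); // unpin
    }

    void writerPath(Tile *retiredTile)
    {
        // defer destruction until no raw-pointer users remain
        qsbr.enqueue(&Tile::notifyDead, retiredTile);
        qsbr.update(false); // opportunistically drain pending actions
    }

The behavioral change: update() no longer races to flip m_rawPointerUsers from 0 to 1. A non-forced releasePoolSafely() steals the whole pool into a temporary stack, drains it only if no raw-pointer users are active, and otherwise hands everything back via mergeFrom(); flush() takes the forced path, spin-waiting for the user count to reach zero before running the actions.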
diff --git a/libs/image/tiles3/kis_lockless_stack.h b/libs/image/tiles3/kis_lockless_stack.h
index 6aa5b8bf29..264d061460 100644
--- a/libs/image/tiles3/kis_lockless_stack.h
+++ b/libs/image/tiles3/kis_lockless_stack.h
@@ -1,213 +1,235 @@
 /*
  *  Copyright (c) 2010 Dmitry Kazakov
  *
  *  References:
  *    * Maged M. Michael, Safe memory reclamation for dynamic
  *      lock-free objects using atomic reads and writes,
  *      Proceedings of the twenty-first annual symposium on
  *      Principles of distributed computing, July 21-24, 2002,
  *      Monterey, California
  *
  *    * Idea of m_deleteBlockers is taken from Andrey Gulin's blog
  *      http://users.livejournal.com/_foreseer/34284.html
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
  *  the Free Software Foundation; either version 2 of the License, or
  *  (at your option) any later version.
  *
  *  This program is distributed in the hope that it will be useful,
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
 #ifndef __KIS_LOCKLESS_STACK_H
 #define __KIS_LOCKLESS_STACK_H
 
 #include <QAtomicPointer>
 
 template <class T>
 class KisLocklessStack
 {
 private:
     struct Node {
         Node *next;
         T data;
     };
 
 public:
     KisLocklessStack() { }
 
     ~KisLocklessStack() {
         freeList(m_top.fetchAndStoreOrdered(0));
         freeList(m_freeNodes.fetchAndStoreOrdered(0));
     }
 
     void push(T data) {
         Node *newNode = new Node();
         newNode->data = data;
 
         Node *top;
 
         do {
             top = m_top;
             newNode->next = top;
         } while (!m_top.testAndSetOrdered(top, newNode));
 
         m_numNodes.ref();
     }
 
     bool pop(T &value) {
         bool result = false;
 
         m_deleteBlockers.ref();
 
         while(1) {
             Node *top = (Node*) m_top;
             if(!top) break;
 
             // This is safe as we ref'ed m_deleteBlockers
             Node *next = top->next;
 
             if(m_top.testAndSetOrdered(top, next)) {
                 m_numNodes.deref();
 
                 result = true;
                 value = top->data;
 
                 /**
                  * Test if we are the only delete blocker left
                  * (it means that we are the only owner of 'top').
                  * If there is someone else in the "delete-blocked section",
                  * then just add the struct to the list of free nodes.
                  */
                 if (m_deleteBlockers == 1) {
                     cleanUpNodes();
                     delete top;
                 } else {
                     releaseNode(top);
                 }
 
                 break;
             }
         }
 
         m_deleteBlockers.deref();
 
         return result;
     }
 
     void clear() {
         // a fast-path without write ops
         if(!m_top) return;
 
         m_deleteBlockers.ref();
 
         Node *top = m_top.fetchAndStoreOrdered(0);
 
         int removedChunkSize = 0;
         Node *tmp = top;
         while(tmp) {
             removedChunkSize++;
             tmp = tmp->next;
         }
         m_numNodes.fetchAndAddOrdered(-removedChunkSize);
 
         while(top) {
             Node *next = top->next;
 
             if (m_deleteBlockers == 1) {
                 /**
                  * We are the only owner of top's contents,
                  * so we can delete it freely.
                  */
                 cleanUpNodes();
                 freeList(top);
                 next = 0;
             } else {
                 releaseNode(top);
             }
 
             top = next;
         }
 
         m_deleteBlockers.deref();
     }
 
+    void mergeFrom(KisLocklessStack<T> &other) {
+        Node *otherTop = other.m_top.fetchAndStoreOrdered(0);
+        if (!otherTop) return;
+
+        int removedChunkSize = 1;
+        Node *last = otherTop;
+        while(last->next) {
+            removedChunkSize++;
+            last = last->next;
+        }
+        other.m_numNodes.fetchAndAddOrdered(-removedChunkSize);
+
+        Node *top;
+
+        do {
+            top = m_top;
+            last->next = top;
+        } while (!m_top.testAndSetOrdered(top, otherTop));
+
+        m_numNodes.fetchAndAddOrdered(removedChunkSize);
+    }
+
     /**
      * It is impossible to measure the size of the stack
      * in a highly concurrent environment, so we return an
      * approximate value! Do not rely on this value much!
      */
     qint32 size() {
         return m_numNodes;
     }
 
     bool isEmpty() {
         return !m_numNodes;
     }
 
 private:
 
     inline void releaseNode(Node *node) {
         Node *top;
         do {
             top = m_freeNodes;
             node->next = top;
         } while (!m_freeNodes.testAndSetOrdered(top, node));
     }
 
     inline void cleanUpNodes() {
         Node *cleanChain = m_freeNodes.fetchAndStoreOrdered(0);
         if (!cleanChain) return;
 
         /**
          * If we are the only users of the objects in cleanChain,
          * then just free them. Otherwise, push them back into the
          * recycling list and keep them there till another
          * chance comes.
          */
         if (m_deleteBlockers == 1) {
             freeList(cleanChain);
         } else {
             Node *last = cleanChain;
             while (last->next) last = last->next;
 
             Node *freeTop;
             do {
                 freeTop = m_freeNodes;
                 last->next = freeTop;
             } while (!m_freeNodes.testAndSetOrdered(freeTop, cleanChain));
         }
     }
 
     inline void freeList(Node *first) {
         Node *next;
         while (first) {
             next = first->next;
             delete first;
             first = next;
         }
     }
 
 private:
     Q_DISABLE_COPY(KisLocklessStack)
 
     QAtomicPointer<Node> m_top;
     QAtomicPointer<Node> m_freeNodes;
 
     QAtomicInt m_deleteBlockers;
     QAtomicInt m_numNodes;
 };
 
 #endif /* __KIS_LOCKLESS_STACK_H */
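The drain-or-putback pattern in releasePoolSafely() is enabled by the new KisLocklessStack::mergeFrom(): it detaches the source stack's entire chain with a single fetchAndStoreOrdered(), walks the chain once only to fix up the approximate node counters, and splices it onto the destination with one CAS loop, so elements migrate between stacks without per-element pop/push traffic. A standalone sketch of the same idiom (hypothetical usage; readersActive() is an invented stand-in for whatever condition guards reclamation, and the include assumes Krita's libs/image directory is on the include path):

    #include <tiles3/kis_lockless_stack.h>

    bool readersActive(); // stand-in for e.g. m_rawPointerUsers != 0

    void drainOrPutBack(KisLocklessStack<int> &shared)
    {
        KisLocklessStack<int> tmp;
        tmp.mergeFrom(shared); // atomically steal the whole chain

        if (!readersActive()) {
            int value = 0;
            while (tmp.pop(value)) {
                // ... run the reclamation work for value ...
            }
        } else {
            shared.mergeFrom(tmp); // hand everything back untouched
        }
    }

Two caveats follow from the implementation: size() is updated with separate atomic increments and decrements, so (as the header's own comment warns) it is only approximate under concurrency, and releasePoolSafely() uses it purely as a heuristic threshold; and mergeFrom() splices the stolen chain on top of the destination, so subsequent pops return the merged-in elements before any that were already present.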