diff --git a/libs/image/3rdparty/lock_free_map/concurrent_map.h b/libs/image/3rdparty/lock_free_map/concurrent_map.h
index c8aaf84145..f9218694d6 100644
--- a/libs/image/3rdparty/lock_free_map/concurrent_map.h
+++ b/libs/image/3rdparty/lock_free_map/concurrent_map.h
@@ -1,348 +1,355 @@
/*------------------------------------------------------------------------
  Junction: Concurrent data structures in C++
  Copyright (c) 2016 Jeff Preshing

  Distributed under the Simplified BSD License.
  Original location: https://github.com/preshing/junction

  This software is distributed WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
  PURPOSE. See the LICENSE file for more information.
------------------------------------------------------------------------*/

#ifndef CONCURRENTMAP_H
#define CONCURRENTMAP_H

#include "leapfrog.h"

template <class K, class V, class KT = DefaultKeyTraits<K>, class VT = DefaultValueTraits<V> >
class ConcurrentMap
{
public:
    typedef K Key;
    typedef V Value;
    typedef KT KeyTraits;
    typedef VT ValueTraits;
    typedef quint32 Hash;
    typedef Leapfrog<ConcurrentMap> Details;

private:
    Atomic<typename Details::Table*> m_root;

public:
    ConcurrentMap(quint64 capacity = Details::InitialSize) : m_root(Details::Table::create(capacity))
    {
    }

    ~ConcurrentMap()
    {
        typename Details::Table* table = m_root.loadNonatomic();
        table->destroy();
    }

    // publishTableMigration() is called by exactly one thread from Details::TableMigration::run()
    // after all the threads participating in the migration have completed their work.
    void publishTableMigration(typename Details::TableMigration* migration)
    {
        // There are no racing calls to this function.
        typename Details::Table* oldRoot = m_root.loadNonatomic();
        m_root.store(migration->m_destination, Release);
        Q_ASSERT(oldRoot == migration->getSources()[0].table);
        // Caller will GC the TableMigration and the source table.
    }

    // A Mutator represents a known cell in the hash table.
    // It's meant for manipulations within a temporary function scope.
    // Obviously you must not call QSBR::Update while holding a Mutator.
    // Any operation that modifies the table (exchangeValue, eraseValue)
    // may be forced to follow a redirected cell, which changes the Mutator itself.
    // Note that even if the Mutator was constructed from an existing cell,
    // exchangeValue() can still trigger a resize if the existing cell was previously marked deleted,
    // or if another thread deletes the key between the two steps.
    class Mutator
    {
    private:
        friend class ConcurrentMap;

        ConcurrentMap& m_map;
        typename Details::Table* m_table;
        typename Details::Cell* m_cell;
        Value m_value;

        // Constructor: Find existing cell
        Mutator(ConcurrentMap& map, Key key, bool) : m_map(map), m_value(Value(ValueTraits::NullValue))
        {
            Hash hash = KeyTraits::hash(key);
            for (;;) {
                m_table = m_map.m_root.load(Consume);
                m_cell = Details::find(hash, m_table);
                if (!m_cell) {
                    return;
                }

                Value value = m_cell->value.load(Consume);
                if (value != Value(ValueTraits::Redirect)) {
                    // Found an existing value
                    m_value = value;
                    return;
                }
                // We've encountered a Redirect value. Help finish the migration.
                m_table->jobCoordinator.participate();
                // Try again using the latest root.
            }
        }

        // Constructor: Insert or find cell
        Mutator(ConcurrentMap& map, Key key) : m_map(map), m_value(Value(ValueTraits::NullValue))
        {
            Hash hash = KeyTraits::hash(key);
            for (;;) {
                m_table = m_map.m_root.load(Consume);
                quint64 overflowIdx;
                switch (Details::insertOrFind(hash, m_table, m_cell, overflowIdx)) { // Modifies m_cell
                case Details::InsertResult_InsertedNew: {
                    // We've inserted a new cell. Don't load m_cell->value.
                    return;
                }
                case Details::InsertResult_AlreadyFound: {
                    // The hash was already found in the table.
                    Value value = m_cell->value.load(Consume);
                    if (value == Value(ValueTraits::Redirect)) {
                        // We've encountered a Redirect value.
                        break; // Help finish the migration.
                    }
                    // Found an existing value
                    m_value = value;
                    return;
                }
                case Details::InsertResult_Overflow: {
                    // Unlike ConcurrentMap_Linear, we don't need to keep track of & pass a "mustDouble" flag.
                    // Passing overflowIdx is sufficient to prevent an infinite loop here.
                    // It defines the start of the range of cells to check while estimating total cells in use.
                    // After the first migration, deleted keys are purged, so if we hit this line during the
                    // second loop iteration, every cell in the range will be in use, thus the estimate will be 100%.
                    // (Concurrent deletes could result in further iterations, but it will eventually settle.)
                    Details::beginTableMigration(m_map, m_table, overflowIdx);
                    break;
                }
                }
                // A migration has been started (either by us, or another thread). Participate until it's complete.
                m_table->jobCoordinator.participate();
                // Try again using the latest root.
            }
        }

    public:
        Value getValue() const
        {
            // Return previously loaded value. Don't load it again.
            return Value(m_value);
        }

        Value exchangeValue(Value desired)
        {
            Q_ASSERT(desired != Value(ValueTraits::NullValue));
            Q_ASSERT(desired != Value(ValueTraits::Redirect));
            Q_ASSERT(m_cell); // Cell must have been found or inserted

            for (;;) {
                Value oldValue = m_value;
                if (m_cell->value.compareExchangeStrong(m_value, desired, ConsumeRelease)) {
                    // Exchange was successful. Return previous value.
                    Value result = m_value;
                    m_value = desired; // Leave the mutator in a valid state
                    return result;
                }
                // The CAS failed and m_value has been updated with the latest value.
                if (m_value != Value(ValueTraits::Redirect)) {
                    if (oldValue == Value(ValueTraits::NullValue) && m_value != Value(ValueTraits::NullValue)) {
                        // racing write inserted new value
                    }
                    // There was a racing write (or erase) to this cell.
                    // Pretend we exchanged with ourselves, and just let the racing write win.
                    return desired;
                }

                // We've encountered a Redirect value. Help finish the migration.
                Hash hash = m_cell->hash.load(Relaxed);

                for (;;) {
                    // Help complete the migration.
                    m_table->jobCoordinator.participate();
                    // Try again in the new table.
                    m_table = m_map.m_root.load(Consume);
                    m_value = Value(ValueTraits::NullValue);
                    quint64 overflowIdx;

                    switch (Details::insertOrFind(hash, m_table, m_cell, overflowIdx)) { // Modifies m_cell
                    case Details::InsertResult_AlreadyFound:
                        m_value = m_cell->value.load(Consume);
                        if (m_value == Value(ValueTraits::Redirect)) {
                            break;
                        }
                        goto breakOuter;
                    case Details::InsertResult_InsertedNew:
                        goto breakOuter;
                    case Details::InsertResult_Overflow:
                        Details::beginTableMigration(m_map, m_table, overflowIdx);
                        break;
                    }
                    // We were redirected... again
                }
            breakOuter:;
                // Try again in the new table.
            }
        }

        void assignValue(Value desired)
        {
            exchangeValue(desired);
        }

        Value eraseValue()
        {
            Q_ASSERT(m_cell); // Cell must have been found or inserted

            for (;;) {
                if (m_value == Value(ValueTraits::NullValue)) {
                    return Value(m_value);
                }

                if (m_cell->value.compareExchangeStrong(m_value, Value(ValueTraits::NullValue), Consume)) {
                    // Exchange was successful and a non-NULL value was erased and returned by reference in m_value.
                    Q_ASSERT(m_value != Value(ValueTraits::NullValue)); // Implied by the test at the start of the loop.
                    Value result = m_value;
                    m_value = Value(ValueTraits::NullValue); // Leave the mutator in a valid state
                    return result;
                }

                // The CAS failed and m_value has been updated with the latest value.
                if (m_value != Value(ValueTraits::Redirect)) {
                    // There was a racing write (or erase) to this cell.
                    // Pretend we erased nothing, and just let the racing write win.
                    return Value(ValueTraits::NullValue);
                }

                // We've been redirected to a new table.
                Hash hash = m_cell->hash.load(Relaxed); // Re-fetch hash

                for (;;) {
                    // Help complete the migration.
                    m_table->jobCoordinator.participate();
                    // Try again in the new table.
                    m_table = m_map.m_root.load(Consume);
                    m_cell = Details::find(hash, m_table);
                    if (!m_cell) {
                        m_value = Value(ValueTraits::NullValue);
                        return m_value;
                    }

                    m_value = m_cell->value.load(Relaxed);
                    if (m_value != Value(ValueTraits::Redirect)) {
                        break;
                    }
                }
            }
        }
    };
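For readers coming to this Junction-derived map for the first time, here is a minimal usage sketch of the Mutator API above. The `Data` payload type and the free function are illustrative only and not part of this patch; with `DefaultKeyTraits`/`DefaultValueTraits`, keys must be nonzero (0 is `NullKey`) and the value representations 0 and 1 are reserved for `NullValue` and `Redirect`, which is why pointer values are the natural fit:

```cpp
#include <QtGlobal>
#include "3rdparty/lock_free_map/concurrent_map.h"

struct Data { int payload; };   // illustrative payload type, not part of the patch

void mutatorSketch(ConcurrentMap<quint32, Data*> &map, quint32 key, Data *fresh)
{
    // Insert-or-find: the Mutator points at the cell for 'key' in the current table.
    auto mutator = map.insertOrFind(key);

    // Publish 'fresh' (must be non-null) and receive whatever was there before (nullptr if none).
    Data *previous = mutator.exchangeValue(fresh);

    // Read-only lookup: find() never inserts; getValue() returns the value that was
    // loaded when the Mutator was constructed (nullptr if the key is absent).
    Data *seen = map.find(key).getValue();

    // Erase through a Mutator; returns the erased value, or nullptr if nothing was there.
    Data *erased = map.insertOrFind(key).eraseValue();

    Q_UNUSED(previous); Q_UNUSED(seen); Q_UNUSED(erased);
}
```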
    Mutator insertOrFind(Key key)
    {
        return Mutator(*this, key);
    }

    Mutator find(Key key)
    {
        return Mutator(*this, key, false);
    }

    // Lookup without creating a temporary Mutator.
    Value get(Key key)
    {
        Hash hash = KeyTraits::hash(key);
        for (;;) {
            typename Details::Table* table = m_root.load(Consume);
            typename Details::Cell* cell = Details::find(hash, table);
            if (!cell) {
                return Value(ValueTraits::NullValue);
            }

            Value value = cell->value.load(Consume);
            if (value != Value(ValueTraits::Redirect)) {
                return value; // Found an existing value
            }
            // We've been redirected to a new table. Help with the migration.
            table->jobCoordinator.participate();
            // Try again in the new table.
        }
    }

    Value assign(Key key, Value desired)
    {
        Mutator iter(*this, key);
        return iter.exchangeValue(desired);
    }

    Value exchange(Key key, Value desired)
    {
        Mutator iter(*this, key);
        return iter.exchangeValue(desired);
    }

    Value erase(Key key)
    {
        Mutator iter(*this, key, false);
        return iter.eraseValue();
    }

    // The easiest way to implement an Iterator is to prevent all Redirects.
    // The current Iterator does that by forbidding concurrent inserts.
    // To make it work with concurrent inserts, we'd need a way to block TableMigrations.
    class Iterator
    {
    private:
        typename Details::Table* m_table;
        quint64 m_idx;
        Key m_hash;
        Value m_value;

    public:
        Iterator(ConcurrentMap& map)
        {
            // Since we've forbidden concurrent inserts (for now), nonatomic would suffice here, but let's plan ahead:
            m_table = map.m_root.load(Consume);
            m_idx = -1;
            next();
        }

        void next()
        {
            Q_ASSERT(m_table);
            Q_ASSERT(isValid() || m_idx == -1); // Either the Iterator is already valid, or we've just started iterating.

            while (++m_idx <= m_table->sizeMask) {
                // Index still inside range of table.
                typename Details::CellGroup* group = m_table->getCellGroups() + (m_idx >> 2);
                typename Details::Cell* cell = group->cells + (m_idx & 3);
                m_hash = cell->hash.load(Relaxed);

                if (m_hash != KeyTraits::NullHash) {
                    // Cell has been reserved.
                    m_value = cell->value.load(Relaxed);
                    Q_ASSERT(m_value != Value(ValueTraits::Redirect));
                    if (m_value != Value(ValueTraits::NullValue))
                        return; // Yield this cell.
                }
            }
            // That's the end of the map.
            m_hash = KeyTraits::NullHash;
            m_value = Value(ValueTraits::NullValue);
        }

        bool isValid() const
        {
            return m_value != Value(ValueTraits::NullValue);
        }

+        Key getKey() const
+        {
+            Q_ASSERT(isValid());
+            // Since we've forbidden concurrent inserts (for now), nonatomic would suffice here, but let's plan ahead:
+            return KeyTraits::dehash(m_hash);
+        }
+
        Value getValue() const
        {
            Q_ASSERT(isValid());
            return m_value;
        }
    };
};

#endif // CONCURRENTMAP_LEAPFROG_H
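The `getKey()` accessor added above only makes sense together with the invertible hash introduced in map_traits.h below. A short sketch of how the Iterator would be consumed, assuming single-threaded access since the comment on the class forbids concurrent inserts (the `Data` payload type is illustrative, as in the earlier sketch):

```cpp
#include <QDebug>
#include "3rdparty/lock_free_map/concurrent_map.h"

struct Data { int payload; };   // illustrative payload, not part of the patch

void iterateSketch(ConcurrentMap<quint32, Data*> &map)
{
    for (ConcurrentMap<quint32, Data*>::Iterator it(map); it.isValid(); it.next()) {
        quint32 key = it.getKey();   // KeyTraits::dehash(m_hash), valid only because hash() is invertible
        Data *value = it.getValue();
        qDebug() << key << value->payload;
    }
}
```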
diff --git a/libs/image/3rdparty/lock_free_map/map_traits.h b/libs/image/3rdparty/lock_free_map/map_traits.h
index 006bbe06c2..92d97e6f4f 100644
--- a/libs/image/3rdparty/lock_free_map/map_traits.h
+++ b/libs/image/3rdparty/lock_free_map/map_traits.h
@@ -1,55 +1,80 @@
/*------------------------------------------------------------------------
  Junction: Concurrent data structures in C++
  Copyright (c) 2016 Jeff Preshing

  Distributed under the Simplified BSD License.
  Original location: https://github.com/preshing/junction

  This software is distributed WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
  PURPOSE. See the LICENSE file for more information.
------------------------------------------------------------------------*/

#ifndef MAPTRAITS_H
#define MAPTRAITS_H

#include 

inline quint64 roundUpPowerOf2(quint64 v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

inline bool isPowerOf2(quint64 v)
{
    return (v & (v - 1)) == 0;
}

+inline quint32 avalanche(quint32 h)
+{
+    h ^= h >> 16;
+    h *= 0x85ebca6b;
+    h ^= h >> 13;
+    h *= 0xc2b2ae35;
+    h ^= h >> 16;
+    return h;
+}
+
+inline quint32 deavalanche(quint32 h)
+{
+    h ^= h >> 16;
+    h *= 0x7ed1b41d;
+    h ^= (h ^ (h >> 13)) >> 13;
+    h *= 0xa5cb9243;
+    h ^= h >> 16;
+    return h;
+}
+
template <typename T>
struct DefaultKeyTraits {
    typedef T Key;
    typedef quint32 Hash;
    static const Key NullKey = Key(0);
    static const Hash NullHash = Hash(0);

    static Hash hash(T key)
    {
-        return std::hash<Hash>()(Hash(key));
+        return avalanche(Hash(key));
+    }
+
+    static Key dehash(Hash hash)
+    {
+        return (T) deavalanche(hash);
    }
};

template <typename T>
struct DefaultValueTraits {
    typedef T Value;
    typedef quint32 IntType;
    static const IntType NullValue = 0;
    static const IntType Redirect = 1;
};

#endif // MAPTRAITS_H
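The `dehash()` path added above assumes `deavalanche()` exactly inverts `avalanche()`: the multipliers appear to be the modular inverses of the MurmurHash3 finalizer constants, and the xor-shift steps are invertible. A standalone sanity check one could drop into a test (not part of the patch; the include path follows the convention used elsewhere in this series):

```cpp
#include <QtGlobal>
#include "3rdparty/lock_free_map/map_traits.h"

// Checks that deavalanche(avalanche(k)) == k on a sample range, which is what makes
// DefaultKeyTraits::dehash() and Iterator::getKey() well defined.
bool checkAvalancheRoundTrip()
{
    for (quint32 k = 0; k < 1000000; ++k) {
        if (deavalanche(avalanche(k)) != k) {
            return false;
        }
    }
    return true;
}
```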
diff --git a/libs/image/tiles3/kis_tile_hash_table2.h b/libs/image/tiles3/kis_tile_hash_table2.h
index 88d28f207e..e4f427af83 100644
--- a/libs/image/tiles3/kis_tile_hash_table2.h
+++ b/libs/image/tiles3/kis_tile_hash_table2.h
@@ -1,226 +1,309 @@
#ifndef KIS_TILEHASHTABLE_2_H
#define KIS_TILEHASHTABLE_2_H

#include "kis_shared.h"
#include "kis_shared_ptr.h"
#include "3rdparty/lock_free_map/concurrent_map.h"
#include "kis_memento_manager.h"
+#include 

template <class T>
class KisTileHashTableTraits2
{
    static constexpr bool isInherited = std::is_convertible<T*, KisShared*>::value;
    Q_STATIC_ASSERT_X(isInherited, "Template must inherit KisShared");

public:
    typedef T TileType;
    typedef KisSharedPtr<T> TileTypeSP;
    typedef KisWeakSharedPtr<T> TileTypeWSP;

    KisTileHashTableTraits2();
    KisTileHashTableTraits2(KisMementoManager *mm);
    KisTileHashTableTraits2(const KisTileHashTableTraits2 &ht, KisMementoManager *mm);
    ~KisTileHashTableTraits2();

    TileTypeSP insert(qint32 key, TileTypeSP value)
    {
-        m_rawPointerUsers.fetchAndAddOrdered(1);
+        m_rawPointerUsers.fetchAndAddRelaxed(1);
        TileTypeSP::ref(&value, value.data());
        TileType *result = m_map.assign(key, value.data());

        if (result) {
            MemoryReclaimer *tmp = new MemoryReclaimer(result);
            QSBR::instance().enqueue(&MemoryReclaimer::destroy, tmp);
+        } else {
+            m_numTiles.fetchAndAddRelaxed(1);
        }

        TileTypeSP ptr(result);
-        m_rawPointerUsers.fetchAndSubOrdered(1);
+        m_rawPointerUsers.fetchAndSubRelaxed(1);
        return ptr;
    }

    TileTypeSP erase(qint32 key)
    {
-        m_rawPointerUsers.fetchAndAddOrdered(1);
+        m_rawPointerUsers.fetchAndAddRelaxed(1);
        TileType *result = m_map.erase(key);
        TileTypeSP ptr(result);

        if (result) {
+            m_numTiles.fetchAndSubRelaxed(1);
            MemoryReclaimer *tmp = new MemoryReclaimer(result);
            QSBR::instance().enqueue(&MemoryReclaimer::destroy, tmp);
        }

        if (m_rawPointerUsers == 1) {
            QSBR::instance().update(m_context);
        }

-        m_rawPointerUsers.fetchAndSubOrdered(1);
+        m_rawPointerUsers.fetchAndSubRelaxed(1);
        return ptr;
    }

    TileTypeSP get(qint32 key)
    {
-        m_rawPointerUsers.fetchAndAddOrdered(1);
+        m_rawPointerUsers.fetchAndAddRelaxed(1);
        TileTypeSP result(m_map.get(key));
-        m_rawPointerUsers.fetchAndSubOrdered(1);
+        m_rawPointerUsers.fetchAndSubRelaxed(1);
        return result;
    }

-    TileTypeSP getLazy(qint32 key)
+    TileTypeSP getLazy(qint32 key, TileTypeSP value)
    {
-        m_rawPointerUsers.fetchAndAddOrdered(1);
+        m_rawPointerUsers.fetchAndAddRelaxed(1);
        typename ConcurrentMap<qint32, TileType*>::Mutator iter = m_map.insertOrFind(key);

        if (!iter.getValue()) {
-            TileTypeSP value(new TileType);
            TileTypeSP::ref(&value, value.data());

            if (iter.exchangeValue(value.data()) == value.data()) {
                TileTypeSP::deref(&value, value.data());
+            } else {
+                m_numTiles.fetchAndAddRelaxed(1);
            }
        }

        TileTypeSP result(iter.getValue());
-        m_rawPointerUsers.fetchAndSubOrdered(1);
+        m_rawPointerUsers.fetchAndSubRelaxed(1);
        return result;
    }
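insert(), erase() and getLazy() above all follow the same ownership protocol, which is easy to lose in the diff: take a manual `ref()` before the raw pointer is published into the lock-free map, retire displaced pointers through QSBR instead of dereffing them inline, and only touch raw pointers between the `m_rawPointerUsers` increments. A condensed sketch of that protocol with hypothetical names (`Node`, `publishNode`, `lookupNode` are illustrative, not part of the patch):

```cpp
#include <QAtomicInt>
#include "kis_shared.h"
#include "kis_shared_ptr.h"
#include "3rdparty/lock_free_map/concurrent_map.h"

struct Node : public KisShared { int payload; };   // illustrative
typedef KisSharedPtr<Node> NodeSP;

// Writer side: the manual reference taken here becomes the map's reference;
// a displaced pointer must be retired through QSBR, never dereffed inline.
void publishNode(ConcurrentMap<qint32, Node*> &map, qint32 key, NodeSP node)
{
    NodeSP::ref(&node, node.data());
    Node *displaced = map.assign(key, node.data());
    if (displaced) {
        // In the patch this deref is wrapped in MemoryReclaimer + QSBR::enqueue,
        // so it only runs after every reader has passed a quiescent state.
    }
}

// Reader side: raw pointers from the map are only touched while the counter is
// elevated, which is what keeps QSBR::update() (called from erase()) from
// reclaiming them underneath us.
NodeSP lookupNode(ConcurrentMap<qint32, Node*> &map, QAtomicInt &rawPointerUsers, qint32 key)
{
    rawPointerUsers.fetchAndAddRelaxed(1);
    NodeSP result(map.get(key));   // re-acquires a strong reference
    rawPointerUsers.fetchAndSubRelaxed(1);
    return result;
}
```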
-    bool tileExists(qint32 key);
-    TileTypeSP getExistingTile(qint32 key);
-    TileTypeSP getTileLazy(qint32 key);
-    TileTypeSP getReadOnlyTileLazy(qint32 key, bool &existingTile);
-
-    void addTile(qint32 key, TileTypeSP value);
-    bool deleteTile(qint32 key);
+    bool isEmpty()
+    {
+        return !m_numTiles;
+    }

-    void setDefaultTileDataImp(KisTileData *defaultTileData);
-    KisTileData* defaultTileDataImp() const;
+    bool tileExists(qint32 col, qint32 row);
+
+    /**
+     * Returns a tile in position (col,row). If no tile exists,
+     * returns null.
+     * \param col column of the tile
+     * \param row row of the tile
+     */
+    TileTypeSP getExistingTile(qint32 col, qint32 row);
+
+    /**
+     * Returns a tile in position (col,row). If no tile exists,
+     * creates a new one, attaches it to the list and returns.
+     * \param col column of the tile
+     * \param row row of the tile
+     * \param newTile out-parameter, returns true if a new tile
+     * was created
+     */
+    TileTypeSP getTileLazy(qint32 col, qint32 row, bool& newTile);
+
+    /**
+     * Returns a tile in position (col,row). If no tile exists,
+     * creates nothing, but returns shared default tile object
+     * of the table. Be careful, this object has column and row
+     * parameters set to (qint32_MIN, qint32_MIN).
+     * \param col column of the tile
+     * \param row row of the tile
+     * \param existingTile returns true if the tile actually exists in the table
+     * and it is not a lazily created default wrapper tile
+     */
+    TileTypeSP getReadOnlyTileLazy(qint32 col, qint32 row, bool &existingTile);
+    void addTile(TileTypeSP tile);
+    bool deleteTile(TileTypeSP tile);
+    bool deleteTile(qint32 col, qint32 row);
+
+    void clear();
+
+    void setDefaultTileData(KisTileData *defaultTileData);
+    KisTileData* defaultTileData() const;
+
+    qint32 numTiles()
+    {
+        return m_numTiles;
+    }

    void debugPrintInfo();
    void debugMaxListLength(qint32 &min, qint32 &max);

private:
+    static inline quint32 calculateHash(qint32 col, qint32 row);
+
    struct MemoryReclaimer {
        MemoryReclaimer(TileType *data) : d(data) {}
        ~MemoryReclaimer() = default;

        void destroy()
        {
            TileTypeSP::deref(reinterpret_cast<TileTypeSP*>(this), d);
            this->MemoryReclaimer::~MemoryReclaimer();
            delete this;
        }

    private:
        TileType *d;
    };

-    ConcurrentMap<qint32, TileType*> m_map;
+private:
+    ConcurrentMap<qint32, TileType*> m_map;
    QSBR::Context m_context;
    QAtomicInt m_rawPointerUsers;
+    QAtomicInt m_numTiles;

    KisTileData *m_defaultTileData;
    KisMementoManager *m_mementoManager;
};

template <class T>
KisTileHashTableTraits2<T>::KisTileHashTableTraits2()
-    : m_context(QSBR::instance().createContext()), m_rawPointerUsers(0),
+    : m_context(QSBR::instance().createContext()), m_rawPointerUsers(0), m_numTiles(0),
      m_defaultTileData(0), m_mementoManager(0)
{
}

template <class T>
KisTileHashTableTraits2<T>::KisTileHashTableTraits2(KisMementoManager *mm)
    : KisTileHashTableTraits2()
{
    m_mementoManager = mm;
}

template <class T>
KisTileHashTableTraits2<T>::KisTileHashTableTraits2(const KisTileHashTableTraits2 &ht, KisMementoManager *mm)
+    : KisTileHashTableTraits2(mm)
{
+    setDefaultTileData(ht.m_defaultTileData);
+    typename ConcurrentMap<qint32, TileType*>::Iterator iter(ht);
+    while (iter.isValid()) {
+        insert(iter.getKey(), iter.getValue());
+        iter.next();
+    }
}

template <class T>
KisTileHashTableTraits2<T>::~KisTileHashTableTraits2()
{
+    clear();
    QSBR::instance().destroyContext(m_context);
}

template <class T>
-bool KisTileHashTableTraits2<T>::tileExists(qint32 key)
+bool KisTileHashTableTraits2<T>::tileExists(qint32 col, qint32 row)
{
-    return get(key) != nullptr;
+    return get(calculateHash(col, row)) != nullptr;
}

template <class T>
-typename KisTileHashTableTraits2<T>::TileTypeSP KisTileHashTableTraits2<T>::getExistingTile(qint32 key)
+typename KisTileHashTableTraits2<T>::TileTypeSP KisTileHashTableTraits2<T>::getExistingTile(qint32 col, qint32 row)
{
-    return get(key);
+    return get(calculateHash(col, row));
}

template <class T>
-typename KisTileHashTableTraits2<T>::TileTypeSP KisTileHashTableTraits2<T>::getTileLazy(qint32 key)
+typename KisTileHashTableTraits2<T>::TileTypeSP KisTileHashTableTraits2<T>::getTileLazy(qint32 col, qint32 row, bool &newTile)
{
-    return getLazy(key);
+    TileTypeSP tile(new TileType(col, row, m_defaultTileData, m_mementoManager));
+    return getLazy(calculateHash(col, row), tile);
}

template <class T>
-typename KisTileHashTableTraits2<T>::TileTypeSP KisTileHashTableTraits2<T>::getReadOnlyTileLazy(qint32 key, bool &existingTile)
+typename KisTileHashTableTraits2<T>::TileTypeSP KisTileHashTableTraits2<T>::getReadOnlyTileLazy(qint32 col, qint32 row, bool &existingTile)
{
-    m_rawPointerUsers.fetchAndAddOrdered(1);
-    TileTypeSP tile(m_map.get(key));
+    m_rawPointerUsers.fetchAndAddRelaxed(1);
+    TileTypeSP tile(m_map.get(calculateHash(col, row)));

    existingTile = tile;

    if (!existingTile) {
-        tile = new TileType;
+        tile = new TileType(col, row, m_defaultTileData, 0);
    }

-    m_rawPointerUsers.fetchAndSubOrdered(1);
+    m_rawPointerUsers.fetchAndSubRelaxed(1);
    return tile;
}

template <class T>
-void KisTileHashTableTraits2<T>::addTile(qint32 key, TileTypeSP value)
+void KisTileHashTableTraits2<T>::addTile(TileTypeSP value)
+{
+    insert(m_numTiles.load() + 1, value);
+}
+
+template <class T>
+bool KisTileHashTableTraits2<T>::deleteTile(TileTypeSP tile)
{
-    insert(key, value);
+    return deleteTile(tile->col(), tile->row());
}

template <class T>
-bool KisTileHashTableTraits2<T>::deleteTile(qint32 key)
+bool KisTileHashTableTraits2<T>::deleteTile(qint32 col, qint32 row)
{
-    return erase(key) != nullptr;
+    return erase(calculateHash(col, row)) != nullptr;
}

template <class T>
-inline void KisTileHashTableTraits2<T>::setDefaultTileDataImp(KisTileData *defaultTileData)
+void KisTileHashTableTraits2<T>::clear()
{
+    typename ConcurrentMap<qint32, TileType*>::Iterator iter(m_map);
+    while (iter.isValid()) {
+        erase(iter.getKey());
+        iter.next();
+    }
+}
+
+template <class T>
+inline void KisTileHashTableTraits2<T>::setDefaultTileData(KisTileData *defaultTileData)
+{
+    m_rawPointerUsers.fetchAndAddRelaxed(1);
+
    if (m_defaultTileData) {
        m_defaultTileData->release();
        m_defaultTileData = 0;
    }

    if (defaultTileData) {
        defaultTileData->acquire();
        m_defaultTileData = defaultTileData;
    }
+
+    m_rawPointerUsers.fetchAndSubRelaxed(1);
}

-template <class T>
-inline KisTileData* KisTileHashTableTraits2<T>::defaultTileDataImp() const
+template <class T>
+inline KisTileData* KisTileHashTableTraits2<T>::defaultTileData() const
{
    return m_defaultTileData;
}

template <class T>
void KisTileHashTableTraits2<T>::debugPrintInfo()
{
}

template <class T>
void KisTileHashTableTraits2<T>::debugMaxListLength(qint32 &min, qint32 &max)
{
}

+template <class T>
+quint32 KisTileHashTableTraits2<T>::calculateHash(qint32 col, qint32 row)
+{
+    return ((row << 5) + (col & 0x1F)) & 0x3FF;
+}
+
#endif // KIS_TILEHASHTABLE_2_H
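One property of `calculateHash()` defined above is worth flagging: it keeps only five bits of each coordinate, so distinct tiles alias the same qint32 key (not merely the same hash bucket), and the origin tile packs to key 0, which is the map's reserved `NullKey`. A small illustrative check, not part of the patch:

```cpp
#include <QtGlobal>

// Local copy of calculateHash(), for illustration only.
inline quint32 packedTileKey(qint32 col, qint32 row)
{
    return ((row << 5) + (col & 0x1F)) & 0x3FF;
}

void demonstrateKeyCollision()
{
    // Tiles (0,0), (32,0) and (0,32) all produce key 0, so they would overwrite
    // each other in the ConcurrentMap; images wider or taller than 32 tiles need
    // a stronger packing (e.g. combining the full low 16 bits of each coordinate).
    Q_ASSERT(packedTileKey(0, 0) == packedTileKey(32, 0));
    Q_ASSERT(packedTileKey(0, 0) == packedTileKey(0, 32));

    // Key 0 also coincides with DefaultKeyTraits<qint32>::NullKey, the reserved
    // empty-cell marker of the map, so the (0,0) tile appears to need special care.
}
```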
diff --git a/libs/image/tiles3/tests/kis_lock_free_map_test.cpp b/libs/image/tiles3/tests/kis_lock_free_map_test.cpp
index 3c487c80b5..a702a9e2a2 100644
--- a/libs/image/tiles3/tests/kis_lock_free_map_test.cpp
+++ b/libs/image/tiles3/tests/kis_lock_free_map_test.cpp
@@ -1,185 +1,187 @@
#include "kis_lock_free_map_test.h"

#include 

#include "kis_debug.h"
#include "tiles3/kis_memento_item.h"
#include "tiles3/kis_tile_hash_table2.h"

#define NUM_THREADS 10

class Wrapper : public KisShared
{
public:
    Wrapper() : m_member(0) {}
-    Wrapper(qint32 member) : m_member(member) {}
+    Wrapper(qint32 col, qint32 row,
+            KisTileData *defaultTileData, KisMementoManager* mm)
+        : m_member(col) {}

    qint32 member()
    {
        return m_member;
    }

private:
    qint32 m_member;
};

class StressJob : public QRunnable
{
public:
    StressJob(const std::function<void(qint64 &, qint64 &)> func)
        : m_func(func), m_eraseSum(0), m_insertSum(0)
    {
    }

    qint64 eraseSum()
    {
        return m_eraseSum;
    }

    qint64 insertSum()
    {
        return m_insertSum;
    }

protected:
    void run() override
    {
        m_func(m_eraseSum, m_insertSum);
    }

private:
    const std::function<void(qint64 &, qint64 &)> m_func;
    qint64 m_eraseSum;
    qint64 m_insertSum;
};

void LockFreeMapTest::testMainOperations()
{
    const qint32 numCycles = 60000;
    const qint32 numTypes = 3;
    QList<StressJob*> jobs;
    KisTileHashTableTraits2<Wrapper> map;

    auto func = [&](qint64 & eraseSum, qint64 & insertSum) {
        for (qint32 i = 1; i < numCycles + 1; ++i) {
            auto type = i % numTypes;

            switch (type) {
            case 0: {
                auto result = map.erase(i - 2);
                if (result.data()) {
                    eraseSum += result->member();
                }
                break;
            }
            case 1: {
-                auto result = map.insert(i, KisSharedPtr<Wrapper>(new Wrapper(i)));
+                auto result = map.insert(i, KisSharedPtr<Wrapper>(new Wrapper(i, 0, 0, 0)));
                if (result.data()) {
                    insertSum -= result->member();
                }
                insertSum += i;
                break;
            }
            case 2: {
                map.get(i - 1);
                break;
            }
            }
        }
    };

    for (qint32 i = 0; i < NUM_THREADS; ++i) {
        StressJob *job = new StressJob(func);
        job->setAutoDelete(false);
        jobs.append(job);
    }

    QThreadPool pool;
    pool.setMaxThreadCount(NUM_THREADS);

    QBENCHMARK {
        for (auto &job : jobs) {
            pool.start(job);
        }

        pool.waitForDone();
    }

    qint64 insertSum = 0;
    qint64 eraseSum = 0;

    for (qint32 i = 0; i < NUM_THREADS; ++i) {
        StressJob *job = jobs.takeLast();
        eraseSum += job->eraseSum();
        insertSum += job->insertSum();
        delete job;
    }

    QVERIFY(insertSum == eraseSum);
}

void LockFreeMapTest::testLazy()
{
    const qint32 numCycles = 50000;
    const qint32 numTypes = 2;
    QList<StressJob*> jobs;
    KisTileHashTableTraits2<Wrapper> map;

-    auto func = [&](qint64 &eraseSum, qint64 &insertSum) {
+    auto func = [&](qint64 & eraseSum, qint64 & insertSum) {
        for (qint32 i = 1; i < numCycles + 1; ++i) {
            auto type = i % numTypes;

            switch (type) {
            case 0: {
                auto result = map.erase(i - 1);
                if (result.data()) {
                    eraseSum += result->member();
                }
                break;
            }
            case 1: {
-                auto result = map.getLazy(i);
+                auto result = map.getLazy(i, KisSharedPtr<Wrapper>(new Wrapper()));
                if (result.data()) {
                    insertSum += result->member();
                }
                break;
            }
            }
        }
    };

    for (qint32 i = 0; i < NUM_THREADS; ++i) {
        StressJob *job = new StressJob(func);
        job->setAutoDelete(false);
        jobs.append(job);
    }

    QThreadPool pool;
    pool.setMaxThreadCount(NUM_THREADS);

    QBENCHMARK {
        for (auto &job : jobs) {
            pool.start(job);
        }

        pool.waitForDone();
    }

    qint64 insertSum = 0;
    qint64 eraseSum = 0;

    for (qint32 i = 0; i < NUM_THREADS; ++i) {
        StressJob *job = jobs.takeLast();
        eraseSum += job->eraseSum();
        insertSum += job->insertSum();
        delete job;
    }

    QVERIFY(insertSum == eraseSum);
}

QTEST_GUILESS_MAIN(LockFreeMapTest)
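The two stress tests above exercise only the raw key-based entry points. A single-threaded smoke test for the new col/row API could look like the sketch below (a hypothetical test slot, not declared in kis_lock_free_map_test.h; note that the `newTile` out-parameter of getTileLazy() is never written by the implementation in this patch, so it is left unused here):

```cpp
// Suggested addition, not part of the patch: exercises the col/row wrappers
// using the same Wrapper type as the tests above.
void LockFreeMapTest::testColRowApi()
{
    KisTileHashTableTraits2<Wrapper> map;
    QVERIFY(map.isEmpty());

    bool newTile = false;
    auto tile = map.getTileLazy(3, 4, newTile);
    QVERIFY(tile.data());
    QVERIFY(map.tileExists(3, 4));
    QCOMPARE(map.numTiles(), 1);

    QVERIFY(map.deleteTile(3, 4));
    QVERIFY(map.isEmpty());
}
```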