diff --git a/CMakeLists.txt b/CMakeLists.txt index dffa0aa5e9..b1f52c7b34 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,738 +1,742 @@ project(krita) message(STATUS "Using CMake version: ${CMAKE_VERSION}") cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR) set(MIN_QT_VERSION 5.6.0) set(MIN_FRAMEWORKS_VERSION 5.18.0) if (POLICY CMP0002) cmake_policy(SET CMP0002 OLD) endif() if (POLICY CMP0017) cmake_policy(SET CMP0017 NEW) endif () if (POLICY CMP0022) cmake_policy(SET CMP0022 OLD) endif () if (POLICY CMP0026) cmake_policy(SET CMP0026 OLD) endif() if (POLICY CMP0042) cmake_policy(SET CMP0042 NEW) endif() if (POLICY CMP0046) cmake_policy(SET CMP0046 OLD) endif () if (POLICY CMP0059) cmake_policy(SET CMP0059 OLD) endif() if (POLICY CMP0063) cmake_policy(SET CMP0063 OLD) endif() if (POLICY CMP0054) cmake_policy(SET CMP0054 OLD) endif() if (POLICY CMP0064) cmake_policy(SET CMP0064 OLD) endif() if (APPLE) set(APPLE_SUPPRESS_X11_WARNING TRUE) set(KDE_SKIP_RPATH_SETTINGS TRUE) set(CMAKE_MACOSX_RPATH 1) set(BUILD_WITH_INSTALL_RPATH 1) add_definitions(-mmacosx-version-min=10.11 -Wno-macro-redefined -Wno-deprecated-register) endif() if (CMAKE_COMPILER_IS_GNUCXX AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9 AND NOT WIN32) add_definitions(-Werror=delete-incomplete) endif() ###################### ####################### ## Constants defines ## ####################### ###################### # define common versions of Krita applications, used to generate kritaversion.h # update these version for every release: set(KRITA_VERSION_STRING "4.2.0-pre-alpha") # Major version: 3 for 3.x, 4 for 4.x, etc. set(KRITA_STABLE_VERSION_MAJOR 4) # Minor version: 0 for 4.0, 1 for 4.1, etc. set(KRITA_STABLE_VERSION_MINOR 2) # Bugfix release version, or 0 for before the first stable release set(KRITA_VERSION_RELEASE 0) # the 4th digit, really only used for the Windows installer: # - [Pre-]Alpha: Starts from 0, increment 1 per release # - Beta: Starts from 50, increment 1 per release # - Stable: Set to 100, bump to 101 if emergency update is needed set(KRITA_VERSION_REVISION 0) set(KRITA_ALPHA 1) # uncomment only for Alpha #set(KRITA_BETA 1) # uncomment only for Beta #set(KRITA_RC 1) # uncomment only for RC set(KRITA_YEAR 2018) # update every year if(NOT DEFINED KRITA_ALPHA AND NOT DEFINED KRITA_BETA AND NOT DEFINED KRITA_RC) set(KRITA_STABLE 1) # do not edit endif() message(STATUS "Krita version: ${KRITA_VERSION_STRING}") # Define the generic version of the Krita libraries here # This makes it easy to advance it when the next Krita release comes. 
# 14 was the last GENERIC_KRITA_LIB_VERSION_MAJOR of the previous Krita series # (2.x) so we're starting with 15 in 3.x series, 16 in 4.x series if(KRITA_STABLE_VERSION_MAJOR EQUAL 4) math(EXPR GENERIC_KRITA_LIB_VERSION_MAJOR "${KRITA_STABLE_VERSION_MINOR} + 16") else() # let's make sure we won't forget to update the "16" message(FATAL_ERROR "Reminder: please update offset == 16 used to compute GENERIC_KRITA_LIB_VERSION_MAJOR to something bigger") endif() set(GENERIC_KRITA_LIB_VERSION "${GENERIC_KRITA_LIB_VERSION_MAJOR}.0.0") set(GENERIC_KRITA_LIB_SOVERSION "${GENERIC_KRITA_LIB_VERSION_MAJOR}") LIST (APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules") LIST (APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/kde_macro") # fetch git revision for the current build set(KRITA_GIT_SHA1_STRING "") set(KRITA_GIT_BRANCH_STRING "") include(GetGitRevisionDescription) get_git_head_hash(GIT_SHA1) get_git_branch(GIT_BRANCH) if(GIT_SHA1) string(SUBSTRING ${GIT_SHA1} 0 7 GIT_SHA1) set(KRITA_GIT_SHA1_STRING ${GIT_SHA1}) if(GIT_BRANCH) set(KRITA_GIT_BRANCH_STRING ${GIT_BRANCH}) else() set(KRITA_GIT_BRANCH_STRING "(detached HEAD)") endif() endif() # create test make targets enable_testing() # collect list of broken tests, empty here to start fresh with each cmake run set(KRITA_BROKEN_TESTS "" CACHE INTERNAL "KRITA_BROKEN_TESTS") ############ ############# ## Options ## ############# ############ include(FeatureSummary) if (WIN32) option(USE_DRMINGW "Support the Dr. Mingw crash handler (only on windows)" ON) add_feature_info("Dr. Mingw" USE_DRMINGW "Enable the Dr. Mingw crash handler") if (MINGW) option(USE_MINGW_HARDENING_LINKER "Enable DEP (NX), ASLR and high-entropy ASLR linker flags (mingw-w64)" ON) add_feature_info("Linker Security Flags" USE_MINGW_HARDENING_LINKER "Enable DEP (NX), ASLR and high-entropy ASLR linker flags") if (USE_MINGW_HARDENING_LINKER) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--dynamicbase -Wl,--nxcompat -Wl,--disable-auto-image-base") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--dynamicbase -Wl,--nxcompat -Wl,--disable-auto-image-base") set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--dynamicbase -Wl,--nxcompat -Wl,--disable-auto-image-base") if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8") # Enable high-entropy ASLR for 64-bit # The image base has to be >4GB for HEASLR to be enabled. # The values used here are kind of arbitrary. set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--high-entropy-va -Wl,--image-base,0x140000000") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--high-entropy-va -Wl,--image-base,0x180000000") set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--high-entropy-va -Wl,--image-base,0x180000000") endif ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8") else (USE_MINGW_HARDENING_LINKER) message(WARNING "Linker Security Flags not enabled!") endif (USE_MINGW_HARDENING_LINKER) endif (MINGW) endif () option(HIDE_SAFE_ASSERTS "Don't show message box for \"safe\" asserts, just ignore them automatically and dump a message to the terminal." ON) configure_file(config-hide-safe-asserts.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config-hide-safe-asserts.h) add_feature_info("Safe Asserts" HIDE_SAFE_ASSERTS "Don't show message box for \"safe\" asserts, just ignore them automatically and dump a message to the terminal.") +option(USE_LOCK_FREE_HASH_TABLE "Use lock free hash table instead of blocking." 
OFF) +configure_file(config-hash-table-implementaion.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config-hash-table-implementaion.h) +add_feature_info("Lock free hash table" USE_LOCK_FREE_HASH_TABLE "Use lock free hash table instead of blocking.") + option(FOUNDATION_BUILD "A Foundation build is a binary release build that can package some extra things like color themes. Linux distributions that build and install Krita into a default system location should not define this option to true." OFF) add_feature_info("Foundation Build" FOUNDATION_BUILD "A Foundation build is a binary release build that can package some extra things like color themes. Linux distributions that build and install Krita into a default system location should not define this option to true.") option(KRITA_ENABLE_BROKEN_TESTS "Enable tests that are marked as broken" OFF) add_feature_info("Enable Broken Tests" KRITA_ENABLE_BROKEN_TESTS "Runs broken test when \"make test\" is invoked (use -DKRITA_ENABLE_BROKEN_TESTS=ON to enable).") option(ENABLE_PYTHON_2 "Enables the compiler to look for Python 2.7 instead of Python 3. Some packaged scripts are not compatible with Python 2 and this should only be used if you really have to use 2.7." OFF) include(MacroJPEG) ######################################################### ## Look for Python3 It is also searched by KF5, ## ## so we should request the correct version in advance ## ######################################################### function(TestCompileLinkPythonLibs OUTPUT_VARNAME) include(CheckCXXSourceCompiles) set(CMAKE_REQUIRED_INCLUDES ${PYTHON_INCLUDE_PATH}) set(CMAKE_REQUIRED_LIBRARIES ${PYTHON_LIBRARIES}) if (MINGW) set(CMAKE_REQUIRED_DEFINITIONS -D_hypot=hypot) endif (MINGW) unset(${OUTPUT_VARNAME} CACHE) CHECK_CXX_SOURCE_COMPILES(" #include int main(int argc, char *argv[]) { Py_InitializeEx(0); }" ${OUTPUT_VARNAME}) endfunction() if(MINGW) if(ENABLE_PYTHON_2) message(FATAL_ERROR "Python 2.7 is not supported on Windows at the moment.") else(ENABLE_PYTHON_2) find_package(PythonInterp 3.6 EXACT) find_package(PythonLibs 3.6 EXACT) endif(ENABLE_PYTHON_2) if (PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND) if(ENABLE_PYTHON_2) find_package(PythonLibrary 2.7) else(ENABLE_PYTHON_2) find_package(PythonLibrary 3.6) endif(ENABLE_PYTHON_2) TestCompileLinkPythonLibs(CAN_USE_PYTHON_LIBS) if (NOT CAN_USE_PYTHON_LIBS) message(FATAL_ERROR "Compiling with Python library failed, please check whether the architecture is correct. Python will be disabled.") endif (NOT CAN_USE_PYTHON_LIBS) endif (PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND) else(MINGW) if(ENABLE_PYTHON_2) find_package(PythonInterp 2.7) find_package(PythonLibrary 2.7) else(ENABLE_PYTHON_2) find_package(PythonInterp 3.0) find_package(PythonLibrary 3.0) endif(ENABLE_PYTHON_2) endif(MINGW) ######################## ######################### ## Look for KDE and Qt ## ######################### ######################## find_package(ECM 5.22 REQUIRED NOMODULE) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${ECM_MODULE_PATH} ${ECM_KDE_MODULE_DIR}) include(ECMOptionalAddSubdirectory) include(ECMAddAppIcon) include(ECMSetupVersion) include(ECMMarkNonGuiExecutable) include(ECMGenerateHeaders) include(GenerateExportHeader) include(ECMMarkAsTest) include(ECMInstallIcons) include(CMakePackageConfigHelpers) include(WriteBasicConfigVersionFile) include(CheckFunctionExists) include(KDEInstallDirs) include(KDECMakeSettings) include(KDECompilerSettings) # do not reorder to be alphabetical: this is the order in which the frameworks # depend on each other. 
find_package(KF5 ${MIN_FRAMEWORKS_VERSION} REQUIRED COMPONENTS Archive Config WidgetsAddons Completion CoreAddons GuiAddons I18n ItemModels ItemViews WindowSystem ) # KConfig deprecated authorizeKAction. In order to be warning free, # compile with the updated function when the dependency is new enough. # Remove this (and the uses of the define) when the minimum KF5 # version is >= 5.24.0. if (${KF5Config_VERSION} VERSION_LESS "5.24.0" ) message("Old KConfig (< 5.24.0) found.") add_definitions(-DKCONFIG_BEFORE_5_24) endif() find_package(Qt5 ${MIN_QT_VERSION} REQUIRED COMPONENTS Core Gui Widgets Xml Network PrintSupport Svg Test Concurrent ) include (MacroAddFileDependencies) include (MacroBoolTo01) include (MacroEnsureOutOfSourceBuild) macro_ensure_out_of_source_build("Compiling Krita inside the source directory is not possible. Please refer to the build instruction https://community.kde.org/Krita#Build_Instructions") # Note: OPTIONAL_COMPONENTS does not seem to be reliable # (as of ECM 5.15.0, CMake 3.2) find_package(Qt5Multimedia ${MIN_QT_VERSION}) set_package_properties(Qt5Multimedia PROPERTIES DESCRIPTION "Qt multimedia integration" URL "http://www.qt.io/" TYPE OPTIONAL PURPOSE "Optionally used to provide sound support for animations") macro_bool_to_01(Qt5Multimedia_FOUND HAVE_QT_MULTIMEDIA) configure_file(config-qtmultimedia.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config-qtmultimedia.h ) if (NOT APPLE) find_package(Qt5Quick ${MIN_QT_VERSION}) set_package_properties(Qt5Quick PROPERTIES DESCRIPTION "QtQuick" URL "http://www.qt.io/" TYPE OPTIONAL PURPOSE "Optionally used for the touch gui for Krita") macro_bool_to_01(Qt5Quick_FOUND HAVE_QT_QUICK) find_package(Qt5QuickWidgets ${MIN_QT_VERSION}) set_package_properties(Qt5QuickWidgets PROPERTIES DESCRIPTION "QtQuickWidgets" URL "http://www.qt.io/" TYPE OPTIONAL PURPOSE "Optionally used for the touch gui for Krita") endif() if (NOT WIN32 AND NOT APPLE) find_package(Qt5 ${MIN_QT_VERSION} REQUIRED X11Extras) find_package(Qt5DBus ${MIN_QT_VERSION}) set(HAVE_DBUS ${Qt5DBus_FOUND}) set_package_properties(Qt5DBus PROPERTIES DESCRIPTION "Qt DBUS integration" URL "http://www.qt.io/" TYPE OPTIONAL PURPOSE "Optionally used to provide a dbus api on Linux") find_package(KF5KIO ${MIN_FRAMEWORKS_VERSION}) macro_bool_to_01(KF5KIO_FOUND HAVE_KIO) set_package_properties(KF5KIO PROPERTIES DESCRIPTION "KDE's KIO Framework" URL "http://api.kde.org/frameworks-api/frameworks5-apidocs/kio/html/index.html" TYPE OPTIONAL PURPOSE "Optionally used for recent document handling") find_package(KF5Crash ${MIN_FRAMEWORKS_VERSION}) macro_bool_to_01(KF5Crash_FOUND HAVE_KCRASH) set_package_properties(KF5Crash PROPERTIES DESCRIPTION "KDE's Crash Handler" URL "http://api.kde.org/frameworks-api/frameworks5-apidocs/kcrash/html/index.html" TYPE OPTIONAL PURPOSE "Optionally used to provide crash reporting on Linux") find_package(X11 REQUIRED COMPONENTS Xinput) set(HAVE_X11 TRUE) add_definitions(-DHAVE_X11) find_package(XCB COMPONENTS XCB ATOM) set(HAVE_XCB ${XCB_FOUND}) else() set(HAVE_DBUS FALSE) set(HAVE_X11 FALSE) set(HAVE_XCB FALSE) endif() add_definitions( -DQT_USE_QSTRINGBUILDER -DQT_STRICT_ITERATORS -DQT_NO_SIGNALS_SLOTS_KEYWORDS -DQT_NO_URL_CAST_FROM_STRING ) if (${Qt5_VERSION} VERSION_GREATER "5.8.0" ) add_definitions(-DQT_DISABLE_DEPRECATED_BEFORE=0x50900) elseif(${Qt5_VERSION} VERSION_GREATER "5.7.0" ) add_definitions(-DQT_DISABLE_DEPRECATED_BEFORE=0x50800) elseif(${Qt5_VERSION} VERSION_GREATER "5.6.0" ) add_definitions(-DQT_DISABLE_DEPRECATED_BEFORE=0x50700) else() 
add_definitions(-DQT_DISABLE_DEPRECATED_BEFORE=0x50600) endif() add_definitions(-DTRANSLATION_DOMAIN=\"krita\") # # The reason for this mode is that the Debug mode disable inlining # if(CMAKE_COMPILER_IS_GNUCXX) set(CMAKE_CXX_FLAGS_KRITADEVS "-O3 -g" CACHE STRING "" FORCE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fext-numeric-literals") endif() if(UNIX) set(CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES};m") endif() if(WIN32) if(MSVC) # C4522: 'class' : multiple assignment operators specified set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -wd4522") endif() endif() # KDECompilerSettings adds the `--export-all-symbols` linker flag. # We don't really need it. if(MINGW) string(REPLACE "-Wl,--export-all-symbols" "" CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS}") string(REPLACE "-Wl,--export-all-symbols" "" CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS}") endif(MINGW) # enable exceptions globally kde_enable_exceptions() set(KRITA_DEFAULT_TEST_DATA_DIR ${CMAKE_SOURCE_DIR}/sdk/tests/data/) macro(macro_add_unittest_definitions) add_definitions(-DFILES_DATA_DIR="${CMAKE_CURRENT_SOURCE_DIR}/data/") add_definitions(-DFILES_OUTPUT_DIR="${CMAKE_CURRENT_BINARY_DIR}") add_definitions(-DFILES_DEFAULT_DATA_DIR="${KRITA_DEFAULT_TEST_DATA_DIR}") add_definitions(-DSYSTEM_RESOURCES_DATA_DIR="${CMAKE_SOURCE_DIR}/krita/data/") endmacro() # overcome some platform incompatibilities if(WIN32) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/winquirks) add_definitions(-D_USE_MATH_DEFINES) add_definitions(-DNOMINMAX) set(WIN32_PLATFORM_NET_LIBS ws2_32.lib netapi32.lib) endif() # set custom krita plugin installdir set(KRITA_PLUGIN_INSTALL_DIR ${LIB_INSTALL_DIR}/kritaplugins) ########################### ############################ ## Required dependencies ## ############################ ########################### find_package(PNG REQUIRED) if (APPLE) # this is not added correctly on OSX -- see http://forum.kde.org/viewtopic.php?f=139&t=101867&p=221242#p221242 include_directories(SYSTEM ${PNG_INCLUDE_DIR}) endif() add_definitions(-DBOOST_ALL_NO_LIB) find_package(Boost 1.55 REQUIRED COMPONENTS system) include_directories(SYSTEM ${Boost_INCLUDE_DIRS}) ## ## Test for GNU Scientific Library ## find_package(GSL) set_package_properties(GSL PROPERTIES URL "http://www.gnu.org/software/gsl" TYPE RECOMMENDED PURPOSE "Required by Krita's Transform tool.") macro_bool_to_01(GSL_FOUND HAVE_GSL) configure_file(config-gsl.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config-gsl.h ) ########################### ############################ ## Optional dependencies ## ############################ ########################### find_package(ZLIB) set_package_properties(ZLIB PROPERTIES DESCRIPTION "Compression library" URL "http://www.zlib.net/" TYPE OPTIONAL PURPOSE "Optionally used by the G'Mic and the PSD plugins") macro_bool_to_01(ZLIB_FOUND HAVE_ZLIB) find_package(OpenEXR) set_package_properties(OpenEXR PROPERTIES DESCRIPTION "High dynamic-range (HDR) image file format" URL "http://www.openexr.com" TYPE OPTIONAL PURPOSE "Required by the Krita OpenEXR filter") macro_bool_to_01(OPENEXR_FOUND HAVE_OPENEXR) set(LINK_OPENEXR_LIB) if(OPENEXR_FOUND) include_directories(SYSTEM ${OPENEXR_INCLUDE_DIR}) set(LINK_OPENEXR_LIB ${OPENEXR_LIBRARIES}) add_definitions(${OPENEXR_DEFINITIONS}) endif() find_package(TIFF) set_package_properties(TIFF PROPERTIES DESCRIPTION "TIFF Library and Utilities" URL "http://www.remotesensing.org/libtiff" TYPE OPTIONAL PURPOSE "Required by the Krita TIFF filter") find_package(JPEG) set_package_properties(JPEG 
PROPERTIES DESCRIPTION "Free library for JPEG image compression. Note: libjpeg8 is NOT supported." URL "http://www.libjpeg-turbo.org" TYPE OPTIONAL PURPOSE "Required by the Krita JPEG filter") find_package(GIF) set_package_properties(GIF PROPERTIES DESCRIPTION "Library for loading and saving gif files." URL "http://giflib.sourceforge.net/" TYPE OPTIONAL PURPOSE "Required by the Krita GIF filter") find_package(HEIF "1.3.0") set_package_properties(HEIF PROPERTIES DESCRIPTION "Library for loading and saving heif files." URL "https://github.com/strukturag/libheif" TYPE OPTIONAL PURPOSE "Required by the Krita HEIF filter") set(LIBRAW_MIN_VERSION "0.16") find_package(LibRaw ${LIBRAW_MIN_VERSION}) set_package_properties(LibRaw PROPERTIES DESCRIPTION "Library to decode RAW images" URL "http://www.libraw.org" TYPE OPTIONAL PURPOSE "Required to build the raw import plugin") find_package(FFTW3) set_package_properties(FFTW3 PROPERTIES DESCRIPTION "A fast, free C FFT library" URL "http://www.fftw.org/" TYPE OPTIONAL PURPOSE "Required by the Krita for fast convolution operators and some G'Mic features") macro_bool_to_01(FFTW3_FOUND HAVE_FFTW3) find_package(OCIO) set_package_properties(OCIO PROPERTIES DESCRIPTION "The OpenColorIO Library" URL "http://www.opencolorio.org" TYPE OPTIONAL PURPOSE "Required by the Krita LUT docker") macro_bool_to_01(OCIO_FOUND HAVE_OCIO) set_package_properties(PythonLibrary PROPERTIES DESCRIPTION "Python Library" URL "http://www.python.org" TYPE OPTIONAL PURPOSE "Required by the Krita PyQt plugin") macro_bool_to_01(PYTHONLIBS_FOUND HAVE_PYTHONLIBS) find_package(SIP "4.18.0") set_package_properties(SIP PROPERTIES DESCRIPTION "Support for generating SIP Python bindings" URL "https://www.riverbankcomputing.com/software/sip/download" TYPE OPTIONAL PURPOSE "Required by the Krita PyQt plugin") macro_bool_to_01(SIP_FOUND HAVE_SIP) find_package(PyQt5 "5.6.0") set_package_properties(PyQt5 PROPERTIES DESCRIPTION "Python bindings for Qt5." URL "https://www.riverbankcomputing.com/software/pyqt/download5" TYPE OPTIONAL PURPOSE "Required by the Krita PyQt plugin") macro_bool_to_01(PYQT5_FOUND HAVE_PYQT5) ## ## Look for OpenGL ## # TODO: see if there is a better check for QtGui being built with opengl support (and thus the QOpenGL* classes) if(Qt5Gui_OPENGL_IMPLEMENTATION) message(STATUS "Found QtGui OpenGL support") else() message(FATAL_ERROR "Did NOT find QtGui OpenGL support. Check your Qt configuration. 
You cannot build Krita without Qt OpenGL support.") endif() ## ## Test for eigen3 ## find_package(Eigen3 3.0 REQUIRED) set_package_properties(Eigen3 PROPERTIES DESCRIPTION "C++ template library for linear algebra" URL "http://eigen.tuxfamily.org" TYPE REQUIRED) ## ## Test for exiv2 ## find_package(Exiv2 0.16 REQUIRED) set_package_properties(Exiv2 PROPERTIES DESCRIPTION "Image metadata library and tools" URL "http://www.exiv2.org" PURPOSE "Required by Krita") ## ## Test for lcms ## find_package(LCMS2 2.4 REQUIRED) set_package_properties(LCMS2 PROPERTIES DESCRIPTION "LittleCMS Color management engine" URL "http://www.littlecms.com" TYPE REQUIRED PURPOSE "Will be used for color management and is necessary for Krita") if(LCMS2_FOUND) if(NOT ${LCMS2_VERSION} VERSION_LESS 2040 ) set(HAVE_LCMS24 TRUE) endif() set(HAVE_REQUIRED_LCMS_VERSION TRUE) set(HAVE_LCMS2 TRUE) endif() ## ## Test for Vc ## set(OLD_CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ) set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules ) set(HAVE_VC FALSE) if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm") if(NOT MSVC) find_package(Vc 1.1.0) set_package_properties(Vc PROPERTIES DESCRIPTION "Portable, zero-overhead SIMD library for C++" URL "https://github.com/VcDevel/Vc" TYPE OPTIONAL PURPOSE "Required by the Krita for vectorization") macro_bool_to_01(Vc_FOUND HAVE_VC) endif() endif() configure_file(config-vc.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config-vc.h ) if(HAVE_VC) message(STATUS "Vc found!") set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/vc") include (VcMacros) if(Vc_COMPILER_IS_CLANG) set(ADDITIONAL_VC_FLAGS "-Wabi -ffp-contract=fast") if(NOT WIN32) set(ADDITIONAL_VC_FLAGS "${ADDITIONAL_VC_FLAGS} -fPIC") endif() elseif (NOT MSVC) set(ADDITIONAL_VC_FLAGS "-Wabi -fabi-version=0 -ffp-contract=fast") if(NOT WIN32) set(ADDITIONAL_VC_FLAGS "${ADDITIONAL_VC_FLAGS} -fPIC") endif() endif() #Handle Vc master if(Vc_COMPILER_IS_GCC OR Vc_COMPILER_IS_CLANG) AddCompilerFlag("-std=c++11" _ok) if(NOT _ok) AddCompilerFlag("-std=c++0x" _ok) endif() endif() macro(ko_compile_for_all_implementations_no_scalar _objs _src) vc_compile_for_all_implementations(${_objs} ${_src} FLAGS ${ADDITIONAL_VC_FLAGS} ONLY SSE2 SSSE3 SSE4_1 AVX AVX2+FMA+BMI2) endmacro() macro(ko_compile_for_all_implementations _objs _src) vc_compile_for_all_implementations(${_objs} ${_src} FLAGS ${ADDITIONAL_VC_FLAGS} ONLY Scalar SSE2 SSSE3 SSE4_1 AVX AVX2+FMA+BMI2) endmacro() endif() set(CMAKE_MODULE_PATH ${OLD_CMAKE_MODULE_PATH} ) add_definitions(${QT_DEFINITIONS} ${QT_QTDBUS_DEFINITIONS}) ## ## Test endianness ## include (TestBigEndian) test_big_endian(CMAKE_WORDS_BIGENDIAN) ## ## Test for qt-poppler ## find_package(Poppler COMPONENTS Qt5) set_package_properties(Poppler PROPERTIES DESCRIPTION "A PDF rendering library" URL "http://poppler.freedesktop.org" TYPE OPTIONAL PURPOSE "Required by the Krita PDF filter.") ############################ ############################# ## Add Krita helper macros ## ############################# ############################ include(MacroKritaAddBenchmark) #################### ##################### ## Define includes ## ##################### #################### # for config.h and includes (if any?) 
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_SOURCE_DIR}/interfaces ) add_subdirectory(libs) add_subdirectory(plugins) add_subdirectory(benchmarks) add_subdirectory(krita) configure_file(KoConfig.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/KoConfig.h ) configure_file(config_convolution.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config_convolution.h) configure_file(config-ocio.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config-ocio.h ) check_function_exists(powf HAVE_POWF) configure_file(config-powf.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config-powf.h) if(WIN32) include(${CMAKE_CURRENT_LIST_DIR}/packaging/windows/installer/ConfigureInstallerNsis.cmake) endif() message("\nBroken tests:") foreach(tst ${KRITA_BROKEN_TESTS}) message(" * ${tst}") endforeach() feature_summary(WHAT ALL FATAL_ON_MISSING_REQUIRED_PACKAGES) if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/po OR EXISTS ${CMAKE_CURRENT_BINARY_DIR}/po ) find_package(KF5I18n CONFIG REQUIRED) ki18n_install(po) endif() diff --git a/config-hash-table-implementaion.h.cmake b/config-hash-table-implementaion.h.cmake new file mode 100644 index 0000000000..30dc23732a --- /dev/null +++ b/config-hash-table-implementaion.h.cmake @@ -0,0 +1,3 @@ +/* config-hash-table-implementation.h. Generated by cmake from config-hash-table-implementation.h.cmake */ + +#cmakedefine USE_LOCK_FREE_HASH_TABLE 1 diff --git a/libs/global/kis_shared_ptr.h b/libs/global/kis_shared_ptr.h index 5453bfabb3..c75e1cc671 100644 --- a/libs/global/kis_shared_ptr.h +++ b/libs/global/kis_shared_ptr.h @@ -1,521 +1,526 @@ /* * Copyright (c) 2005 Frerich Raabe * Copyright (c) 2006,2010 Cyrille Berger * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef KIS_SHAREDPTR_H #define KIS_SHAREDPTR_H #include #include #include "kis_memory_leak_tracker.h" template class KisWeakSharedPtr; /** * KisSharedPtr is a shared pointer similar to KSharedPtr and * boost::shared_ptr. The difference with KSharedPtr is that our * constructor is not explicit. * * A shared pointer is a wrapper around a real pointer. The shared * pointer keeps a reference count, and when the reference count drops * to 0 the contained pointer is deleted. You can use the shared * pointer just as you would use a real pointer. * * See also also item 28 and 29 of More Effective C++ and * http://bugs.kde.org/show_bug.cgi?id=52261 as well as * http://www.boost.org/libs/smart_ptr/shared_ptr.htm. * * Advantage of KisSharedPtr over boost pointer or QSharedPointer? 
* * The difference with boost share pointer is that in * boost::shared_ptr, the counter is kept inside the smart pointer, * meaning that you should never never remove the pointer from its * smart pointer object, because if you do that, and somewhere in the * code, the pointer is put back in a smart pointer, then you have two * counters, and when one reach zero, then the object gets deleted * while some other code thinks the pointer is still valid. * * Disadvantage of KisSharedPtr compared to boost pointer? * * KisSharedPtr requires the class to inherits KisShared. * * Difference with QSharedDataPointer * * QSharedDataPointer and KisSharedPtr are very similar, but * QSharedDataPointer has an explicit constructor which makes it more * painful to use in some constructions. And QSharedDataPointer * doesn't offer a weak pointer. */ template class KisSharedPtr { friend class KisWeakSharedPtr; public: /** * Creates a null pointer. */ inline KisSharedPtr() : d(0) { } /** * Creates a new pointer. * @param p the pointer */ inline KisSharedPtr(T* p) : d(p) { ref(); } inline KisSharedPtr(const KisWeakSharedPtr& o); // Free the pointer and set it to new value void attach(T* p); // Free the pointer void clear(); /** * Copies a pointer. * @param o the pointer to copy */ inline KisSharedPtr(const KisSharedPtr& o) : d(o.d) { ref(); } /** * Dereferences the object that this pointer points to. If it was * the last reference, the object will be deleted. */ inline ~KisSharedPtr() { deref(); } inline KisSharedPtr& operator= (const KisSharedPtr& o) { attach(o.d); return *this; } inline bool operator== (const T* p) const { return (d == p); } inline bool operator!= (const T* p) const { return (d != p); } inline bool operator== (const KisSharedPtr& o) const { return (d == o.d); } inline bool operator!= (const KisSharedPtr& o) const { return (d != o.d); } inline KisSharedPtr& operator= (T* p) { attach(p); return *this; } inline operator const T*() const { return d; } template< class T2> inline operator KisSharedPtr() const { return KisSharedPtr(d); } /** * @return the contained pointer. If you delete the contained * pointer, you will make KisSharedPtr very unhappy. It is * perfectly save to put the contained pointer in another * KisSharedPtr, though. */ inline T* data() { return d; } /** * @return the pointer */ inline const T* data() const { return d; } /** * @return a const pointer to the shared object. 
*/ inline const T* constData() const { return d; } inline const T& operator*() const { Q_ASSERT(d); return *d; } inline T& operator*() { Q_ASSERT(d); return *d; } inline const T* operator->() const { Q_ASSERT(d); return d; } inline T* operator->() { Q_ASSERT(d); return d; } /** * @return true if the pointer is null */ inline bool isNull() const { return (d == 0); } -private: + inline static void ref(const KisSharedPtr* sp, T* t) { #ifndef HAVE_MEMORY_LEAK_TRACKER Q_UNUSED(sp); #else KisMemoryLeakTracker::instance()->reference(t, sp); #endif if (t) { t->ref(); } } - inline void ref() const - { - ref(this, d); - } + inline static bool deref(const KisSharedPtr* sp, T* t) { #ifndef HAVE_MEMORY_LEAK_TRACKER Q_UNUSED(sp); #else KisMemoryLeakTracker::instance()->dereference(t, sp); #endif if (t && !t->deref()) { delete t; return false; } return true; } + +private: + inline void ref() const + { + ref(this, d); + } + inline void deref() const { bool v = deref(this, d); #ifndef NDEBUG if (!v) { d = 0; } #else Q_UNUSED(v); #endif } + private: mutable T* d; }; /** * A weak shared ptr is an ordinary shared ptr, with two differences: * it doesn't delete the contained pointer if the refcount drops to * zero and it doesn't prevent the contained pointer from being * deleted if the last strong shared pointer goes out of scope. */ template class KisWeakSharedPtr { friend class KisSharedPtr; public: /** * Creates a null pointer. */ inline KisWeakSharedPtr() : d(0), weakReference(0) { } /** * Creates a new pointer. * @param p the pointer */ inline KisWeakSharedPtr(T* p) { load(p); } inline KisWeakSharedPtr(const KisSharedPtr& o) { load(o.d); } /** * Copies a pointer. * @param o the pointer to copy */ inline KisWeakSharedPtr(const KisWeakSharedPtr& o) { if (o.isConsistent()) { load(o.d); } else { d = 0; weakReference = 0; } } inline ~KisWeakSharedPtr() { detach(); } inline KisWeakSharedPtr& operator= (const KisWeakSharedPtr& o) { attach(o); return *this; } inline bool operator== (const T* p) const { return (d == p); } inline bool operator!= (const T* p) const { return (d != p); } inline bool operator== (const KisWeakSharedPtr& o) const { return (d == o.d); } inline bool operator!= (const KisWeakSharedPtr& o) const { return (d != o.d); } inline KisWeakSharedPtr& operator= (T* p) { attach(p); return *this; } template< class T2> inline operator KisWeakSharedPtr() const { return KisWeakSharedPtr(d); } /** * Note that if you use this function, the pointer might be destroyed * if KisSharedPtr pointing to this pointer are deleted, resulting in * a segmentation fault. Use with care. * @return a const pointer to the shared object. */ inline T* data() { if (!isConsistent()) { warnKrita << kisBacktrace(); Q_ASSERT_X(0, "KisWeakSharedPtr", "Weak pointer is not valid!"); } return d; } /** * @see data() */ inline const T* data() const { if (!isConsistent()) { warnKrita << kisBacktrace(); Q_ASSERT_X(0, "KisWeakSharedPtr", "Weak pointer is not valid!"); } return d; } /** * @see data() */ inline const T* constData() const { if (!isConsistent()) { warnKrita << kisBacktrace(); Q_ASSERT_X(0, "KisWeakSharedPtr", "Weak pointer is not valid!"); } return d; } /** * @see data() */ inline operator const T*() const { /** * This operator is used in boolean expressions where we check * for pointer consistency, so return 0 instead of asserting. */ return isConsistent() ? 
d : 0; } inline const T& operator*() const { if (!isValid()) { warnKrita << kisBacktrace(); Q_ASSERT_X(0, "KisWeakSharedPtr", "Weak pointer is not valid!"); } return *d; } inline T& operator*() { if (!isValid()) { warnKrita << kisBacktrace(); Q_ASSERT_X(0, "KisWeakSharedPtr", "Weak pointer is not valid!"); } return *d; } inline const T* operator->() const { if (!isValid()) { warnKrita << kisBacktrace(); Q_ASSERT_X(0, "KisWeakSharedPtr", "Weak pointer is not valid!"); } return d; } inline T* operator->() { if (!isValid()) { warnKrita << kisBacktrace(); Q_ASSERT_X(0, "KisWeakSharedPtr", "Weak pointer is not valid!"); } return d; } /** * @return true if the pointer is null */ inline bool isNull() const { return (d == 0); } /** * @return true if the weak pointer points to a valid pointer * and false if the data has been deleted or is null */ inline bool isValid() const { Q_ASSERT(!d || weakReference); return d && weakReference && isOdd((int)*weakReference); } /** * @brief toStrongRef returns a KisSharedPtr which may be dereferenced. * * Weak pointers should only be used to track ownership but never be used as pointers. * This has historically not been the case, but in new API this function should be used * instead of directly using a weak pointer as pointer. * @return a KisSharedPtr, which may be null */ inline KisSharedPtr toStrongRef() const { return KisSharedPtr(*this); } private: static const qint32 WEAK_REF = 2; static inline bool isOdd(const qint32 &x) { return x & 0x01; } inline bool isConsistent() const { Q_ASSERT(!d || weakReference); return !d || (weakReference && isOdd((int)*weakReference)); } void load(T* newValue) { d = newValue; if (d) { weakReference = d->sharedWeakReference(); weakReference->fetchAndAddOrdered(WEAK_REF); } else { weakReference = 0; } } // see note in kis_shared.cc inline void attach(T* newValue) { detach(); load(newValue); } inline void attach(const KisWeakSharedPtr& o) { detach(); if (o.isConsistent()) { load(o.d); } else { d = 0; weakReference = 0; } } // see note in kis_shared.cc void detach() { d = 0; if (weakReference && weakReference->fetchAndAddOrdered(-WEAK_REF) <= WEAK_REF) { // sanity check: Q_ASSERT((int)*weakReference == 0); delete weakReference; weakReference = 0; } } mutable T* d; QAtomicInt *weakReference; }; template Q_INLINE_TEMPLATE KisSharedPtr::KisSharedPtr(const KisWeakSharedPtr& o) : d(o.d) { if (o.isValid()) { ref(); /** * Thread safety: * Is the object we have just referenced still valid? */ Q_ASSERT(o.isConsistent()); } else { d = 0; } } template Q_INLINE_TEMPLATE void KisSharedPtr::attach(T* p) { if (d != p) { ref(this, p); T* old = d; d = p; deref(this, old); } } template Q_INLINE_TEMPLATE void KisSharedPtr::clear() { attach((T*)0); } #endif diff --git a/libs/image/3rdparty/lock_free_map/atomic.h b/libs/image/3rdparty/lock_free_map/atomic.h new file mode 100644 index 0000000000..50d948f56a --- /dev/null +++ b/libs/image/3rdparty/lock_free_map/atomic.h @@ -0,0 +1,146 @@ +/*------------------------------------------------------------------------ + Junction: Concurrent data structures in C++ + Copyright (c) 2016 Jeff Preshing + Distributed under the Simplified BSD License. + Original location: https://github.com/preshing/junction + This software is distributed WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the LICENSE file for more information. 
+------------------------------------------------------------------------*/
+
+#ifndef ATOMIC_H
+#define ATOMIC_H
+
+#include <atomic>
+
+inline void signalFenceConsume()
+{
+    std::atomic_signal_fence(std::memory_order_acquire);
+}
+
+inline void signalFenceAcquire()
+{
+    std::atomic_signal_fence(std::memory_order_acquire);
+}
+
+inline void signalFenceRelease()
+{
+    std::atomic_signal_fence(std::memory_order_release);
+}
+
+inline void signalFenceSeqCst()
+{
+    std::atomic_signal_fence(std::memory_order_seq_cst);
+}
+
+inline void threadFenceConsume()
+{
+    std::atomic_thread_fence(std::memory_order_acquire);
+}
+
+inline void threadFenceAcquire()
+{
+    std::atomic_thread_fence(std::memory_order_acquire);
+}
+
+inline void threadFenceRelease()
+{
+    std::atomic_thread_fence(std::memory_order_release);
+}
+
+inline void threadFenceSeqCst()
+{
+    std::atomic_thread_fence(std::memory_order_seq_cst);
+}
+
+enum MemoryOrder {
+    Relaxed = std::memory_order_relaxed,
+    Consume = std::memory_order_consume,
+    Acquire = std::memory_order_acquire,
+    Release = std::memory_order_release,
+    ConsumeRelease = std::memory_order_acq_rel,
+    AcquireRelease = std::memory_order_acq_rel,
+};
+
+template <typename T>
+class Atomic : protected std::atomic<T>
+{
+private:
+    T operator=(T value);
+
+public:
+    Atomic()
+    {
+    }
+
+    Atomic(T value) : std::atomic<T>(value)
+    {
+    }
+
+    Atomic(const Atomic& other) : std::atomic<T>(other.loadNonatomic())
+    {
+    }
+
+    T loadNonatomic() const
+    {
+        return std::atomic<T>::load(std::memory_order_relaxed);
+    }
+
+    T load(MemoryOrder memoryOrder) const
+    {
+        return std::atomic<T>::load((std::memory_order) memoryOrder);
+    }
+
+    void storeNonatomic(T value)
+    {
+        return std::atomic<T>::store(value, std::memory_order_relaxed);
+    }
+
+    void store(T value, MemoryOrder memoryOrder)
+    {
+        return std::atomic<T>::store(value, (std::memory_order) memoryOrder);
+    }
+
+    T compareExchange(T expected, T desired, MemoryOrder memoryOrder)
+    {
+        std::atomic<T>::compare_exchange_strong(expected, desired, (std::memory_order) memoryOrder);
+        return expected; // modified by reference by compare_exchange_strong
+    }
+
+    bool compareExchangeStrong(T& expected, T desired, MemoryOrder memoryOrder)
+    {
+        return std::atomic<T>::compare_exchange_strong(expected, desired, (std::memory_order) memoryOrder);
+    }
+
+    bool compareExchangeWeak(T& expected, T desired, MemoryOrder success, MemoryOrder failure)
+    {
+        return std::atomic<T>::compare_exchange_weak(expected, desired, (std::memory_order) success, (std::memory_order) failure);
+    }
+
+    T exchange(T desired, MemoryOrder memoryOrder)
+    {
+        return std::atomic<T>::exchange(desired, (std::memory_order) memoryOrder);
+    }
+
+    T fetchAdd(T operand, MemoryOrder memoryOrder)
+    {
+        return std::atomic<T>::fetch_add(operand, (std::memory_order) memoryOrder);
+    }
+
+    T fetchSub(T operand, MemoryOrder memoryOrder)
+    {
+        return std::atomic<T>::fetch_sub(operand, (std::memory_order) memoryOrder);
+    }
+
+    T fetchAnd(T operand, MemoryOrder memoryOrder)
+    {
+        return std::atomic<T>::fetch_and(operand, (std::memory_order) memoryOrder);
+    }
+
+    T fetchOr(T operand, MemoryOrder memoryOrder)
+    {
+        return std::atomic<T>::fetch_or(operand, (std::memory_order) memoryOrder);
+    }
+};
+
+#endif // ATOMIC_H
diff --git a/libs/image/3rdparty/lock_free_map/concurrent_map.h b/libs/image/3rdparty/lock_free_map/concurrent_map.h
new file mode 100644
index 0000000000..bb9bddc79c
--- /dev/null
+++ b/libs/image/3rdparty/lock_free_map/concurrent_map.h
@@ -0,0 +1,362 @@
+/*------------------------------------------------------------------------
+ Junction: Concurrent data
structures in C++ + Copyright (c) 2016 Jeff Preshing + Distributed under the Simplified BSD License. + Original location: https://github.com/preshing/junction + This software is distributed WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the LICENSE file for more information. +------------------------------------------------------------------------*/ + +#ifndef CONCURRENTMAP_H +#define CONCURRENTMAP_H + +#include "leapfrog.h" +#include "qsbr.h" + +template , class VT = DefaultValueTraits > +class ConcurrentMap +{ +public: + typedef K Key; + typedef V Value; + typedef KT KeyTraits; + typedef VT ValueTraits; + typedef quint32 Hash; + typedef Leapfrog Details; + +private: + Atomic m_root; + QSBR m_gc; + +public: + ConcurrentMap(quint64 capacity = Details::InitialSize) : m_root(Details::Table::create(capacity)) + { + } + + ~ConcurrentMap() + { + typename Details::Table* table = m_root.loadNonatomic(); + table->destroy(); + } + + QSBR &getGC() + { + return m_gc; + } + + bool migrationInProcess() + { + return (quint64) m_root.loadNonatomic()->jobCoordinator.loadConsume() != 1; + } + + // publishTableMigration() is called by exactly one thread from Details::TableMigration::run() + // after all the threads participating in the migration have completed their work. + void publishTableMigration(typename Details::TableMigration* migration) + { + m_root.store(migration->m_destination, Release); + // Caller will GC the TableMigration and the source table. + } + + // A Mutator represents a known cell in the hash table. + // It's meant for manipulations within a temporary function scope. + // Obviously you must not call QSBR::Update while holding a Mutator. + // Any operation that modifies the table (exchangeValue, eraseValue) + // may be forced to follow a redirected cell, which changes the Mutator itself. + // Note that even if the Mutator was constructed from an existing cell, + // exchangeValue() can still trigger a resize if the existing cell was previously marked deleted, + // or if another thread deletes the key between the two steps. + class Mutator + { + private: + friend class ConcurrentMap; + + ConcurrentMap& m_map; + typename Details::Table* m_table; + typename Details::Cell* m_cell; + Value m_value; + + // Constructor: Find existing cell + Mutator(ConcurrentMap& map, Key key, bool) : m_map(map), m_value(Value(ValueTraits::NullValue)) + { + Hash hash = KeyTraits::hash(key); + for (;;) { + m_table = m_map.m_root.load(Consume); + m_cell = Details::find(hash, m_table); + if (!m_cell) { + return; + } + + Value value = m_cell->value.load(Consume); + if (value != Value(ValueTraits::Redirect)) { + // Found an existing value + m_value = value; + return; + } + // We've encountered a Redirect value. Help finish the migration. + m_table->jobCoordinator.participate(); + // Try again using the latest root. + } + } + + // Constructor: Insert or find cell + Mutator(ConcurrentMap& map, Key key) : m_map(map), m_value(Value(ValueTraits::NullValue)) + { + Hash hash = KeyTraits::hash(key); + for (;;) { + m_table = m_map.m_root.load(Consume); + quint64 overflowIdx; + switch (Details::insertOrFind(hash, m_table, m_cell, overflowIdx)) { // Modifies m_cell + case Details::InsertResult_InsertedNew: { + // We've inserted a new cell. Don't load m_cell->value. + return; + } + case Details::InsertResult_AlreadyFound: { + // The hash was already found in the table. 
+ Value value = m_cell->value.load(Consume); + if (value == Value(ValueTraits::Redirect)) { + // We've encountered a Redirect value. + break; // Help finish the migration. + } + // Found an existing value + m_value = value; + return; + } + case Details::InsertResult_Overflow: { + // Unlike ConcurrentMap_Linear, we don't need to keep track of & pass a "mustDouble" flag. + // Passing overflowIdx is sufficient to prevent an infinite loop here. + // It defines the start of the range of cells to check while estimating total cells in use. + // After the first migration, deleted keys are purged, so if we hit this line during the + // second loop iteration, every cell in the range will be in use, thus the estimate will be 100%. + // (Concurrent deletes could result in further iterations, but it will eventually settle.) + Details::beginTableMigration(m_map, m_table, overflowIdx); + break; + } + } + // A migration has been started (either by us, or another thread). Participate until it's complete. + m_table->jobCoordinator.participate(); + // Try again using the latest root. + } + } + + public: + Value getValue() const + { + // Return previously loaded value. Don't load it again. + return Value(m_value); + } + + Value exchangeValue(Value desired) + { + for (;;) { + Value oldValue = m_value; + if (m_cell->value.compareExchangeStrong(m_value, desired, ConsumeRelease)) { + // Exchange was successful. Return previous value. + Value result = m_value; + m_value = desired; // Leave the mutator in a valid state + return result; + } + // The CAS failed and m_value has been updated with the latest value. + if (m_value != Value(ValueTraits::Redirect)) { + if (oldValue == Value(ValueTraits::NullValue) && m_value != Value(ValueTraits::NullValue)) { + // racing write inserted new value + } + // There was a racing write (or erase) to this cell. + // Pretend we exchanged with ourselves, and just let the racing write win. + return desired; + } + + // We've encountered a Redirect value. Help finish the migration. + Hash hash = m_cell->hash.load(Relaxed); + for (;;) { + // Help complete the migration. + m_table->jobCoordinator.participate(); + // Try again in the new table. + m_table = m_map.m_root.load(Consume); + m_value = Value(ValueTraits::NullValue); + quint64 overflowIdx; + + switch (Details::insertOrFind(hash, m_table, m_cell, overflowIdx)) { // Modifies m_cell + case Details::InsertResult_AlreadyFound: + m_value = m_cell->value.load(Consume); + if (m_value == Value(ValueTraits::Redirect)) { + break; + } + goto breakOuter; + case Details::InsertResult_InsertedNew: + goto breakOuter; + case Details::InsertResult_Overflow: + Details::beginTableMigration(m_map, m_table, overflowIdx); + break; + } + // We were redirected... again + } + breakOuter:; + // Try again in the new table. + } + } + + void assignValue(Value desired) + { + exchangeValue(desired); + } + + Value eraseValue() + { + for (;;) { + if (m_value == Value(ValueTraits::NullValue)) { + return Value(m_value); + } + + if (m_cell->value.compareExchangeStrong(m_value, Value(ValueTraits::NullValue), Consume)) { + // Exchange was successful and a non-NULL value was erased and returned by reference in m_value. + Value result = m_value; + m_value = Value(ValueTraits::NullValue); // Leave the mutator in a valid state + return result; + } + + // The CAS failed and m_value has been updated with the latest value. + if (m_value != Value(ValueTraits::Redirect)) { + // There was a racing write (or erase) to this cell. 
+ // Pretend we erased nothing, and just let the racing write win. + return Value(ValueTraits::NullValue); + } + + // We've been redirected to a new table. + Hash hash = m_cell->hash.load(Relaxed); // Re-fetch hash + for (;;) { + // Help complete the migration. + m_table->jobCoordinator.participate(); + // Try again in the new table. + m_table = m_map.m_root.load(Consume); + m_cell = Details::find(hash, m_table); + if (!m_cell) { + m_value = Value(ValueTraits::NullValue); + return m_value; + } + + m_value = m_cell->value.load(Relaxed); + if (m_value != Value(ValueTraits::Redirect)) { + break; + } + } + } + } + }; + + Mutator insertOrFind(Key key) + { + return Mutator(*this, key); + } + + Mutator find(Key key) + { + return Mutator(*this, key, false); + } + + // Lookup without creating a temporary Mutator. + Value get(Key key) + { + Hash hash = KeyTraits::hash(key); + for (;;) { + typename Details::Table* table = m_root.load(Consume); + typename Details::Cell* cell = Details::find(hash, table); + if (!cell) { + return Value(ValueTraits::NullValue); + } + + Value value = cell->value.load(Consume); + if (value != Value(ValueTraits::Redirect)) { + return value; // Found an existing value + } + // We've been redirected to a new table. Help with the migration. + table->jobCoordinator.participate(); + // Try again in the new table. + } + } + + Value assign(Key key, Value desired) + { + Mutator iter(*this, key); + return iter.exchangeValue(desired); + } + + Value exchange(Key key, Value desired) + { + Mutator iter(*this, key); + return iter.exchangeValue(desired); + } + + Value erase(Key key) + { + Mutator iter(*this, key, false); + return iter.eraseValue(); + } + + // The easiest way to implement an Iterator is to prevent all Redirects. + // The currrent Iterator does that by forbidding concurrent inserts. + // To make it work with concurrent inserts, we'd need a way to block TableMigrations. + class Iterator + { + private: + typename Details::Table* m_table; + quint64 m_idx; + Key m_hash; + Value m_value; + + public: + Iterator() = default; + Iterator(ConcurrentMap& map) + { + // Since we've forbidden concurrent inserts (for now), nonatomic would suffice here, but let's plan ahead: + m_table = map.m_root.load(Consume); + m_idx = -1; + next(); + } + + void setMap(ConcurrentMap& map) + { + m_table = map.m_root.load(Consume); + m_idx = -1; + next(); + } + + void next() + { + while (++m_idx <= m_table->sizeMask) { + // Index still inside range of table. + typename Details::CellGroup* group = m_table->getCellGroups() + (m_idx >> 2); + typename Details::Cell* cell = group->cells + (m_idx & 3); + m_hash = cell->hash.load(Relaxed); + + if (m_hash != KeyTraits::NullHash) { + // Cell has been reserved. + m_value = cell->value.load(Relaxed); + if (m_value != Value(ValueTraits::NullValue)) + return; // Yield this cell. + } + } + // That's the end of the map. 
+ m_hash = KeyTraits::NullHash; + m_value = Value(ValueTraits::NullValue); + } + + bool isValid() const + { + return m_value != Value(ValueTraits::NullValue); + } + + Key getKey() const + { + // Since we've forbidden concurrent inserts (for now), nonatomic would suffice here, but let's plan ahead: + return KeyTraits::dehash(m_hash); + } + + Value getValue() const + { + return m_value; + } + }; +}; + +#endif // CONCURRENTMAP_LEAPFROG_H diff --git a/libs/image/3rdparty/lock_free_map/leapfrog.h b/libs/image/3rdparty/lock_free_map/leapfrog.h new file mode 100644 index 0000000000..7bc95a8253 --- /dev/null +++ b/libs/image/3rdparty/lock_free_map/leapfrog.h @@ -0,0 +1,573 @@ +/*------------------------------------------------------------------------ + Junction: Concurrent data structures in C++ + Copyright (c) 2016 Jeff Preshing + Distributed under the Simplified BSD License. + Original location: https://github.com/preshing/junction + This software is distributed WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the LICENSE file for more information. +------------------------------------------------------------------------*/ + +#ifndef LEAPFROG_H +#define LEAPFROG_H + +#include "map_traits.h" +#include "simple_job_coordinator.h" +#include "kis_assert.h" + +#define SANITY_CHECK + +template +struct Leapfrog { + typedef typename Map::Hash Hash; + typedef typename Map::Value Value; + typedef typename Map::KeyTraits KeyTraits; + typedef typename Map::ValueTraits ValueTraits; + + static const quint64 InitialSize = 8; + static const quint64 TableMigrationUnitSize = 32; + static const quint64 LinearSearchLimit = 128; + static const quint64 CellsInUseSample = LinearSearchLimit; + + Q_STATIC_ASSERT(LinearSearchLimit > 0 && LinearSearchLimit < 256); // Must fit in CellGroup::links + Q_STATIC_ASSERT(CellsInUseSample > 0 && CellsInUseSample <= LinearSearchLimit); // Limit sample to failed search chain + + struct Cell { + Atomic hash; + Atomic value; + }; + + struct CellGroup { + // Every cell in the table actually represents a bucket of cells, all linked together in a probe chain. + // Each cell in the probe chain is located within the table itself. + // "deltas" determines the index of the next cell in the probe chain. + // The first cell in the chain is the one that was hashed. It may or may not actually belong in the bucket. + // The "second" cell in the chain is given by deltas 0 - 3. It's guaranteed to belong in the bucket. + // All subsequent cells in the chain is given by deltas 4 - 7. Also guaranteed to belong in the bucket. 
+ Atomic deltas[8]; + Cell cells[4]; + }; + + struct Table { + const quint64 sizeMask; // a power of two minus one + QMutex mutex; // to DCLI the TableMigration (stored in the jobCoordinator) + SimpleJobCoordinator jobCoordinator; // makes all blocked threads participate in the migration + + Table(quint64 sizeMask) : sizeMask(sizeMask) + { + } + + static Table* create(quint64 tableSize) + { +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(isPowerOf2(tableSize)); + KIS_ASSERT_RECOVER_NOOP(tableSize >= 4); +#endif // SANITY_CHECK + quint64 numGroups = tableSize >> 2; + Table* table = (Table*) std::malloc(sizeof(Table) + sizeof(CellGroup) * numGroups); + new (table) Table(tableSize - 1); + + for (quint64 i = 0; i < numGroups; i++) { + CellGroup* group = table->getCellGroups() + i; + + for (quint64 j = 0; j < 4; j++) { + group->deltas[j].storeNonatomic(0); + group->deltas[j + 4].storeNonatomic(0); + group->cells[j].hash.storeNonatomic(KeyTraits::NullHash); + group->cells[j].value.storeNonatomic(Value(ValueTraits::NullValue)); + } + } + return table; + } + + void destroy() + { + this->Table::~Table(); + std::free(this); + } + + CellGroup* getCellGroups() const + { + return (CellGroup*)(this + 1); + } + + quint64 getNumMigrationUnits() const + { + return sizeMask / TableMigrationUnitSize + 1; + } + }; + + class TableMigration : public SimpleJobCoordinator::Job + { + public: + struct Source { + Table* table; + Atomic sourceIndex; + }; + + Map& m_map; + Table* m_destination; + Atomic m_workerStatus; // number of workers + end flag + Atomic m_overflowed; + Atomic m_unitsRemaining; + quint64 m_numSources; + + TableMigration(Map& map) : m_map(map) + { + } + + static TableMigration* create(Map& map, quint64 numSources) + { + TableMigration* migration = + (TableMigration*) std::malloc(sizeof(TableMigration) + sizeof(TableMigration::Source) * numSources); + new (migration) TableMigration(map); + + migration->m_workerStatus.storeNonatomic(0); + migration->m_overflowed.storeNonatomic(false); + migration->m_unitsRemaining.storeNonatomic(0); + migration->m_numSources = numSources; + // Caller is responsible for filling in sources & destination + return migration; + } + + virtual ~TableMigration() override + { + } + + void destroy() + { + // Destroy all source tables. + for (quint64 i = 0; i < m_numSources; i++) + if (getSources()[i].table) + getSources()[i].table->destroy(); + // Delete the migration object itself. 
+ this->TableMigration::~TableMigration(); + std::free(this); + } + + Source* getSources() const + { + return (Source*)(this + 1); + } + + bool migrateRange(Table* srcTable, quint64 startIdx); + virtual void run() override; + }; + + static Cell* find(Hash hash, Table* table) + { +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(table); + KIS_ASSERT_RECOVER_NOOP(hash != KeyTraits::NullHash); +#endif // SANITY_CHECK + quint64 sizeMask = table->sizeMask; + // Optimistically check hashed cell even though it might belong to another bucket + quint64 idx = hash & sizeMask; + CellGroup* group = table->getCellGroups() + (idx >> 2); + Cell* cell = group->cells + (idx & 3); + Hash probeHash = cell->hash.load(Relaxed); + + if (probeHash == hash) { + return cell; + } else if (probeHash == KeyTraits::NullHash) { + return cell = NULL; + } + // Follow probe chain for our bucket + quint8 delta = group->deltas[idx & 3].load(Relaxed); + while (delta) { + idx = (idx + delta) & sizeMask; + group = table->getCellGroups() + (idx >> 2); + cell = group->cells + (idx & 3); + Hash probeHash = cell->hash.load(Relaxed); + // Note: probeHash might actually be NULL due to memory reordering of a concurrent insert, + // but we don't check for it. We just follow the probe chain. + if (probeHash == hash) { + return cell; + } + delta = group->deltas[(idx & 3) + 4].load(Relaxed); + } + // End of probe chain, not found + return NULL; + } + + // FIXME: Possible optimization: Dedicated insert for migration? It wouldn't check for InsertResult_AlreadyFound. + enum InsertResult { InsertResult_AlreadyFound, InsertResult_InsertedNew, InsertResult_Overflow }; + static InsertResult insertOrFind(Hash hash, Table* table, Cell*& cell, quint64& overflowIdx) + { +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(table); + KIS_ASSERT_RECOVER_NOOP(hash != KeyTraits::NullHash); +#endif // SANITY_CHECK + quint64 sizeMask = table->sizeMask; + quint64 idx = quint64(hash); + + // Check hashed cell first, though it may not even belong to the bucket. + CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2); + cell = group->cells + (idx & 3); + Hash probeHash = cell->hash.load(Relaxed); + + if (probeHash == KeyTraits::NullHash) { + if (cell->hash.compareExchangeStrong(probeHash, hash, Relaxed)) { + // There are no links to set. We're done. + return InsertResult_InsertedNew; + } else { + // Fall through to check if it was the same hash... + } + } + + if (probeHash == hash) { + return InsertResult_AlreadyFound; + } + + // Follow the link chain for this bucket. + quint64 maxIdx = idx + sizeMask; + quint64 linkLevel = 0; + Atomic* prevLink; + for (;;) { + followLink: + prevLink = group->deltas + ((idx & 3) + linkLevel); + linkLevel = 4; + quint8 probeDelta = prevLink->load(Relaxed); + + if (probeDelta) { + idx += probeDelta; + // Check the hash for this cell. + group = table->getCellGroups() + ((idx & sizeMask) >> 2); + cell = group->cells + (idx & 3); + probeHash = cell->hash.load(Relaxed); + + if (probeHash == KeyTraits::NullHash) { + // Cell was linked, but hash is not visible yet. + // We could avoid this case (and guarantee it's visible) using acquire & release, but instead, + // just poll until it becomes visible. 
+ do { + probeHash = cell->hash.load(Acquire); + } while (probeHash == KeyTraits::NullHash); + } + +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(((probeHash ^ hash) & sizeMask) == 0); // Only hashes in same bucket can be linked +#endif // SANITY_CHECK + if (probeHash == hash) { + return InsertResult_AlreadyFound; + } + } else { + // Reached the end of the link chain for this bucket. + // Switch to linear probing until we reserve a new cell or find a late-arriving cell in the same bucket. + quint64 prevLinkIdx = idx; +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(qint64(maxIdx - idx) >= 0); // Nobody would have linked an idx that's out of range. +#endif // SANITY_CHECK + quint64 linearProbesRemaining = qMin(maxIdx - idx, quint64(LinearSearchLimit)); + + while (linearProbesRemaining-- > 0) { + idx++; + group = table->getCellGroups() + ((idx & sizeMask) >> 2); + cell = group->cells + (idx & 3); + probeHash = cell->hash.load(Relaxed); + + if (probeHash == KeyTraits::NullHash) { + // It's an empty cell. Try to reserve it. + if (cell->hash.compareExchangeStrong(probeHash, hash, Relaxed)) { + // Success. We've reserved the cell. Link it to previous cell in same bucket. +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(probeDelta == 0); +#endif // SANITY_CHECK + quint8 desiredDelta = idx - prevLinkIdx; + prevLink->store(desiredDelta, Relaxed); + return InsertResult_InsertedNew; + } else { + // Fall through to check if it's the same hash... + } + } + Hash x = (probeHash ^ hash); + // Check for same hash. + if (!x) { + return InsertResult_AlreadyFound; + } + // Check for same bucket. + if ((x & sizeMask) == 0) { + // Attempt to set the link on behalf of the late-arriving cell. + // This is usually redundant, but if we don't attempt to set the late-arriving cell's link here, + // there's no guarantee that our own link chain will be well-formed by the time this function returns. + // (Indeed, subsequent lookups sometimes failed during testing, for this exact reason.) + quint8 desiredDelta = idx - prevLinkIdx; + prevLink->store(desiredDelta, Relaxed); + goto followLink; // Try to follow link chain for the bucket again. + } + // Continue linear search... + } + // Table is too full to insert. + overflowIdx = idx + 1; + return InsertResult_Overflow; + } + } + } + + static void beginTableMigrationToSize(Map& map, Table* table, quint64 nextTableSize) + { + // Create new migration by DCLI. + SimpleJobCoordinator::Job* job = table->jobCoordinator.loadConsume(); + if (job) { + // new migration already exists + } else { + QMutexLocker guard(&table->mutex); + job = table->jobCoordinator.loadConsume(); // Non-atomic would be sufficient, but that's OK. + + if (job) { + // new migration already exists (double-checked) + } else { + // Create new migration. + TableMigration* migration = TableMigration::create(map, 1); + migration->m_unitsRemaining.storeNonatomic(table->getNumMigrationUnits()); + migration->getSources()[0].table = table; + migration->getSources()[0].sourceIndex.storeNonatomic(0); + migration->m_destination = Table::create(nextTableSize); + // Publish the new migration. + table->jobCoordinator.storeRelease(migration); + } + } + } + + static void beginTableMigration(Map& map, Table* table, quint64 overflowIdx) + { + // Estimate number of cells in use based on a small sample. 
+ quint64 sizeMask = table->sizeMask; + quint64 idx = overflowIdx - CellsInUseSample; + quint64 inUseCells = 0; + for (quint64 linearProbesRemaining = CellsInUseSample; linearProbesRemaining > 0; linearProbesRemaining--) { + CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2); + Cell* cell = group->cells + (idx & 3); + Value value = cell->value.load(Relaxed); + if (value == Value(ValueTraits::Redirect)) { + // Another thread kicked off the jobCoordinator. The caller will participate upon return. + return; + } + if (value != Value(ValueTraits::NullValue)) + inUseCells++; + idx++; + } + float inUseRatio = float(inUseCells) / CellsInUseSample; + float estimatedInUse = (sizeMask + 1) * inUseRatio; + quint64 nextTableSize = qMax(quint64(InitialSize), roundUpPowerOf2(quint64(estimatedInUse * 2))); + beginTableMigrationToSize(map, table, nextTableSize); + } +}; // Leapfrog + +template +bool Leapfrog::TableMigration::migrateRange(Table* srcTable, quint64 startIdx) +{ + quint64 srcSizeMask = srcTable->sizeMask; + quint64 endIdx = qMin(startIdx + TableMigrationUnitSize, srcSizeMask + 1); + // Iterate over source range. + for (quint64 srcIdx = startIdx; srcIdx < endIdx; srcIdx++) { + CellGroup* srcGroup = srcTable->getCellGroups() + ((srcIdx & srcSizeMask) >> 2); + Cell* srcCell = srcGroup->cells + (srcIdx & 3); + Hash srcHash; + Value srcValue; + // Fetch the srcHash and srcValue. + for (;;) { + srcHash = srcCell->hash.load(Relaxed); + if (srcHash == KeyTraits::NullHash) { + // An unused cell. Try to put a Redirect marker in its value. + srcValue = + srcCell->value.compareExchange(Value(ValueTraits::NullValue), Value(ValueTraits::Redirect), Relaxed); + if (srcValue == Value(ValueTraits::Redirect)) { + // srcValue is already marked Redirect due to previous incomplete migration. + break; + } + if (srcValue == Value(ValueTraits::NullValue)) { + break; // Redirect has been placed. Break inner loop, continue outer loop. + } + // Otherwise, somebody just claimed the cell. Read srcHash again... + } else { + // Check for deleted/uninitialized value. + srcValue = srcCell->value.load(Relaxed); + if (srcValue == Value(ValueTraits::NullValue)) { + // Try to put a Redirect marker. + if (srcCell->value.compareExchangeStrong(srcValue, Value(ValueTraits::Redirect), Relaxed)) { + break; // Redirect has been placed. Break inner loop, continue outer loop. + } + + if (srcValue == Value(ValueTraits::Redirect)) { + // FIXME: I don't think this will happen. Investigate & change to assert + break; + } + } else if (srcValue == Value(ValueTraits::Redirect)) { + // srcValue is already marked Redirect due to previous incomplete migration. + break; + } + + // We've got a key/value pair to migrate. + // Reserve a destination cell in the destination. +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(srcHash != KeyTraits::NullHash); + KIS_ASSERT_RECOVER_NOOP(srcValue != Value(ValueTraits::NullValue)); + KIS_ASSERT_RECOVER_NOOP(srcValue != Value(ValueTraits::Redirect)); +#endif // SANITY_CHECK + Cell* dstCell; + quint64 overflowIdx; + InsertResult result = insertOrFind(srcHash, m_destination, dstCell, overflowIdx); + // During migration, a hash can only exist in one place among all the source tables, + // and it is only migrated by one thread. Therefore, the hash will never already exist + // in the destination table: +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(result != InsertResult_AlreadyFound); +#endif // SANITY_CHECK + if (result == InsertResult_Overflow) { + // Destination overflow. 
+ // This can happen for several reasons. For example, the source table could have
+ // consisted entirely of deleted cells when it overflowed, resulting in a small destination
+ // table size, but then another thread could re-insert all the same hashes
+ // before the migration completed.
+ // Caller will cancel the current migration and begin a new one.
+ return false;
+ }
+ // Migrate the old value to the new cell.
+ for (;;) {
+ // Copy srcValue to the destination.
+ dstCell->value.store(srcValue, Relaxed);
+ // Try to place a Redirect marker in srcValue.
+ Value doubleCheckedSrcValue = srcCell->value.compareExchange(srcValue, Value(ValueTraits::Redirect), Relaxed);
+#ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(doubleCheckedSrcValue != Value(ValueTraits::Redirect)); // Only one thread can redirect a cell at a time.
+#endif // SANITY_CHECK
+ if (doubleCheckedSrcValue == srcValue) {
+ // No racing writes to the src. We've successfully placed the Redirect marker.
+ // srcValue was non-NULL when we decided to migrate it, but it may have changed to NULL
+ // by a late-arriving erase.
+ if (srcValue == Value(ValueTraits::NullValue)) {
+ // The racing update was an erase; the cell is migrated as empty.
+ }
+
+ break;
+ }
+ // There was a late-arriving write (or erase) to the src. Migrate the new value and try again.
+ srcValue = doubleCheckedSrcValue;
+ }
+ // Cell successfully migrated. Proceed to next source cell.
+ break;
+ }
+ }
+ }
+ // Range has been migrated successfully.
+ return true;
+}
+
+template
+void Leapfrog::TableMigration::run()
+{
+ // Conditionally increment the shared # of workers.
+ quint64 probeStatus = m_workerStatus.load(Relaxed);
+ do {
+ if (probeStatus & 1) {
+ // End flag is already set, so do nothing.
+ return;
+ }
+ } while (!m_workerStatus.compareExchangeWeak(probeStatus, probeStatus + 2, Relaxed, Relaxed));
+ // # of workers has been incremented, and the end flag is clear.
+#ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP((probeStatus & 1) == 0);
+#endif // SANITY_CHECK
+
+ // Iterate over all source tables.
+ for (quint64 s = 0; s < m_numSources; s++) {
+ Source& source = getSources()[s];
+ // Loop over all migration units in this source table.
+ for (;;) {
+ if (m_workerStatus.load(Relaxed) & 1) {
+ goto endMigration;
+ }
+ quint64 startIdx = source.sourceIndex.fetchAdd(TableMigrationUnitSize, Relaxed);
+ if (startIdx >= source.table->sizeMask + 1)
+ break; // No more migration units in this table. Try next source table.
+ bool overflowed = !migrateRange(source.table, startIdx);
+ if (overflowed) {
+ // *** FAILED MIGRATION ***
+ // TableMigration failed due to destination table overflow.
+ // No other thread can declare the migration successful at this point, because *this* unit will never complete,
+ // hence m_unitsRemaining won't reach zero.
+ // However, multiple threads can independently detect a failed migration at the same time.
+ // We store overflowed in a shared variable because we must flush all the worker threads before
+ // we can safely deal with the overflow. Therefore, the thread that detects the failure is often
+ // different from the thread that deals with it.
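+ // Record the overflow and raise the end flag (bit 0 of m_workerStatus) so that all
+ // workers wind down; the last worker to leave run() checks m_overflowed and restarts
+ // the migration with a larger destination table.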
+ bool oldOverflowed = m_overflowed.exchange(overflowed, Relaxed); + if (oldOverflowed) { + // race to set m_overflowed + } + + m_workerStatus.fetchOr(1, Relaxed); + goto endMigration; + } + + qint64 prevRemaining = m_unitsRemaining.fetchSub(1, Relaxed); +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(prevRemaining > 0); +#endif // SANITY_CHECK + if (prevRemaining == 1) { + // *** SUCCESSFUL MIGRATION *** + // That was the last chunk to migrate. + m_workerStatus.fetchOr(1, Relaxed); + goto endMigration; + } + } + } + +endMigration: + // Decrement the shared # of workers. + probeStatus = m_workerStatus.fetchSub(2, AcquireRelease); // AcquireRelease makes all previous writes visible to the last worker thread. + if (probeStatus >= 4) { + // There are other workers remaining. Return here so that only the very last worker will proceed. + return; + } + + // We're the very last worker thread. + // Perform the appropriate post-migration step depending on whether the migration succeeded or failed. +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(probeStatus == 3); +#endif // SANITY_CHECK + bool overflowed = m_overflowed.loadNonatomic(); // No racing writes at this point + if (!overflowed) { + // The migration succeeded. This is the most likely outcome. Publish the new subtree. + m_map.publishTableMigration(this); + // End the jobCoodinator. + getSources()[0].table->jobCoordinator.end(); + } else { + // The migration failed due to the overflow of the destination table. + Table* origTable = getSources()[0].table; + QMutexLocker guard(&origTable->mutex); + SimpleJobCoordinator::Job* checkedJob = origTable->jobCoordinator.loadConsume(); + + if (checkedJob != this) { + // a new TableMigration was already started + } else { + TableMigration* migration = TableMigration::create(m_map, m_numSources + 1); + // Double the destination table size. + migration->m_destination = Table::create((m_destination->sizeMask + 1) * 2); + // Transfer source tables to the new migration. + for (quint64 i = 0; i < m_numSources; i++) { + migration->getSources()[i].table = getSources()[i].table; + getSources()[i].table = NULL; + migration->getSources()[i].sourceIndex.storeNonatomic(0); + } + + migration->getSources()[m_numSources].table = m_destination; + migration->getSources()[m_numSources].sourceIndex.storeNonatomic(0); + // Calculate total number of migration units to move. + quint64 unitsRemaining = 0; + for (quint64 s = 0; s < migration->m_numSources; s++) { + unitsRemaining += migration->getSources()[s].table->getNumMigrationUnits(); + } + + migration->m_unitsRemaining.storeNonatomic(unitsRemaining); + // Publish the new migration. + origTable->jobCoordinator.storeRelease(migration); + } + } + + // We're done with this TableMigration. Queue it for GC. + m_map.getGC().enqueue(&TableMigration::destroy, this, true); +} + +#endif // LEAPFROG_H diff --git a/libs/image/3rdparty/lock_free_map/map_traits.h b/libs/image/3rdparty/lock_free_map/map_traits.h new file mode 100644 index 0000000000..92d97e6f4f --- /dev/null +++ b/libs/image/3rdparty/lock_free_map/map_traits.h @@ -0,0 +1,80 @@ +/*------------------------------------------------------------------------ + Junction: Concurrent data structures in C++ + Copyright (c) 2016 Jeff Preshing + Distributed under the Simplified BSD License. + Original location: https://github.com/preshing/junction + This software is distributed WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the LICENSE file for more information. 
+------------------------------------------------------------------------*/ + +#ifndef MAPTRAITS_H +#define MAPTRAITS_H + +#include + +inline quint64 roundUpPowerOf2(quint64 v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v++; + return v; +} + +inline bool isPowerOf2(quint64 v) +{ + return (v & (v - 1)) == 0; +} + +inline quint32 avalanche(quint32 h) +{ + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + return h; +} + +inline quint32 deavalanche(quint32 h) +{ + h ^= h >> 16; + h *= 0x7ed1b41d; + h ^= (h ^ (h >> 13)) >> 13; + h *= 0xa5cb9243; + h ^= h >> 16; + return h; +} + +template +struct DefaultKeyTraits { + typedef T Key; + typedef quint32 Hash; + static const Key NullKey = Key(0); + static const Hash NullHash = Hash(0); + + static Hash hash(T key) + { + return avalanche(Hash(key)); + } + + static Key dehash(Hash hash) + { + return (T) deavalanche(hash); + } +}; + +template +struct DefaultValueTraits { + typedef T Value; + typedef quint32 IntType; + static const IntType NullValue = 0; + static const IntType Redirect = 1; +}; + +#endif // MAPTRAITS_H diff --git a/libs/image/3rdparty/lock_free_map/qsbr.h b/libs/image/3rdparty/lock_free_map/qsbr.h new file mode 100644 index 0000000000..81508780f0 --- /dev/null +++ b/libs/image/3rdparty/lock_free_map/qsbr.h @@ -0,0 +1,109 @@ +/*------------------------------------------------------------------------ + Junction: Concurrent data structures in C++ + Copyright (c) 2016 Jeff Preshing + Distributed under the Simplified BSD License. + Original location: https://github.com/preshing/junction + This software is distributed WITHOUT ANY WARRANTY; without even the + implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the LICENSE file for more information. +------------------------------------------------------------------------*/ + +#ifndef QSBR_H +#define QSBR_H + +#include +#include +#include + +#define CALL_MEMBER(obj, pmf) ((obj).*(pmf)) + +class QSBR +{ +private: + struct Action { + void (*func)(void*); + quint64 param[4]; // Size limit found experimentally. Verified by assert below. + + Action() = default; + + Action(void (*f)(void*), void* p, quint64 paramSize) : func(f) + { + Q_ASSERT(paramSize <= sizeof(param)); // Verify size limit. 
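+ // Copy the caller's closure into the small inline buffer; together with the stored
+ // thunk this type-erases the closure so operator() can replay it later.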
+ memcpy(&param, p, paramSize);
+ }
+
+ void operator()()
+ {
+ func(&param);
+ }
+ };
+
+ QMutex m_mutex;
+ QVector m_pendingActions;
+ QVector m_deferedActions;
+ std::atomic_flag m_isProcessing = ATOMIC_FLAG_INIT;
+
+public:
+
+ template
+ void enqueue(void (T::*pmf)(), T* target, bool migration = false)
+ {
+ struct Closure {
+ void (T::*pmf)();
+ T* target;
+
+ static void thunk(void* param)
+ {
+ Closure* self = (Closure*) param;
+ CALL_MEMBER(*self->target, self->pmf)();
+ }
+ };
+
+ Closure closure = {pmf, target};
+ while (m_isProcessing.test_and_set(std::memory_order_acquire)) {
+ }
+
+ if (migration) {
+ m_deferedActions.append(Action(Closure::thunk, &closure, sizeof(closure)));
+ } else {
+ m_pendingActions.append(Action(Closure::thunk, &closure, sizeof(closure)));
+ }
+
+ m_isProcessing.clear(std::memory_order_release);
+ }
+
+ void update(bool migration)
+ {
+ if (!m_isProcessing.test_and_set(std::memory_order_acquire)) {
+ QVector actions;
+ actions.swap(m_pendingActions);
+
+ if (!migration) {
+ m_pendingActions.swap(m_deferedActions);
+ }
+
+ m_isProcessing.clear(std::memory_order_release);
+
+ for (auto &action : actions) {
+ action();
+ }
+ }
+ }
+
+ void flush()
+ {
+ if (!m_isProcessing.test_and_set(std::memory_order_acquire)) {
+ for (auto &action : m_pendingActions) {
+ action();
+ }
+
+ for (auto &action : m_deferedActions) {
+ action();
+ }
+
+ m_isProcessing.clear(std::memory_order_release);
+ }
+ }
+};
+
+#endif // QSBR_H
diff --git a/libs/image/3rdparty/lock_free_map/simple_job_coordinator.h b/libs/image/3rdparty/lock_free_map/simple_job_coordinator.h
new file mode 100644
index 0000000000..0df56f995f
--- /dev/null
+++ b/libs/image/3rdparty/lock_free_map/simple_job_coordinator.h
@@ -0,0 +1,107 @@
+/*------------------------------------------------------------------------
+ Junction: Concurrent data structures in C++
+ Copyright (c) 2016 Jeff Preshing
+ Distributed under the Simplified BSD License.
+ Original location: https://github.com/preshing/junction
+ This software is distributed WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the LICENSE file for more information.
+------------------------------------------------------------------------*/ + +#ifndef SIMPLEJOBCOORDINATOR_H +#define SIMPLEJOBCOORDINATOR_H + +#include +#include +#include + +#include "kis_assert.h" +#include "atomic.h" + +#define SANITY_CHECK + +class SimpleJobCoordinator +{ +public: + struct Job { + virtual ~Job() + { + } + + virtual void run() = 0; + }; + +private: + Atomic m_job; + QMutex mutex; + QWaitCondition condVar; + +public: + SimpleJobCoordinator() : m_job(quint64(NULL)) + { + } + + Job* loadConsume() const + { + return (Job*) m_job.load(Consume); + } + + void storeRelease(Job* job) + { + { + QMutexLocker guard(&mutex); + m_job.store(quint64(job), Release); + } + + condVar.wakeAll(); + } + + void participate() + { + quint64 prevJob = quint64(NULL); + + for (;;) { + quint64 job = m_job.load(Consume); + if (job == prevJob) { + QMutexLocker guard(&mutex); + + for (;;) { + job = m_job.loadNonatomic(); // No concurrent writes inside lock + if (job != prevJob) { + break; + } + + condVar.wait(&mutex); + } + } + + if (job == 1) { + return; + } + + reinterpret_cast(job)->run(); + prevJob = job; + } + } + + void runOne(Job* job) + { +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(job != (Job*) m_job.load(Relaxed)); +#endif // SANITY_CHECK + storeRelease(job); + job->run(); + } + + void end() + { + { + QMutexLocker guard(&mutex); + m_job.store(1, Release); + } + + condVar.wakeAll(); + } +}; + +#endif // SIMPLEJOBCOORDINATOR_H diff --git a/libs/image/tiles3/kis_memento_manager.h b/libs/image/tiles3/kis_memento_manager.h index 5b9543983c..0b20aaa852 100644 --- a/libs/image/tiles3/kis_memento_manager.h +++ b/libs/image/tiles3/kis_memento_manager.h @@ -1,164 +1,174 @@ /* * Copyright (c) 2009 Dmitry Kazakov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #ifndef KIS_MEMENTO_MANAGER_ #define KIS_MEMENTO_MANAGER_ #include #include "kis_memento_item.h" -#include "kis_tile_hash_table.h" +#include "config-hash-table-implementaion.h" typedef QList KisMementoItemList; typedef QListIterator KisMementoItemListIterator; class KisMemento; struct KisHistoryItem { KisMemento* memento; KisMementoItemList itemList; }; typedef QList KisHistoryList; class KisMemento; typedef KisSharedPtr KisMementoSP; +#ifdef USE_LOCK_FREE_HASH_TABLE +#include "kis_tile_hash_table2.h" + +typedef KisTileHashTableTraits2 KisMementoItemHashTable; +typedef KisTileHashTableIteratorTraits2 KisMementoItemHashTableIterator; +typedef KisTileHashTableIteratorTraits2 KisMementoItemHashTableIteratorConst; +#else +#include "kis_tile_hash_table.h" + typedef KisTileHashTableTraits KisMementoItemHashTable; typedef KisTileHashTableIteratorTraits KisMementoItemHashTableIterator; typedef KisTileHashTableIteratorTraits KisMementoItemHashTableIteratorConst; +#endif // USE_LOCK_FREE_HASH_TABLE class KRITAIMAGE_EXPORT KisMementoManager { public: KisMementoManager(); KisMementoManager(const KisMementoManager& rhs); ~KisMementoManager(); /** * Most tricky part. This function is called by a tile, when it gets new * tile-data through COW. The Memento Manager wraps this tile-data into * KisMementoItem class and waits until commit() order given. By this * time KisMementoItem doesn't take part in COW mechanism. It only holds * tileData->m_refCount counter to ensure tile isn't deleted from memory. * When commit() comes, KisMementoItem grabs tileData->m_usersCount and * since that moment it is a rightful co-owner of the tileData and COW * participant. It means that tileData won't be ever changed since then. * Every write request to the original tile will lead to duplicating * tileData and registering it here again... */ void registerTileChange(KisTile *tile); /** * Called when a tile deleted. Creates empty KisMementoItem showing that * there was a tile one day */ void registerTileDeleted(KisTile *tile); /** * Commits changes, made in INDEX: appends m_index into m_revisions list * and owes all modified tileDatas. */ void commit(); /** * Undo and Redo stuff respectively. * * When calling them, INDEX list should be empty, so to say, "working * copy should be clean". */ void rollback(KisTileHashTable *ht); void rollforward(KisTileHashTable *ht); /** * Get old tile, whose memento is in the HEAD revision. * \p existingTile returns if the tile is actually an existing * non-default tile or it was created on the fly * from the default tile data */ KisTileSP getCommitedTile(qint32 col, qint32 row, bool &existingTile); KisMementoSP getMemento(); bool hasCurrentMemento() { return m_currentMemento; } KisMementoSP currentMemento(); void setDefaultTileData(KisTileData *defaultTileData); void debugPrintInfo(); /** * Removes all the history that preceds the revision * pointed by oldestMemento. That is after calling to * purgeHistory(someMemento) you won't be able to do * rollback(someMemento) anymore. 
*/ void purgeHistory(KisMementoSP oldestMemento); protected: qint32 findRevisionByMemento(KisMementoSP memento) const; void resetRevisionHistory(KisMementoItemList list); protected: /** * INDEX of tiles to be committed with next commit() * We use a hash table to be able to check that * we have the only memento item for a tile * per commit efficiently */ KisMementoItemHashTable m_index; /** * Main list that stores every commit ever done */ KisHistoryList m_revisions; /** * List of revisions temporarily undone while rollback() */ KisHistoryList m_cancelledRevisions; /** * A hash table, that stores the most recently updated * versions of tiles. Say, HEAD revision :) */ KisMementoItemHashTable m_headsHashTable; /** * Stores extent of current INDEX. * It is the "name" of current named transaction */ KisMementoSP m_currentMemento; /** * The flag that blocks registration of changes on tiles. * This is a temporary state of the memento manager, that * is used for traveling in history * * \see rollback() * \see rollforward() */ bool m_registrationBlocked; }; #endif /* KIS_MEMENTO_MANAGER_ */ diff --git a/libs/image/tiles3/kis_tile_hash_table2.h b/libs/image/tiles3/kis_tile_hash_table2.h new file mode 100644 index 0000000000..f468f46000 --- /dev/null +++ b/libs/image/tiles3/kis_tile_hash_table2.h @@ -0,0 +1,371 @@ +#ifndef KIS_TILEHASHTABLE_2_H +#define KIS_TILEHASHTABLE_2_H + +#include "kis_shared.h" +#include "kis_shared_ptr.h" +#include "3rdparty/lock_free_map/concurrent_map.h" +#include "kis_tile.h" + +#define SANITY_CHECK + +/** + * This is a template for a hash table that stores tiles (or some other + * objects resembling tiles). Actually, this object should only have + * col()/row() methods and be able to answer setNext()/next() requests to + * be stored here. It is used in KisTiledDataManager and + * KisMementoManager. + * + * How to use: + * 1) each hash must be unique, otherwise tiles would rewrite each-other + * 2) 0 key is reserved, so can't be used + * 3) col and row must be less than 0x7FFF to guarantee uniqueness of hash for each pair + */ + +template +class KisTileHashTableIteratorTraits2; + +template +class KisTileHashTableTraits2 +{ + static constexpr bool isInherited = std::is_convertible::value; + Q_STATIC_ASSERT_X(isInherited, "Template must inherit KisShared"); + +public: + typedef T TileType; + typedef KisSharedPtr TileTypeSP; + typedef KisWeakSharedPtr TileTypeWSP; + + KisTileHashTableTraits2(KisMementoManager *mm); + KisTileHashTableTraits2(const KisTileHashTableTraits2 &ht, KisMementoManager *mm); + ~KisTileHashTableTraits2(); + + bool isEmpty() + { + return !m_numTiles.load(); + } + + bool tileExists(qint32 col, qint32 row); + + /** + * Returns a tile in position (col,row). If no tile exists, + * returns null. + * \param col column of the tile + * \param row row of the tile + */ + TileTypeSP getExistingTile(qint32 col, qint32 row); + + /** + * Returns a tile in position (col,row). If no tile exists, + * creates a new one, attaches it to the list and returns. + * \param col column of the tile + * \param row row of the tile + * \param newTile out-parameter, returns true if a new tile + * was created + */ + TileTypeSP getTileLazy(qint32 col, qint32 row, bool& newTile); + + /** + * Returns a tile in position (col,row). If no tile exists, + * creates nothing, but returns shared default tile object + * of the table. Be careful, this object has column and row + * parameters set to (qint32_MIN, qint32_MIN). 
+ * \param col column of the tile + * \param row row of the tile + * \param existingTile returns true if the tile actually exists in the table + * and it is not a lazily created default wrapper tile + */ + TileTypeSP getReadOnlyTileLazy(qint32 col, qint32 row, bool &existingTile); + void addTile(TileTypeSP tile); + bool deleteTile(TileTypeSP tile); + bool deleteTile(qint32 col, qint32 row); + + void clear(); + + void setDefaultTileData(KisTileData *defaultTileData); + KisTileData* defaultTileData(); + + qint32 numTiles() + { + return m_numTiles.load(); + } + + void debugPrintInfo(); + void debugMaxListLength(qint32 &min, qint32 &max); + + friend class KisTileHashTableIteratorTraits2; + +private: + struct MemoryReclaimer { + MemoryReclaimer(TileType *data) : d(data) {} + + void destroy() + { + TileTypeSP::deref(reinterpret_cast(this), d); + this->MemoryReclaimer::~MemoryReclaimer(); + delete this; + } + + private: + TileType *d; + }; + + inline quint32 calculateHash(qint32 col, qint32 row) + { +#ifdef SANITY_CHECK + KIS_ASSERT_RECOVER_NOOP(row < 0x7FFF && col < 0x7FFF) +#endif // SANITY_CHECK + + if (col == 0 && row == 0) { + col = 0x7FFF; + row = 0x7FFF; + } + + return ((static_cast(row) << 16) | (static_cast(col) & 0xFFFF)); + } + + inline void insert(quint32 key, TileTypeSP value) + { + TileTypeSP::ref(&value, value.data()); + TileType *result = m_map.assign(key, value.data()); + + if (result) { + m_map.getGC().enqueue(&MemoryReclaimer::destroy, new MemoryReclaimer(result)); + } else { + m_numTiles.fetchAndAddRelaxed(1); + } + + m_map.getGC().update(m_map.migrationInProcess()); + } + + inline bool erase(quint32 key) + { + bool wasDeleted = false; + TileType *result = m_map.erase(key); + + if (result) { + wasDeleted = true; + m_numTiles.fetchAndSubRelaxed(1); + m_map.getGC().enqueue(&MemoryReclaimer::destroy, new MemoryReclaimer(result)); + } + + m_map.getGC().update(m_map.migrationInProcess()); + return wasDeleted; + } + +private: + ConcurrentMap m_map; + + /** + * We still need something to guard changes in m_defaultTileData, + * otherwise there will be concurrent read/writes, resulting in broken memory. 
+ */ + QReadWriteLock m_defaultPixelDataLock; + + QAtomicInt m_numTiles; + KisTileData *m_defaultTileData; + KisMementoManager *m_mementoManager; +}; + +template +class KisTileHashTableIteratorTraits2 +{ +public: + typedef T TileType; + typedef KisSharedPtr TileTypeSP; + typedef typename ConcurrentMap::Iterator Iterator; + + KisTileHashTableIteratorTraits2(KisTileHashTableTraits2 *ht) : m_ht(ht) + { + m_iter.setMap(m_ht->m_map); + } + + void next() + { + m_iter.next(); + } + + TileTypeSP tile() const + { + return TileTypeSP(m_iter.getValue()); + } + + bool isDone() const + { + return !m_iter.isValid(); + } + + void deleteCurrent() + { + m_ht->erase(m_iter.getKey()); + next(); + } + + void moveCurrentToHashTable(KisTileHashTableTraits2 *newHashTable) + { + TileTypeSP tile = m_iter.getValue(); + next(); + m_ht->deleteTile(tile); + newHashTable->addTile(tile); + } + +private: + KisTileHashTableTraits2 *m_ht; + Iterator m_iter; +}; + +template +KisTileHashTableTraits2::KisTileHashTableTraits2(KisMementoManager *mm) + : m_numTiles(0), m_defaultTileData(0), m_mementoManager(mm) +{ +} + +template +KisTileHashTableTraits2::KisTileHashTableTraits2(const KisTileHashTableTraits2 &ht, KisMementoManager *mm) + : KisTileHashTableTraits2(mm) +{ + setDefaultTileData(ht.m_defaultTileData); + typename ConcurrentMap::Iterator iter(const_cast &>(ht.m_map)); + while (iter.isValid()) { + insert(iter.getKey(), iter.getValue()); + iter.next(); + } +} + +template +KisTileHashTableTraits2::~KisTileHashTableTraits2() +{ + clear(); + m_map.getGC().flush(); + setDefaultTileData(0); +} + +template +bool KisTileHashTableTraits2::tileExists(qint32 col, qint32 row) +{ + return getExistingTile(col, row); +} + +template +typename KisTileHashTableTraits2::TileTypeSP KisTileHashTableTraits2::getExistingTile(qint32 col, qint32 row) +{ + quint32 idx = calculateHash(col, row); + TileTypeSP result = m_map.get(idx); + m_map.getGC().update(m_map.migrationInProcess()); + return result; +} + +template +typename KisTileHashTableTraits2::TileTypeSP KisTileHashTableTraits2::getTileLazy(qint32 col, qint32 row, bool &newTile) +{ + newTile = false; + TileTypeSP tile; + quint32 idx = calculateHash(col, row); + typename ConcurrentMap::Mutator mutator = m_map.insertOrFind(idx); + + if (!mutator.getValue()) { + { + QReadLocker guard(&m_defaultPixelDataLock); + tile = new TileType(col, row, m_defaultTileData, m_mementoManager); + } + TileTypeSP::ref(&tile, tile.data()); + TileType *result = mutator.exchangeValue(tile.data()); + + if (result) { + m_map.getGC().enqueue(&MemoryReclaimer::destroy, new MemoryReclaimer(result)); + } else { + newTile = true; + m_numTiles.fetchAndAddRelaxed(1); + } + + tile = m_map.get(idx); + } else { + tile = mutator.getValue(); + } + + m_map.getGC().update(m_map.migrationInProcess()); + return tile; +} + +template +typename KisTileHashTableTraits2::TileTypeSP KisTileHashTableTraits2::getReadOnlyTileLazy(qint32 col, qint32 row, bool &existingTile) +{ + quint32 idx = calculateHash(col, row); + TileTypeSP tile = m_map.get(idx); + existingTile = tile; + + if (!existingTile) { + QReadLocker guard(&m_defaultPixelDataLock); + tile = new TileType(col, row, m_defaultTileData, 0); + } + + m_map.getGC().update(m_map.migrationInProcess()); + return tile; +} + +template +void KisTileHashTableTraits2::addTile(TileTypeSP tile) +{ + quint32 idx = calculateHash(tile->col(), tile->row()); + insert(idx, tile); +} + +template +bool KisTileHashTableTraits2::deleteTile(TileTypeSP tile) +{ + return deleteTile(tile->col(), tile->row()); +} 
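+
+// How a (col, row) pair becomes the 32-bit key used in m_map (see calculateHash() above):
+// the row is packed into the high 16 bits and the column into the low 16 bits. Key 0 is
+// reserved as the map's null key, which is why the (0, 0) tile is remapped to
+// (0x7FFF, 0x7FFF). A short sketch:
+//
+//     quint32 key = (quint32(row) << 16) | (quint32(col) & 0xFFFF);
+//     // (col = 3, row = 2) -> 0x00020003
+//     // (col = 0, row = 0) -> 0x7FFF7FFF (after the remapping in calculateHash())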
+ +template +bool KisTileHashTableTraits2::deleteTile(qint32 col, qint32 row) +{ + quint32 idx = calculateHash(col, row); + return erase(idx); +} + +template +void KisTileHashTableTraits2::clear() +{ + typename ConcurrentMap::Iterator iter(m_map); + while (iter.isValid()) { + erase(iter.getKey()); + iter.next(); + } +} + +template +inline void KisTileHashTableTraits2::setDefaultTileData(KisTileData *defaultTileData) +{ + QWriteLocker guard(&m_defaultPixelDataLock); + if (m_defaultTileData) { + m_defaultTileData->release(); + m_defaultTileData = 0; + } + + if (defaultTileData) { + defaultTileData->acquire(); + m_defaultTileData = defaultTileData; + } +} + +template +inline KisTileData* KisTileHashTableTraits2::defaultTileData() +{ + QReadLocker guard(&m_defaultPixelDataLock); + return m_defaultTileData; +} + +template +void KisTileHashTableTraits2::debugPrintInfo() +{ +} + +template +void KisTileHashTableTraits2::debugMaxListLength(qint32 &min, qint32 &max) +{ +} + +typedef KisTileHashTableTraits2 KisTileHashTable; +typedef KisTileHashTableIteratorTraits2 KisTileHashTableIterator; +typedef KisTileHashTableIteratorTraits2 KisTileHashTableConstIterator; + +#endif // KIS_TILEHASHTABLE_2_H diff --git a/libs/image/tiles3/kis_tiled_data_manager.h b/libs/image/tiles3/kis_tiled_data_manager.h index 8308965a0c..b6f8384d17 100644 --- a/libs/image/tiles3/kis_tiled_data_manager.h +++ b/libs/image/tiles3/kis_tiled_data_manager.h @@ -1,414 +1,420 @@ /* * Copyright (c) 2004 Boudewijn Rempt * (c) 2009 Dmitry Kazakov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef KIS_TILEDDATAMANAGER_H_ #define KIS_TILEDDATAMANAGER_H_ #include #include #include #include #include +#include "config-hash-table-implementaion.h" //#include "kis_debug.h" #include "kritaimage_export.h" +#ifdef USE_LOCK_FREE_HASH_TABLE +#include "kis_tile_hash_table2.h" +#else #include "kis_tile_hash_table.h" +#endif // USE_LOCK_FREE_HASH_TABLE + #include "kis_memento_manager.h" #include "kis_memento.h" #include "KisTiledExtentManager.h" class KisTiledDataManager; typedef KisSharedPtr KisTiledDataManagerSP; class KisTiledIterator; class KisTiledRandomAccessor; class KisPaintDeviceWriter; class QIODevice; /** * KisTiledDataManager implements the interface that KisDataManager defines * * The interface definition is enforced by KisDataManager calling all the methods * which must also be defined in KisTiledDataManager. It is not allowed to change the interface * as other datamangers may also rely on the same interface. * * * Storing undo/redo data * * Offering ordered and unordered iterators over rects of pixels * * (eventually) efficiently loading and saving data in a format * that may allow deferred loading. * * A datamanager knows nothing about the type of pixel data except * how many quint8's a single pixel takes. 
*/ class KRITAIMAGE_EXPORT KisTiledDataManager : public KisShared { private: static const qint32 LEGACY_VERSION = 1; static const qint32 CURRENT_VERSION = 2; protected: /*FIXME:*/ public: KisTiledDataManager(quint32 pixelSize, const quint8 *defPixel); virtual ~KisTiledDataManager(); KisTiledDataManager(const KisTiledDataManager &dm); KisTiledDataManager & operator=(const KisTiledDataManager &dm); protected: // Allow the baseclass of iterators access to the interior // derived iterator classes must go through KisTiledIterator friend class KisTiledIterator; friend class KisBaseIterator; friend class KisTiledRandomAccessor; friend class KisRandomAccessor2; friend class KisStressJob; public: void setDefaultPixel(const quint8 *defPixel); const quint8 *defaultPixel() const { return m_defaultPixel; } /** * Every iterator fetches both types of tiles all the time: old and new. * For projection devices these tiles are **always** the same, but doing * two distinct calls makes double pressure on the read-write lock in the * hash table. * * Merging two calls into one allows us to avoid additional tile fetch from * the hash table and therefore reduce waiting time. */ inline void getTilesPair(qint32 col, qint32 row, bool writable, KisTileSP *tile, KisTileSP *oldTile) { *tile = getTile(col, row, writable); bool unused; *oldTile = m_mementoManager->getCommitedTile(col, row, unused); if (!*oldTile) { *oldTile = *tile; } } inline KisTileSP getTile(qint32 col, qint32 row, bool writable) { if (writable) { bool newTile; KisTileSP tile = m_hashTable->getTileLazy(col, row, newTile); if (newTile) { m_extentManager.notifyTileAdded(col, row); } return tile; } else { bool unused; return m_hashTable->getReadOnlyTileLazy(col, row, unused); } } inline KisTileSP getReadOnlyTileLazy(qint32 col, qint32 row, bool &existingTile) { return m_hashTable->getReadOnlyTileLazy(col, row, existingTile); } inline KisTileSP getOldTile(qint32 col, qint32 row, bool &existingTile) { KisTileSP tile = m_mementoManager->getCommitedTile(col, row, existingTile); return tile ? tile : getReadOnlyTileLazy(col, row, existingTile); } inline KisTileSP getOldTile(qint32 col, qint32 row) { bool unused; return getOldTile(col, row, unused); } KisMementoSP getMemento() { QWriteLocker locker(&m_lock); KisMementoSP memento = m_mementoManager->getMemento(); memento->saveOldDefaultPixel(m_defaultPixel, m_pixelSize); return memento; } /** * Finishes having already started transaction */ void commit() { QWriteLocker locker(&m_lock); KisMementoSP memento = m_mementoManager->currentMemento(); if(memento) { memento->saveNewDefaultPixel(m_defaultPixel, m_pixelSize); } m_mementoManager->commit(); } void rollback(KisMementoSP memento) { commit(); QWriteLocker locker(&m_lock); m_mementoManager->rollback(m_hashTable); const quint8 *defaultPixel = memento->oldDefaultPixel(); if(memcmp(m_defaultPixel, defaultPixel, m_pixelSize)) { setDefaultPixelImpl(defaultPixel); } recalculateExtent(); } void rollforward(KisMementoSP memento) { commit(); QWriteLocker locker(&m_lock); m_mementoManager->rollforward(m_hashTable); const quint8 *defaultPixel = memento->newDefaultPixel(); if(memcmp(m_defaultPixel, defaultPixel, m_pixelSize)) { setDefaultPixelImpl(defaultPixel); } recalculateExtent(); } bool hasCurrentMemento() const { return m_mementoManager->hasCurrentMemento(); //return true; } /** * Removes all the history that preceds the revision * pointed by oldestMemento. That is after calling to * purgeHistory(someMemento) you won't be able to do * rollback(someMemento) anymore. 
*/ void purgeHistory(KisMementoSP oldestMemento) { QWriteLocker locker(&m_lock); m_mementoManager->purgeHistory(oldestMemento); } static void releaseInternalPools(); protected: /** * Reads and writes the tiles */ bool write(KisPaintDeviceWriter &store); bool read(QIODevice *stream); void purge(const QRect& area); inline quint32 pixelSize() const { return m_pixelSize; } /* FIXME:*/ public: void extent(qint32 &x, qint32 &y, qint32 &w, qint32 &h) const; void setExtent(qint32 x, qint32 y, qint32 w, qint32 h); QRect extent() const; void setExtent(QRect newRect); QRegion region() const; void clear(QRect clearRect, quint8 clearValue); void clear(QRect clearRect, const quint8 *clearPixel); void clear(qint32 x, qint32 y, qint32 w, qint32 h, quint8 clearValue); void clear(qint32 x, qint32 y, qint32 w, qint32 h, const quint8 *clearPixel); void clear(); /** * Clones rect from another datamanager. The cloned area will be * shared between both datamanagers as much as possible using * copy-on-write. Parts of the rect that cannot be shared * (cross tiles) are deep-copied, */ void bitBlt(KisTiledDataManager *srcDM, const QRect &rect); /** * The same as \ref bitBlt(), but reads old data */ void bitBltOldData(KisTiledDataManager *srcDM, const QRect &rect); /** * Clones rect from another datamanager in a rough and fast way. * All the tiles touched by rect will be shared, between both * managers, that means it will copy a bigger area than was * requested. This method is supposed to be used for bitBlt'ing * into temporary paint devices. */ void bitBltRough(KisTiledDataManager *srcDM, const QRect &rect); /** * The same as \ref bitBltRough(), but reads old data */ void bitBltRoughOldData(KisTiledDataManager *srcDM, const QRect &rect); /** * write the specified data to x, y. There is no checking on pixelSize! */ void setPixel(qint32 x, qint32 y, const quint8 * data); /** * Copy the bytes in the specified rect to a vector. The caller is responsible * for managing the vector. * * \param dataRowStride is the step (in bytes) which should be * added to \p bytes pointer to get to the * next row */ void readBytes(quint8 * bytes, qint32 x, qint32 y, qint32 w, qint32 h, qint32 dataRowStride = -1) const; /** * Copy the bytes in the vector to the specified rect. If there are bytes left * in the vector after filling the rect, they will be ignored. If there are * not enough bytes, the rest of the rect will be filled with the default value * given (by default, 0); * * \param dataRowStride is the step (in bytes) which should be * added to \p bytes pointer to get to the * next row */ void writeBytes(const quint8 * bytes, qint32 x, qint32 y, qint32 w, qint32 h, qint32 dataRowStride = -1); /** * Copy the bytes in the paint device into a vector of arrays of bytes, * where the number of arrays is the number of channels in the * paint device. If the specified area is larger than the paint * device's extent, the default pixel will be read. */ QVector readPlanarBytes(QVector channelsizes, qint32 x, qint32 y, qint32 w, qint32 h) const; /** * Write the data in the separate arrays to the channels. If there * are less vectors than channels, the remaining channels will not * be copied. If any of the arrays points to 0, the channel in * that location will not be touched. If the specified area is * larger than the paint device, the paint device will be * extended. 
There are no guards: if the area covers more pixels * than there are bytes in the arrays, krita will happily fill * your paint device with areas of memory you never wanted to be * read. Krita may also crash. */ void writePlanarBytes(QVector planes, QVector channelsizes, qint32 x, qint32 y, qint32 w, qint32 h); /** * Get the number of contiguous columns starting at x, valid for all values * of y between minY and maxY. */ qint32 numContiguousColumns(qint32 x, qint32 minY, qint32 maxY) const; /** * Get the number of contiguous rows starting at y, valid for all values * of x between minX and maxX. */ qint32 numContiguousRows(qint32 y, qint32 minX, qint32 maxX) const; /** * Get the row stride at pixel (x, y). This is the number of bytes to add to a * pointer to pixel (x, y) to access (x, y + 1). */ qint32 rowStride(qint32 x, qint32 y) const; private: KisTileHashTable *m_hashTable; KisMementoManager *m_mementoManager; quint8* m_defaultPixel; qint32 m_pixelSize; KisTiledExtentManager m_extentManager; mutable QReadWriteLock m_lock; private: // Allow compression routines to calculate (col,row) coordinates // and pixel size friend class KisAbstractTileCompressor; friend class KisTileDataWrapper; qint32 xToCol(qint32 x) const; qint32 yToRow(qint32 y) const; private: void setDefaultPixelImpl(const quint8 *defPixel); bool writeTilesHeader(KisPaintDeviceWriter &store, quint32 numTiles); bool processTilesHeader(QIODevice *stream, quint32 &numTiles); qint32 divideRoundDown(qint32 x, const qint32 y) const; void recalculateExtent(); quint8* duplicatePixel(qint32 num, const quint8 *pixel); template void bitBltImpl(KisTiledDataManager *srcDM, const QRect &rect); template void bitBltRoughImpl(KisTiledDataManager *srcDM, const QRect &rect); void writeBytesBody(const quint8 *data, qint32 x, qint32 y, qint32 width, qint32 height, qint32 dataRowStride = -1); void readBytesBody(quint8 *data, qint32 x, qint32 y, qint32 width, qint32 height, qint32 dataRowStride = -1) const; template void writePlanarBytesBody(QVector planes, QVector channelsizes, qint32 x, qint32 y, qint32 w, qint32 h); QVector readPlanarBytesBody(QVector channelsizes, qint32 x, qint32 y, qint32 w, qint32 h) const; public: void debugPrintInfo() { m_mementoManager->debugPrintInfo(); } }; inline qint32 KisTiledDataManager::divideRoundDown(qint32 x, const qint32 y) const { /** * Equivalent to the following: * -(( -x + (y-1) ) / y) */ return x >= 0 ? x / y : -(((-x - 1) / y) + 1); } inline qint32 KisTiledDataManager::xToCol(qint32 x) const { return divideRoundDown(x, KisTileData::WIDTH); } inline qint32 KisTiledDataManager::yToRow(qint32 y) const { return divideRoundDown(y, KisTileData::HEIGHT); } // during development the following line helps to check the interface is correct // it should be safe to keep it here even during normal compilation //#include "kis_datamanager.h" #endif // KIS_TILEDDATAMANAGER_H_