
Merge pull request #7339 from hvlad/PageCache/HashTable_PR

Hash table based on lock-free list for page cache.

Commit 57218a90a7 by Vlad Khorsun, 2022-11-22 00:00:21 +02:00 (committed by GitHub).
1190 changed files with 288842 additions and 688 deletions.
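The diff below is the build-system plumbing for that change: it vendors the libcds library of lock-free containers under extern/libcds and wires it into every build (GitHub Actions, AppVeyor, the POSIX makefiles, the Android cross-builds, and the MSVC projects, which also gain src/jrd/InitCDSLib.cpp and a libcds.props property sheet). The page cache code that actually uses the new hash table is not part of this excerpt, so as orientation only, here is a minimal sketch of the libcds primitives such a lookup builds on, assuming libcds 2.3.3 with the Hazard Pointer GC as vendored here; the PageNo key, the void* payload, and the table sizes are hypothetical stand-ins, not Firebird's real buffer-cache types.

// Sketch only: a lock-free hash map from libcds (Michael's hash map over
// Michael's lock-free ordered lists), not Firebird's actual CCH code.
// Compile against extern/libcds with -lcds (CDS_BUILD_STATIC_LIB defined
// for the static build, as in make.rules below).
#include <cds/init.h>                        // cds::Initialize / cds::Terminate
#include <cds/gc/hp.h>                       // Hazard Pointer garbage collector
#include <cds/container/michael_kvlist_hp.h> // lock-free ordered key-value list
#include <cds/container/michael_map.h>       // hash map of such lists

#include <cstdint>
#include <functional>

using PageNo = std::uint32_t;                // hypothetical key type

// Bucket type: Michael's lock-free list, ordered by page number.
struct ListTraits : public cds::container::michael_list::traits
{
    typedef std::less<PageNo> less;
};
using Bucket = cds::container::MichaelKVList<cds::gc::HP, PageNo, void*, ListTraits>;

// The hash table itself; its bucket count is fixed at construction.
struct MapTraits : public cds::container::michael_map::traits
{
    struct hash
    {
        size_t operator()(PageNo p) const { return std::hash<PageNo>()(p); }
    };
};
using PageMap = cds::container::MichaelHashMap<cds::gc::HP, Bucket, MapTraits>;

int main()
{
    cds::Initialize();                           // once per process
    {
        cds::gc::HP hpGC;                        // construct the HP garbage collector
        cds::threading::Manager::attachThread(); // every worker thread must attach

        PageMap map(8192, 2);                    // expected item count, load factor
        map.insert(PageNo(42), nullptr);         // lock-free insert
        bool found = map.contains(PageNo(42));   // lock-free lookup
        (void) found;
    }
    cds::Terminate();
    return 0;
}

MichaelHashMap never rehashes: the bucket array is sized once from the expected item count and load factor, so lookups stay lock-free without any global resize latch, which is what makes this family of containers a fit for a page cache hash.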

----------------------------------------------------------------------

@@ -15,7 +15,7 @@ jobs:
 - name: Prepare
 run: |
-sudo apt-get install libtool-bin libtomcrypt1 libtomcrypt-dev libtommath1 libtommath-dev libicu-dev zlib1g-dev
+sudo apt-get install libtool-bin libtomcrypt1 libtomcrypt-dev libtommath1 libtommath-dev libicu-dev zlib1g-dev cmake
 - name: Build
 run: |
@@ -135,7 +135,7 @@ jobs:
 fetch-depth: 10
 - name: Prepare
-run: apk update && apk --no-cache --update add build-base libtool git autoconf automake zlib-dev icu-dev ncurses-dev libedit-dev linux-headers tar
+run: apk update && apk --no-cache --update add build-base libtool git autoconf automake cmake zlib-dev icu-dev ncurses-dev libedit-dev linux-headers tar
 - name: Build
 run: |
@@ -323,7 +323,7 @@ jobs:
 - name: Prepare - Install tools
 run: |
-brew install automake libtool ninja
+brew install automake cmake libtool ninja
 - name: Cache - libc++ install
 id: cache-libcxx-install-macos

----------------------------------------------------------------------
.gitignore (1 addition)

@@ -27,3 +27,4 @@ extern/ttmath/release/
 /src/include/gen/parse.h
 /src/include/gen/autoconfig.auto
 /src/include/gen/autoconfig.h
+extern/libcds/lib/

----------------------------------------------------------------------

@@ -28,7 +28,7 @@ install:
 - cmd: cd builds\win32
 - cmd: run_all.bat JUSTBUILD
 - cmd: set ARTIFACTS_PATH=output_%FB_OUTPUT_SUFFIX%
-- sh: export APT_PACKAGES="libtool-bin"
+- sh: export APT_PACKAGES="libtool-bin cmake"
 - sh: if [ $PLATFORM = "x64" ]; then export APT_PACKAGES="$APT_PACKAGES libtommath1 libtommath-dev libicu-dev zlib1g-dev"; fi
 - sh: if [ $PLATFORM = "x86" ]; then export APT_PACKAGES="$APT_PACKAGES gcc-multilib g++-multilib libncurses5-dev:i386 libtommath-dev:i386 libicu-dev:i386 zlib1g-dev:i386"; fi
 - sh: if [ $PLATFORM = "x64" ]; then export CC="gcc" CXX="g++"; fi

----------------------------------------------------------------------

@@ -83,6 +83,13 @@ LTC_LDFLAGS='-L$(LIB) $(subst $,$$$$,$(call LIB_LINK_RPATH,lib))'
 endif
 endif
+
+# correct build type for cmake builds
+FB_CMAKE_BUILD_TYPE=$(TARGET)
+ifeq ($(FB_CMAKE_BUILD_TYPE),Native)
+FB_CMAKE_BUILD_TYPE=Release
+endif
 .PHONY: master_process cross_process firebird Debug Release external
 all: firebird
@@ -190,6 +197,8 @@ ifeq ($(RE2_BUILD_FLG),Y)
 ln -sf $(ROOT)/extern/re2/obj/libre2.a $(LIB)
 endif
+$(MAKE) libcds
+
 ifeq ($(TOMMATH_BUILD_FLG),Y)
 CC="$(CC)" CFLAGS="$(CFLAGS)" AR="$(AR)" $(MAKE) -C $(ROOT)/extern/libtommath -f makefile.shared GCC="$(CC)"
@@ -279,6 +288,23 @@ $(RE2_LIB): $(RE2_Objs)
 -$(RM) $@
 $(STATICLIB_LINK) $@ $^
+
+#___________________________________________________________________________
+# libcds
+#
+.PHONY: libcds
+libcds:
+	mkdir -p $(LIBCDS)/lib/$(TARGET)
+	cd $(LIBCDS)/lib/$(TARGET); \
+	cmake -DCMAKE_BUILD_TYPE=$(FB_CMAKE_BUILD_TYPE) -DCMAKE_CXX_FLAGS=-fPIC $(LIBCDS)
+	AR="$(AR)" $(MAKE) -C $(LIBCDS)/lib/$(TARGET)
+ifeq ($(TARGET),Debug)
+	ln -sf $(LIBCDS)/lib/$(TARGET)/bin/libcds-s_d.a $(LIB)/libcds.a
+else
+	ln -sf $(LIBCDS)/lib/$(TARGET)/bin/libcds-s.a $(LIB)/libcds.a
+endif
+
 #___________________________________________________________________________
 # main build target for both debug and release builds
 #
@@ -343,6 +369,7 @@ cross2:
 ln -sf $(ROOT)/extern/decNumber/libdecFloat$(CROSS).a $(LIB)
 CXX="$(CXX)" CXXFLAGS="$(CXXFLAGS)" $(MAKE) -C $(ROOT)/extern/int128/absl/numeric
 ln -sf $(ROOT)/extern/int128/absl/numeric/libi128$(CROSS).a $(LIB)
+$(MAKE) libcds
 $(MAKE) yvalve
 $(MAKE) engine
 $(MAKE) fbintl

----------------------------------------------------------------------

@@ -41,7 +41,7 @@ DEV_FLAGS=$(COMMON_FLAGS) $(WARN_FLAGS)
 CROSS_CONFIG=android.arm64
 LDFLAGS += -static-libstdc++
-DroidLibs := -lm -ldl $(DECLIB) $(RE2LIB) $(I128LIB)
+DroidLibs := -lm -ldl $(DECLIB) $(RE2LIB) $(I128LIB) $(LIBCDSLIB)
 UDR_SUPPORT_LIBS :=
 LINK_LIBS = $(DroidLibs)

----------------------------------------------------------------------

@@ -41,7 +41,7 @@ DEV_FLAGS=$(COMMON_FLAGS) $(WARN_FLAGS)
 CROSS_CONFIG=android.arme
 LDFLAGS += -static-libstdc++
-DroidLibs := -lm -ldl $(DECLIB) $(RE2LIB) $(I128LIB)
+DroidLibs := -lm -ldl $(DECLIB) $(RE2LIB) $(I128LIB) $(LIBCDSLIB)
 UDR_SUPPORT_LIBS :=
 LINK_LIBS = $(DroidLibs)

----------------------------------------------------------------------

@@ -41,7 +41,7 @@ DEV_FLAGS=$(COMMON_FLAGS) $(WARN_FLAGS)
 CROSS_CONFIG=android.x86
 LDFLAGS += -static-libstdc++
-DroidLibs := -lm -ldl $(DECLIB) $(RE2LIB) $(I128LIB)
+DroidLibs := -lm -ldl $(DECLIB) $(RE2LIB) $(I128LIB) $(LIBCDSLIB)
 UDR_SUPPORT_LIBS :=
 LINK_LIBS = $(DroidLibs)

----------------------------------------------------------------------

@@ -41,7 +41,7 @@ DEV_FLAGS=$(COMMON_FLAGS) $(WARN_FLAGS)
 CROSS_CONFIG=android.x86_64
 LDFLAGS += -static-libstdc++
-DroidLibs := -lm -ldl $(DECLIB) $(RE2LIB) $(I128LIB)
+DroidLibs := -lm -ldl $(DECLIB) $(RE2LIB) $(I128LIB) $(LIBCDSLIB)
 UDR_SUPPORT_LIBS :=
 LINK_LIBS = $(DroidLibs)

----------------------------------------------------------------------

@@ -148,6 +148,8 @@ else
 I128LIB=
 endif
+
+LIBCDSLIB=-lcds
 # crypt library
 CRYPTLIB=@CRYPTLIB@
@@ -204,8 +206,8 @@ endif
 STATICLIB_LINK = $(AR) crus
-LINK_LIBS = @LIBS@ $(DECLIB) $(RE2LIB) $(I128LIB)
-SO_LINK_LIBS = @LIBS@ $(DECLIB) $(RE2LIB) $(I128LIB)
+LINK_LIBS = @LIBS@ $(DECLIB) $(RE2LIB) $(I128LIB) $(LIBCDSLIB)
+SO_LINK_LIBS = @LIBS@ $(DECLIB) $(RE2LIB) $(I128LIB) $(LIBCDSLIB)
 # Default extensions
@@ -294,6 +296,11 @@ TOMCRYPT_INC=$(TOMCRYPT)/src/headers
 TOMCRYPT_SO=$(TOMCRYPT)/.libs/libtomcrypt.so
 TOMCRYPT_VER=1
+
+# Own libcds support
+LIBCDS=$(ROOT)/extern/libcds
+LIBCDS_INC=$(LIBCDS)
+LIBCDS_DEF=CDS_BUILD_STATIC_LIB
 # LINKER OPTIONS
 #

----------------------------------------------------------------------

@@ -38,6 +38,8 @@ ifneq ($(SYSTEM_BOOST_FLG),Y)
 WFLAGS += -I$(ROOT)/extern/boost
 endif
+
+WFLAGS += -I$(LIBCDS_INC) -D$(LIBCDS_DEF)
 ifeq ($(TOMMATH_BUILD_FLG),Y)
 WFLAGS += -I$(TOMMATH_INC)
 endif

----------------------------------------------------------------------

@@ -22,6 +22,16 @@ for %%v in ( %* ) do (
 @echo Cleaning icu...
 @rmdir /S /Q "%FB_ROOT_PATH%\extern\icu\%FB_TARGET_PLATFORM%\%FBBUILD_BUILDTYPE%" 2>nul
+
+@echo Cleaning cds...
+@for /D %%d in ("%FB_ROOT_PATH%\extern\libcds\obj\*") do (
+rmdir /S /Q "%%d\%FB_TARGET_PLATFORM%\cds\%FB_CONFIG%-static" 2>nul
+)
+@for /D %%d in ("%FB_ROOT_PATH%\extern\libcds\bin\*") do (
+rmdir /S /Q "%%d\%FB_TARGET_PLATFORM%-%FB_CONFIG%-static" 2>nul
+)
+
 @echo Cleaning decNumber...
 @rmdir /S /Q "%FB_ROOT_PATH%\extern\decNumber\lib\%FB_TARGET_PLATFORM%" 2>nul
 @rmdir /S /Q "%FB_ROOT_PATH%\extern\decNumber\temp\%FB_TARGET_PLATFORM%" 2>nul

----------------------------------------------------------------------

@@ -18,6 +18,11 @@ set projects=
 set config=debug
 )
+
+:: Special case for CDS, set in make_boot only
+@if "%FB_LIBCDS%"=="1" (
+set config=%config%-static
+)
+
 shift
 shift

----------------------------------------------------------------------

@@ -34,6 +34,9 @@ if "%ERRLEV%"=="1" goto :END
 call :btyacc
 if "%ERRLEV%"=="1" goto :END
+call :libcds
+if "%ERRLEV%"=="1" goto :END
+
 call :LibTom
 if "%ERRLEV%"=="1" goto :END
@@ -145,6 +148,17 @@ goto :EOF
 if errorlevel 1 call :boot2 decNumber_%FB_OBJ_DIR%
 goto :EOF
+
+::===================
+:: Build libcds
+:libcds
+@echo.
+set FB_LIBCDS=1
+@echo Building libcds (%FB_OBJ_DIR%)...
+@call compile.bat extern\libcds\projects\Win\vc141\cds libcds_%FB_CONFIG%_%FB_TARGET_PLATFORM%.log cds
+if errorlevel 1 call :boot2 libcds%FB_OBJ_DIR%
+set FB_LIBCDS=
+goto :EOF
 ::===================
 :: BUILD ttmath
 :ttmath

----------------------------------------------------------------------

@@ -113,24 +113,28 @@
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdRelease.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
 <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdDebug.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
 <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdRelease.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
 <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdDebug.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <PropertyGroup Label="UserMacros" />
 <PropertyGroup>

----------------------------------------------------------------------

@@ -89,6 +89,7 @@
 <ClCompile Include="..\..\..\src\jrd\GlobalRWLock.cpp" />
 <ClCompile Include="..\..\..\src\jrd\idx.cpp" />
 <ClCompile Include="..\..\..\src\jrd\inf.cpp" />
+<ClCompile Include="..\..\..\src\jrd\InitCDSLib.cpp" />
 <ClCompile Include="..\..\..\src\jrd\intl.cpp" />
 <ClCompile Include="..\..\..\src\jrd\IntlManager.cpp" />
 <ClCompile Include="..\..\..\src\jrd\intl_builtin.cpp" />
@@ -276,6 +277,7 @@
 <ClInclude Include="..\..\..\src\jrd\inf_proto.h" />
 <ClInclude Include="..\..\..\src\jrd\inf_pub.h" />
 <ClInclude Include="..\..\..\src\jrd\ini.h" />
+<ClInclude Include="..\..\..\src\jrd\InitCDSLib.h" />
 <ClInclude Include="..\..\..\src\jrd\ini_proto.h" />
 <ClInclude Include="..\..\..\src\jrd\intl.h" />
 <ClInclude Include="..\..\..\src\jrd\IntlManager.h" />
@@ -429,24 +431,28 @@
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdRelease.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
 <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdDebug.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
 <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdRelease.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
 <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdDebug.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <PropertyGroup Label="UserMacros" />
 <PropertyGroup>

----------------------------------------------------------------------

@@ -513,6 +513,9 @@
 <ClCompile Include="..\..\..\src\jrd\MetaName.cpp">
 <Filter>JRD files</Filter>
 </ClCompile>
+<ClCompile Include="..\..\..\src\jrd\InitCDSLib.cpp">
+<Filter>JRD files</Filter>
+</ClCompile>
 <ClCompile Include="..\..\..\src\jrd\optimizer\InnerJoin.cpp">
 <Filter>Optimizer</Filter>
 </ClCompile>
@@ -1070,6 +1073,9 @@
 <ClInclude Include="..\..\..\src\jrd\QualifiedName.h">
 <Filter>Header files</Filter>
 </ClInclude>
+<ClInclude Include="..\..\..\src\jrd\InitCDSLib.h">
+<Filter>Header files</Filter>
+</ClInclude>
 <ClInclude Include="..\..\..\src\jrd\WorkerAttachment.h">
 <Filter>Header files</Filter>
 </ClInclude>

----------------------------------------------------------------------

@@ -66,24 +66,28 @@
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdDebug.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
 <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdRelease.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
 <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdDebug.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
 <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
 <Import Project="$(VCTargetsPath)Microsoft.CPP.UpgradeFromVC71.props" />
 <Import Project="FirebirdCommon.props" />
 <Import Project="FirebirdRelease.props" />
+<Import Project="libcds.props" />
 </ImportGroup>
 <PropertyGroup Label="UserMacros" />
 <PropertyGroup>

----------------------------------------------------------------------

@@ -239,4 +239,4 @@
 <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
 <ImportGroup Label="ExtensionTargets">
 </ImportGroup>
 </Project>

----------------------------------------------------------------------
libcds.props (new file, 23 lines)

@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" />
<PropertyGroup Label="UserMacros">
<LIBCDS_ROOT>..\..\..\extern\libcds</LIBCDS_ROOT>
</PropertyGroup>
<PropertyGroup />
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(LIBCDS_ROOT);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>CDS_BUILD_STATIC_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(LIBCDS_ROOT)\bin\vc.$(PlatformToolset)\$(Platform)-$(Configuration)-static;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalDependencies>libcds-$(PlatformTarget).lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<BuildMacro Include="LIBCDS_ROOT">
<Value>$(LIBCDS_ROOT)</Value>
</BuildMacro>
</ItemGroup>
</Project>

----------------------------------------------------------------------
extern/libcds/.gitignore (vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
/doc
/sandbox
*.o
*.d
/bin
/obj
/projects/Win/vc14/cds.sdf
/projects/Win/vc14/cds.v14.suo
/projects/Win/vc14/*.user
/projects/Win/vc14/*.opensdf
/projects/Win/vc14/.vs/
/projects/Win/vc141/.vs/
/projects/Win/vc141/*.user
*.log
/.project
/projects/Win/vc14/*.opendb
/test/stress/data/dictionary.txt
/projects/Win/vc14/cds.VC.db
/.cproject
/.settings/
/tools/change_license.pl

----------------------------------------------------------------------
extern/libcds/.travis.yml (vendored, new file, 265 lines)

@@ -0,0 +1,265 @@
language: cpp
install:
- chmod +x ./build/CI/travis-ci/install.sh
- ./build/CI/travis-ci/install.sh
script:
- chmod +x ./build/CI/travis-ci/run.sh
- ./build/CI/travis-ci/run.sh
linux: &linux_gcc
os: linux
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-6
compiler:
- g++-6
before_install:
- eval "CC=gcc-6 && CXX=g++-6"
linux: &linux_clang
os: linux
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-trusty-4.0
packages:
- clang-4.0
compiler:
- clang-4.0
before_install:
- eval "CC=clang-4.0 && CXX=clang++-4.0"
osx: &osx
os: osx
osx_image: xcode8.3
compiler:
- clang
before_install:
- eval "CC=clang && CXX=clang++"
matrix:
include:
## BUILD_TYPE=Release CXX_COMPILER=g++-6
- <<: *linux_gcc
env: TARGET=unit-deque BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-ilist BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-list BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-map BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-misc BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-pqueue BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-queue BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-iset-feldman BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-iset-michael-michael BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-iset-michael-lazy BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-iset-michael-iterable BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-iset-skip BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-iset-split-michael BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-iset-split-lazy BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-iset-split-iterable BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-set BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-striped-set BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-stack BUILD_TYPE=Release
- <<: *linux_gcc
env: TARGET=unit-tree BUILD_TYPE=Release
## BUILD_TYPE=Debug CXX_COMPILER=g++-6
- <<: *linux_gcc
env: TARGET=unit-deque BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-ilist BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-list BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-map BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-misc BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-pqueue BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-queue BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-iset BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-set BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-striped-set BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-stack BUILD_TYPE=Debug
- <<: *linux_gcc
env: TARGET=unit-tree BUILD_TYPE=Debug
## BUILD_TYPE=Release CXX_COMPILER=clang-4.0
- <<: *linux_clang
env: TARGET=unit-deque BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-ilist BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-list BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-misc BUILD_TYPE=Release LINKER_FLAGS=-latomic
- <<: *linux_clang
env: TARGET=unit-pqueue BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-queue BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-set-feldman BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-set-michael-michael BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-set-michael-iterable BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-set-michael-lazy BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-set-skip BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-set-split-iterable BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-set-split-michael BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-set-split-lazy BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-striped-set BUILD_TYPE=Release
- <<: *linux_clang
env: TARGET=unit-stack BUILD_TYPE=Release
# FIXME: building too long. Travis-ci will stop building.
# - BUILD_TYPE=Release TARGET=unit-map
# - BUILD_TYPE=Release TARGET=unit-iset
# - BUILD_TYPE=Release TARGET=unit-tree
## BUILD_TYPE=Debug CXX_COMPILER=clang-4.0
- <<: *linux_clang
env: TARGET=unit-deque BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-ilist BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-list BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-map BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-misc BUILD_TYPE=Debug LINKER_FLAGS=-latomic
- <<: *linux_clang
env: TARGET=unit-pqueue BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-queue BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-iset BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-set BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-striped-set BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-stack BUILD_TYPE=Debug
- <<: *linux_clang
env: TARGET=unit-tree BUILD_TYPE=Debug
# RELEASE
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-deque
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-ilist
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-list
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-misc
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-pqueue
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-queue
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-iset
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-iset-feldman
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-iset-michael-michael
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-iset-michael-lazy
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-iset-michael-iterable
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-iset-skip
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-iset-split-michael
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-iset-split-lazy
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-iset-split-iterable
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-striped-set
- <<: *osx
env: BUILD_TYPE=Release TARGET=unit-stack
# FIXME: building too long. Travis-ci will stop building.
# - <<: *osx
# env: BUILD_TYPE=Release TARGET=unit-map
# - <<: *osx
# env: BUILD_TYPE=Release TARGET=unit-set
# - <<: *osx
# env: BUILD_TYPE=Release TARGET=unit-tree
# DEBUG
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-deque
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-ilist
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-list
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-map
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-misc
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-pqueue
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-queue
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-iset
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-iset-feldman
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-iset-michael-michael
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-iset-michael-lazy
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-iset-michael-iterable
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-iset-skip
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-iset-split-michael
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-iset-split-lazy
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-iset-split-iterable
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-set
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-striped-set
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-stack
- <<: *osx
env: BUILD_TYPE=Debug TARGET=unit-tree

----------------------------------------------------------------------
extern/libcds/CMakeLists.txt (vendored, new file, 250 lines)

@@ -0,0 +1,250 @@
cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR)
cmake_policy(SET CMP0016 NEW)
if(POLICY CMP0042)
cmake_policy(SET CMP0042 NEW)
endif()
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/build/cmake ${CMAKE_MODULE_PATH})
include(TargetArch)
include(CheckIncludeFileCXX)
project(cds)
set(PROJECT_VERSION 2.3.3)
# Options
option(WITH_TESTS "Build unit tests" OFF)
option(WITH_TESTS_COVERAGE "Analyze test coverage using gcov (only for gcc)" OFF)
option(WITH_BOOST_ATOMIC "Use boost atomics (only for boost >= 1.54)" OFF)
option(WITH_ASAN "Build ASan+UBSan instrumented code" OFF)
option(WITH_TSAN "Build TSan instrumented code" OFF)
option(ENABLE_UNIT_TEST "Enable unit test" ON)
option(ENABLE_STRESS_TEST "Enable stress test" ON)
set(CMAKE_TARGET_ARCHITECTURE "" CACHE string "Target build architecture")
find_package(Threads)
if(NOT CMAKE_TARGET_ARCHITECTURE)
target_architecture(CMAKE_TARGET_ARCHITECTURE)
endif()
if(APPLE)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_DARWIN_C_SOURCE")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_DARWIN_C_SOURCE")
endif()
if(WITH_BOOST_ATOMIC)
if(TARGET boost::atomic)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCDS_USE_BOOST_ATOMIC")
link_libraries(boost::atomic)
else()
find_package(Boost 1.53 COMPONENTS atomic)
if(Boost_FOUND)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCDS_USE_BOOST_ATOMIC")
message(STATUS "Boost version allows using of boost.atomic: activated")
endif()
endif()
endif(WITH_BOOST_ATOMIC)
if(WITH_ASAN)
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set(CMAKE_CXX_FLAGS_DEBUG "-D_DEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O1 -fPIC -fsanitize=address,undefined -g -DCDS_ADDRESS_SANITIZER_ENABLED -fno-omit-frame-pointer -fno-optimize-sibling-calls")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O1 -fsanitize=address,undefined -g -DCDS_ASAN_ENABLED -fno-omit-frame-pointer -fno-optimize-sibling-calls")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address,undefined -pie")
else()
message(WARNING "Compiler does not support AddressSanitizer")
endif()
endif(WITH_ASAN)
if(WITH_TSAN)
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set(CMAKE_CXX_FLAGS_DEBUG "-D_DEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O1 -fPIC -fsanitize=thread -g -DCDS_THREAD_SANITIZER_ENABLED -fno-omit-frame-pointer")
set(CMAKE_C_FLAGS "${CMAKE_CXX_FLAGS} -O1 -fPIC -fsanitize=thread -g -DCDS_THREAD_SANITIZER_ENABLED -fno-omit-frame-pointer")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread -pie")
else()
message(WARNING "Compiler does not support ThreadSanitizer")
endif()
endif(WITH_TSAN)
if(WITH_TESTS_COVERAGE)
if(CMAKE_COMPILER_IS_GNUCXX)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage")
message(STATUS "Test coverage analysis: activated")
else()
message(WARNING "Compiler is not GNU gcc! Test coverage couldn't be analyzed")
endif()
endif(WITH_TESTS_COVERAGE)
set(CDS_SHARED_LIBRARY ${PROJECT_NAME})
set(CDS_STATIC_LIBRARY ${PROJECT_NAME}-s)
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
if(CDS_BIN_DIR)
set(EXECUTABLE_OUTPUT_PATH ${CDS_BIN_DIR})
set(LIBRARY_OUTPUT_PATH ${CDS_BIN_DIR})
else()
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
endif()
message(STATUS "Binary output path: ${EXECUTABLE_OUTPUT_PATH}")
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Debug CACHE STRING "Default build type to Debug" FORCE)
endif()
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
string(REGEX MATCHALL "-std=[^ ]+" cxx_std_found ${CMAKE_CXX_FLAGS} " dummy@rg")
if(cxx_std_found)
message("C++ std: ${cxx_std_found}")
else()
list(APPEND LIBCDS_PUBLIC_CXX_FLAGS "-std=c++11")
message("C++ std: -std=c++11 (default)")
endif()
list(APPEND LIBCDS_PRIVATE_CXX_FLAGS "-Wall" "-Wextra" "-pedantic")
if(CMAKE_TARGET_ARCHITECTURE STREQUAL "x86_64")
list(APPEND LIBCDS_PUBLIC_CXX_FLAGS "-mcx16")
set(LIB_SUFFIX "64")
# GCC-7: 128-bit atomics support is implemented via libatomic on amd64
# see https://gcc.gnu.org/ml/gcc/2017-01/msg00167.html
# Maybe, it will be changed in future
if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "7.0.0" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.0.0")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -latomic")
endif()
endif()
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "AIX")
set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> -q -c ${CMAKE_STATIC_LINKER_FLAGS} -o <TARGET> <OBJECTS>")
list(APPEND LIBCDS_PRIVATE_CXX_FLAGS "-Wl,-G")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-brtl")
endif()
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_DEBUG")
CHECK_INCLUDE_FILE_CXX(linux/membarrier.h CDS_HAVE_LINUX_MEMBARRIER_H CMAKE_CXX_FLAGS)
message("Build type -- ${CMAKE_BUILD_TYPE}")
message("Compiler version: ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}")
message("System: ${CMAKE_SYSTEM_NAME} version: ${CMAKE_SYSTEM_VERSION}")
message("Target architecture: ${CMAKE_TARGET_ARCHITECTURE}")
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
message("Compiler flags: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG} ${LIBCDS_PUBLIC_CXX_FLAGS} ${LIBCDS_PRIVATE_CXX_FLAGS}")
else()
message("Compiler flags: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE} ${LIBCDS_PUBLIC_CXX_FLAGS} ${LIBCDS_PRIVATE_CXX_FLAGS}")
endif()
message("Exe flags: ${CMAKE_EXE_LINKER_FLAGS}")
# Component names for separate distribution in rpms, debs etc.
set(LIBRARIES_COMPONENT lib)
set(HEADERS_COMPONENT devel)
set(SOURCES src/init.cpp
src/hp.cpp
src/dhp.cpp
src/urcu_gp.cpp
src/urcu_sh.cpp
src/thread_data.cpp
src/topology_hpux.cpp
src/topology_linux.cpp
src/topology_osx.cpp
src/dllmain.cpp)
add_library(${CDS_SHARED_LIBRARY} SHARED ${SOURCES})
set_target_properties(${CDS_SHARED_LIBRARY} PROPERTIES VERSION ${PROJECT_VERSION}
DEBUG_POSTFIX "_d")
if(MINGW)
set_target_properties(${CDS_SHARED_LIBRARY} PROPERTIES DEFINE_SYMBOL CDS_BUILD_LIB)
endif()
add_library(${CDS_STATIC_LIBRARY} STATIC ${SOURCES})
set_target_properties(${CDS_STATIC_LIBRARY} PROPERTIES DEBUG_POSTFIX "_d")
if(MINGW)
target_compile_definitions(${CDS_STATIC_LIBRARY} PRIVATE CDS_BUILD_STATIC_LIB)
endif()
target_link_libraries(${CDS_SHARED_LIBRARY} PRIVATE ${CMAKE_THREAD_LIBS_INIT})
target_link_libraries(${CDS_STATIC_LIBRARY} PRIVATE ${CMAKE_THREAD_LIBS_INIT})
target_include_directories(${CDS_SHARED_LIBRARY} INTERFACE "$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}>"
$<INSTALL_INTERFACE:include>)
target_include_directories(${CDS_STATIC_LIBRARY} INTERFACE "$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}>"
$<INSTALL_INTERFACE:include>)
target_compile_options(${CDS_SHARED_LIBRARY} PUBLIC "${LIBCDS_PUBLIC_CXX_FLAGS}")
target_compile_options(${CDS_STATIC_LIBRARY} PUBLIC "${LIBCDS_PUBLIC_CXX_FLAGS}")
target_compile_options(${CDS_SHARED_LIBRARY} PRIVATE "${LIBCDS_PRIVATE_CXX_FLAGS}")
target_compile_options(${CDS_STATIC_LIBRARY} PRIVATE "${LIBCDS_PRIVATE_CXX_FLAGS}")
install(TARGETS ${CDS_SHARED_LIBRARY} EXPORT LibCDSConfig LIBRARY DESTINATION lib${LIB_SUFFIX} COMPONENT ${LIBRARIES_COMPONENT} NAMELINK_SKIP RUNTIME DESTINATION lib${LIB_SUFFIX})
install(TARGETS ${CDS_SHARED_LIBRARY} EXPORT LibCDSConfig LIBRARY DESTINATION lib${LIB_SUFFIX} COMPONENT ${HEADERS_COMPONENT} NAMELINK_ONLY)
install(TARGETS ${CDS_STATIC_LIBRARY} EXPORT LibCDSConfig DESTINATION lib${LIB_SUFFIX} COMPONENT ${LIBRARIES_COMPONENT})
install(EXPORT LibCDSConfig FILE LibCDSConfig.cmake NAMESPACE LibCDS:: DESTINATION lib/cmake/LibCDS)
install(DIRECTORY ${PROJECT_SOURCE_DIR}/cds DESTINATION include COMPONENT ${HEADERS_COMPONENT})
if(WITH_TESTS)
enable_testing()
add_subdirectory(${PROJECT_SOURCE_DIR}/test)
message(STATUS "Build tests: activated")
endif(WITH_TESTS)
### FOR PACKAGING in RPM, TGZ, DEB, NSYS...###############################################################################
set(CPACK_PACKAGE_VERSION ${PROJECT_VERSION})
set(CPACK_PACKAGE_NAME ${PROJECT_NAME})
set(CPACK_PACKAGE_CONTACT "Max Khizhinsky <libcds-user@lists.sourceforge.net>")
set(CPACK_PACKAGE_RELEASE 1)
set(CPACK_PACKAGE_INSTALL_DIRECTORY "cds")
set(CPACK_PACKAGE_DESCRIPTION_FILE "${PROJECT_SOURCE_DIR}/build/cmake/description.txt")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Library of concurrent data structures")
set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}")
set(DEPLOY_PACKAGE_FILE_NAME "${CPACK_PACKAGE_FILE_NAME}")
# TGZ specific
set(CPACK_ARCHIVE_COMPONENT_INSTALL ON)
# RPM specific
set(CPACK_RPM_COMPONENT_INSTALL ON)
set(CPACK_RPM_PACKAGE_RELEASE ${CPACK_PACKAGE_RELEASE})
set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/build/cmake/post_install_script.sh")
set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/build/cmake/post_uninstall_script.sh")
set(CPACK_RPM_PACKAGE_URL https://github.com/khizmax/libcds)
set(CPACK_RPM_PACKAGE_LICENSE GPL)
set(CPACK_RPM_PACKAGE_GROUP "System Environment/Base")
set(CPACK_RPM_PACKAGE_REQUIRES "boost >= 1.50")
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION ${CPACK_PACKAGING_INSTALL_PREFIX})
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION /usr/local)
set(CPACK_RPM_devel_PACKAGE_REQUIRES "boost >= 1.50, cds-lib = ${PROJECT_VERSION}")
# DEB specific
set(CPACK_DEB_COMPONENT_INSTALL ON)
set(CPACK_DEBIAN_PACKAGE_DEPENDS "boost (>= 1.50)")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/khizmax/libcds")
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${PROJECT_SOURCE_DIR}/build/cmake/post_install_script.sh;;${PROJECT_SOURCE_DIR}/build/cmake/post_uninstall_script.sh;")
# NSYS specific
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_PACKAGE_NAME}")
set(CPACK_NSIS_DISPLAY_NAME "${CPACK_PACKAGE_NAME}")
set(CPACK_NSIS_CONTACT ${CPACK_PACKAGE_CONTACT})
set(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL ON)
set(CPACK_NSIS_MODIFY_PATH ON)
# Components grouping for Mac OS X and Windows installers
set(CPACK_COMPONENT_${LIBRARIES_COMPONENT}_GROUP "Runtime")
set(CPACK_COMPONENT_${HEADERS_COMPONENT}_GROUP "Development")
set(CPACK_COMPONENT_${LIBRARIES_COMPONENT}_DISPLAY_NAME "Libraries")
set(CPACK_COMPONENT_${HEADERS_COMPONENT}_DISPLAY_NAME "C++ Headers")
set(CPACK_COMPONENT_${HEADERS_COMPONENT}_DEPENDS ${LIBRARIES_COMPONENT})
set(CPACK_COMPONENT_GROUP_DEVELOPMENT_DESCRIPTION "All of the tools you'll ever need to develop lock-free oriented software with libcds")
set(CPACK_COMPONENT_GROUP_RUNTIME_DESCRIPTION "Only libcds library for runtime")
include(CPack)

----------------------------------------------------------------------
extern/libcds/LICENSE (vendored, new file, 23 lines)

@@ -0,0 +1,23 @@
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

----------------------------------------------------------------------
extern/libcds/appveyor.yml (vendored, new file, 91 lines)

@@ -0,0 +1,91 @@
build: false
shallow_clone: true # (git clone --depth 1)
image:
- Visual Studio 2017
install:
- sed -i "/boost/d" conanfile.txt # delete boost from conanfile. Conan renamed name of boost libs so we'll use appveyour's boost
- cmd: echo "Downloading conan..."
- cmd: set PATH=%PATH%;%PYTHON%/Scripts/
- cmd: pip.exe install conan
- cmd: conan user # Create the conan data directory
- cmd: conan --version
- cmd: conan install --build=missing -s arch=x86 -s build_type=%configuration% .
#- cmd: conan install --build=missing -s arch=x86_64 -s build_type=%configuration% conanfileWin.txt
- cmd: echo =======================
- cmd: echo %configuration%
- cmd: echo %platform%
- cmd: echo =======================
- cmd: set GTEST_ROOT=C:/projects/libcds/deps
- cmd: set BOOST_PATH=C:\Libraries\boost_1_66_0
- cmd: set GTEST_LIB32=C:\projects\libcds\deps\lib;C:\Libraries\boost_1_66_0\lib32-msvc-14.1
- cmd: dir %GTEST_LIB32%
- cmd: echo =======================
# - cmd: set GTEST_LIB64=C:\projects\libcds\deps\lib
environment:
matrix:
# require a library with name libboost_thread-vc141-mt-x32-1_66.lib,
- TARGET: gtest-stack
- TARGET: gtest-deque
- TARGET: gtest-pqueue
- TARGET: gtest-queue
- TARGET: gtest-map-skip
- TARGET: gtest-map-split-michael
- TARGET: gtest-set-split-michael
- TARGET: gtest-misc
- TARGET: gtest-ilist-iterable
- TARGET: gtest-ilist-lazy
- TARGET: gtest-ilist-michael
- TARGET: gtest-iset-feldman
- TARGET: gtest-iset-michael
- TARGET: gtest-iset-michael-iterable
- TARGET: gtest-iset-michael-lazy
- TARGET: gtest-iset-skip
- TARGET: gtest-iset-split-iterable
- TARGET: gtest-iset-split-lazy
- TARGET: gtest-iset-split-michael
- TARGET: gtest-list-iterable
- TARGET: gtest-list-lazy
- TARGET: gtest-list-michael
- TARGET: gtest-map-feldman
- TARGET: gtest-map-michael
- TARGET: gtest-map-michael-iterable
- TARGET: gtest-map-michael-lazy
- TARGET: gtest-map-split-iterable
- TARGET: gtest-map-split-lazy
- TARGET: gtest-set-feldman
- TARGET: gtest-set-michael
- TARGET: gtest-set-michael-iterable
- TARGET: gtest-set-michael-lazy
- TARGET: gtest-set-skip
- TARGET: gtest-set-split-iterable
- TARGET: gtest-set-split-lazy
- TARGET: gtest-striped-map-boost
- TARGET: gtest-striped-map-cuckoo
- TARGET: gtest-striped-map-std
- TARGET: gtest-striped-set-boost
- TARGET: gtest-striped-set-cuckoo
- TARGET: gtest-striped-set-std
- TARGET: gtest-tree-bronson
- TARGET: gtest-tree-ellen
configuration:
- Release
platform:
- Win32
build_script:
- msbuild projects/Win/vc141/cds.vcxproj /p:Configuration=%configuration% /p:PlatformTarget="Win32"
- msbuild projects/Win/vc141/%TARGET%.vcxproj /p:Configuration=%configuration% /p:PlatformTarget="Win32"
#- msbuild projects/Win/vc141/cds.vcxproj /p:Configuration=%configuration% /p:PlatformTarget=x64
#- msbuild projects/Win/vc141/%TARGET%.vcxproj /p:Configuration=%configuration% /p:PlatformTarget=x64
test_script:
- cmd: set path=%path%;%GTEST_LIB32%;C:\projects\libcds\bin\vc.v141\%platform%-release\
- cmd: C:\projects\libcds\bin\vc.v141\%platform%-release\%TARGET%.exe

----------------------------------------------------------------------

@@ -0,0 +1,239 @@
#########################################
# Generic parameters
workspace: $WORKSPACE
libcds-source: source
make-job: 10
gtest-include: $GTEST_ROOT/googletest/include
#########################################
#GCC-4.8
gcc-4.8-root: $GCC48_ROOT/bin
gcc-4.8-cxx: g++-4.8
gcc-4.8-cc: gcc-4.8
gcc-4.8-exe-ldflags: -L$GCC48_ROOT/lib64 -Wl,-rpath=$GCC48_ROOT/lib64
gcc-4.8-extlib: rt
gcc-4.8-boost: $BOOST_ROOT
gcc-4.8-64-boost-lib: stage64-gcc4.8/lib
gcc-4.8-gtest: $GTEST_ROOT
gcc-4.8-64-gtest-lib: $GTEST_ROOT/lib-gcc4.8/libgtest.a
########################################
#GCC-4.9
gcc-4.9-root: $GCC49_ROOT/bin
gcc-4.9-cxx: g++-4.9
gcc-4.9-cc: gcc-4.9
gcc-4.9-exe-ldflags: -Wl,-rpath=$GCC49_ROOT/lib64
gcc-4.9-extlib: rt
gcc-4.9-boost: $BOOST_ROOT
gcc-4.9-64-boost-lib: stage64-gcc4.9/lib
gcc-4.9-gtest: $GTEST_ROOT
gcc-4.9-64-gtest-lib: $GTEST_ROOT/lib-gcc4.9/libgtest.a
########################################
#GCC-5
gcc-5-root: $GCC5_ROOT/bin
gcc-5-cxx: g++-5
gcc-5-cc: gcc-5
gcc-5-boost: $BOOST_ROOT
gcc-5-exe-ldflags: -Wl,-rpath=$GCC5_ROOT/lib64
gcc-5-extlib: rt
gcc-5-64-boost-lib: stage64-gcc5/lib
gcc-5-64-asan-boost-lib: stage64-gcc5-asan/lib
gcc-5-64-tsan-boost-lib: stage64-gcc5-tsan/lib
gcc-5-gtest: $GTEST_ROOT
gcc-5-64-gtest-lib: $GTEST_ROOT/lib-gcc5/libgtest.a
########################################
#GCC-6
gcc-6-root: $GCC6_ROOT/bin
gcc-6-cxx: g++-6
gcc-6-cc: gcc-6
gcc-6-boost: $BOOST_ROOT
gcc-6-cxxflags: -march=native -std=c++14
gcc-6-exe-ldflags: -Wl,-rpath=$GCC6_ROOT/lib64
gcc-6-extlib: rt
gcc-6-64-boost-lib: stage64-gcc6/lib
gcc-6-64-asan-boost-lib: stage64-gcc6-asan/lib
gcc-6-64-tsan-boost-lib: stage64-gcc6-tsan/lib
gcc-6-gtest: $GTEST_ROOT
gcc-6-64-gtest-lib: $GTEST_ROOT/lib-gcc6/libgtest.a
########################################
#GCC-7
gcc-7-root: $GCC7_ROOT/bin
gcc-7-cxx: g++-7
gcc-7-cc: gcc-7
gcc-7-boost: $BOOST_ROOT
gcc-7-cxxflags: -march=native -std=c++1z
gcc-7-exe-ldflags: -Wl,-rpath=$GCC7_ROOT/lib64
gcc-7-extlib: rt
gcc-7-64-boost-lib: stage64-gcc7/lib
gcc-7-64-asan-boost-lib: stage64-gcc7-asan/lib
gcc-7-64-tsan-boost-lib: stage64-gcc7-tsan/lib
gcc-7-gtest: $GTEST_ROOT
gcc-7-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a
########################################
#GCC-8
gcc-8-root: $GCC8_ROOT/bin
gcc-8-cxx: g++-8
gcc-8-cc: gcc-8
gcc-8-boost: $BOOST_ROOT
gcc-8-cxxflags: -march=native -std=c++17 -Wmultistatement-macros
gcc-8-exe-ldflags: -Wl,-rpath=$GCC8_ROOT/lib64
gcc-8-extlib: rt
gcc-8-path: $DEVTOOLSET6_BIN
gcc-8-64-boost-lib: stage64-gcc7/lib
gcc-8-64-asan-boost-lib: stage64-gcc7-asan/lib
gcc-8-64-tsan-boost-lib: stage64-gcc7-tsan/lib
gcc-8-gtest: $GTEST_ROOT
gcc-8-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a
########################################
# clang-3.6
clang-3.6-root: $CLANG36_ROOT/bin
clang-3.6-ld-lib-path: $GCC5_ROOT/lib64
clang-3.6-cxx: clang++
clang-3.6-cc: clang
clang-3.6-cxxflags: -Wdocumentation
clang-3.6-exe-ldflags: -L$GCC5_ROOT/lib64 -latomic -Wl,-rpath=$GCC5_ROOT/lib64
clang-3.6-extlib: rt
clang-3.6-boost: $BOOST_ROOT
clang-3.6-64-boost-lib: stage64-clang3.6/lib
clang-3.6-gtest: $GTEST_ROOT
clang-3.6-64-gtest-lib: $GTEST_ROOT/lib-clang3.6/libgtest.a
########################################
# clang-3.7
clang-3.7-root: $CLANG37_ROOT/bin
clang-3.7-ld-lib-path: $GCC6_ROOT/lib64
clang-3.7-cxx: clang++
clang-3.7-cc: clang
clang-3.7-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.7-exe-ldflags: -L$CLANG37_ROOT/lib -Wl,-rpath=$CLANG37_ROOT/lib -lc++abi
clang-3.7-extlib: rt
clang-3.7-boost: $BOOST_ROOT
clang-3.7-64-boost-lib: stage64-clang3.7/lib
clang-3.7-gtest: $GTEST_ROOT
clang-3.7-64-gtest-lib: $GTEST_ROOT/lib-clang3.7/libgtest.a
clang-3.7-cmake-flags: -DCMAKE_C_COMPILER_WORKS=1 -DCMAKE_CXX_COMPILER_WORKS=1
########################################
# clang-3.8
clang-3.8-root: $CLANG38_ROOT/bin
clang-3.8-ld-lib-path: $GCC6_ROOT/lib64
clang-3.8-cxx: clang++
clang-3.8-cc: clang
clang-3.8-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.8-exe-ldflags: -L$CLANG38_ROOT/lib -Wl,-rpath=$CLANG38_ROOT/lib
clang-3.8-extlib: rt
clang-3.8-boost: $BOOST_ROOT
clang-3.8-64-boost-lib: stage64-clang3.8/lib
clang-3.8-gtest: $GTEST_ROOT
clang-3.8-64-gtest-lib: $GTEST_ROOT/lib-clang3.8/libgtest.a
########################################
# clang-3.9
clang-3.9-root: $CLANG39_ROOT/bin
clang-3.9-ld-lib-path: $GCC6_ROOT/lib64
clang-3.9-cxx: clang++
clang-3.9-cc: clang
clang-3.9-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.9-exe-ldflags: -L$CLANG39_ROOT/lib -Wl,-rpath=$CLANG39_ROOT/lib
clang-3.9-extlib: rt
clang-3.9-boost: $BOOST_ROOT
clang-3.9-64-boost-lib: stage64-clang3.9/lib
clang-3.9-64-asan-boost-lib: stage64-clang3.9-asan/lib
clang-3.9-64-tsan-boost-lib: stage64-clang3.9-tsan/lib
clang-3.9-gtest: $GTEST_ROOT
clang-3.9-64-gtest-lib: $GTEST_ROOT/lib-clang3.9/libgtest.a
########################################
# clang-4
clang-4-root: $CLANG4_ROOT/bin
clang-4-cxx: clang++
clang-4-cc: clang
clang-4-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++14
clang-4-exe-ldflags: -L$CLANG4_ROOT/lib -Wl,-rpath=$CLANG4_ROOT/lib
clang-4-extlib: rt
clang-4-boost: $BOOST_ROOT
clang-4-64-boost-lib: stage64-clang4/lib
clang-4-64-asan-boost-lib: stage64-clang4-asan/lib
clang-4-64-tsan-boost-lib: stage64-clang4-tsan/lib
clang-4-gtest: $GTEST_ROOT
clang-4-64-gtest-lib: $GTEST_ROOT/lib-clang4/libgtest.a
########################################
# clang-5
clang-5-root: $CLANG5_ROOT/bin
clang-5-cxx: clang++
clang-5-cc: clang
clang-5-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++1z
clang-5-exe-ldflags: -L$CLANG5_ROOT/lib -Wl,-rpath=$CLANG5_ROOT/lib
clang-5-extlib: rt
clang-5-path: $DEVTOOLSET6_BIN
clang-5-boost: $LIB_ROOT/boost_1_65_1
clang-5-64-boost-lib: stage64-clang5-std17/lib
clang-5-64-asan-boost-lib: stage64-clang5-asan/lib
clang-5-64-tsan-boost-lib: stage64-clang5-tsan/lib
clang-5-gtest: $GTEST_ROOT
clang-5-64-gtest-lib: $GTEST_ROOT/lib-clang5/libgtest.a
########################################
# clang-6
clang-6-root: $CLANG6_ROOT/bin
clang-6-cxx: clang++
clang-6-cc: clang
clang-6-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++17
clang-6-exe-ldflags: -L$CLANG6_ROOT/lib -Wl,-rpath=$CLANG6_ROOT/lib
clang-6-extlib: rt
clang-6-path: $DEVTOOLSET6_BIN
clang-6-boost: $LIB_ROOT/boost_1_65_1
clang-6-64-boost-lib: stage64-clang6-std17/lib
clang-6-64-asan-boost-lib: stage64-clang6-asan/lib
clang-6-64-tsan-boost-lib: stage64-clang6-tsan/lib
clang-6-gtest: $GTEST_ROOT
clang-6-64-gtest-lib: $GTEST_ROOT/lib-clang6/libgtest.a
########################################
# clang-7
clang-7-root: $CLANG6_ROOT/bin
clang-7-cxx: clang++
clang-7-cc: clang
clang-7-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++17
clang-7-exe-ldflags: -fuse-ld=lld -L$CLANG7_ROOT/lib -Wl,-rpath=$CLANG7_ROOT/lib
clang-7-extlib: rt
clang-7-path: $DEVTOOLSET6_BIN
clang-7-boost: $LIB_ROOT/boost_1_65_1
clang-7-64-boost-lib: stage64-clang7-std17/lib
clang-7-64-asan-boost-lib: stage64-clang7-asan/lib
clang-7-64-tsan-boost-lib: stage64-clang7-tsan/lib
clang-7-gtest: $GTEST_ROOT
clang-7-64-gtest-lib: $GTEST_ROOT/lib-clang6/libgtest.a

----------------------------------------------------------------------

@@ -0,0 +1,88 @@
#! /bin/bash
# Useful envvars:
# CI_SCRIPT_PATH - path where to find scripts
# TOOLSET - toolset: x64-gcc-5, x64-clang-3.9 and so on
# BUILD_TYPE - build type: 'dbg', 'rel', 'asan', 'tsan'
# WORKSPACE - path where to build
env|sort
case "$TOOLSET" in
"x64-gcc-4.8")
echo "GCC-4.8 '$BUILD_TYPE', toolset root: $GCC48_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.8-64 $*
EXIT_CODE=$?
;;
"x64-gcc-4.9")
echo "GCC-4.9 '$BUILD_TYPE', toolset root: $GCC49_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.9-64 $*
EXIT_CODE=$?
;;
"x64-gcc-5")
echo "GCC-5 '$BUILD_TYPE', toolset root: $GCC5_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-5-64 $*
EXIT_CODE=$?
;;
"x64-gcc-6")
echo "GCC-6 '$BUILD_TYPE', toolset root: $GCC6_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-6-64 $*
EXIT_CODE=$?
;;
"x64-gcc-7")
echo "GCC-7 '$BUILD_TYPE', toolset root: $GCC7_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-7-64 $*
EXIT_CODE=$?
;;
"x64-gcc-8")
echo "GCC-8 '$BUILD_TYPE', toolset root: $GCC8_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-8-64 $*
EXIT_CODE=$?
;;
"x64-clang-3.6")
echo "clang-3.6 '$BUILD_TYPE', toolset root: $CLANG36_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.6-64 $*
EXIT_CODE=$?
;;
"x64-clang-3.7")
echo "clang-3.7 '$BUILD_TYPE', toolset root: $CLANG37_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.7-64 $*
EXIT_CODE=$?
;;
"x64-clang-3.8")
echo "clang-3.8 '$BUILD_TYPE', toolset root: $CLANG38_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.8-64 $*
EXIT_CODE=$?
;;
"x64-clang-3.9")
echo "clang-3.9 '$BUILD_TYPE', toolset root: $CLANG39_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.9-64 $*
EXIT_CODE=$?
;;
"x64-clang-4")
echo "clang-4 '$BUILD_TYPE', toolset root: $CLANG4_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-4-64 $*
EXIT_CODE=$?
;;
"x64-clang-5")
echo "clang-5 '$BUILD_TYPE', toolset root: $CLANG5_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-5-64 $*
EXIT_CODE=$?
;;
"x64-clang-6")
echo "clang-6 '$BUILD_TYPE', toolset root: $CLANG6_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-6-64 $*
EXIT_CODE=$?
;;
"x64-clang-7")
echo "clang-7 '$BUILD_TYPE', toolset root: $CLANG7_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-7-64 $*
EXIT_CODE=$?
;;
* )
echo "Undefined toolset '$TOOLSET'"
exit 1
;;
esac
exit $EXIT_CODE

----------------------------------------------------------------------

@@ -0,0 +1,38 @@
#CMAKE_2_8_12=/home/libcds-ci/bin/cmake/cmake-2.8.12/bin
CMAKE_3_6_2=/home/libcds-ci/bin/cmake-3.6/bin
CMAKE3=$CMAKE_3_6_2
PATH=$CMAKE3:$PATH:$HOME/.local/bin:$HOME/bin
DEVTOOLSET6_BIN=/opt/rh/devtoolset-6/root/usr/bin
TOOLSET_ROOT=$HOME/bin
GCC48_ROOT=$TOOLSET_ROOT/gcc-4.8
GCC49_ROOT=$TOOLSET_ROOT/gcc-4.9
GCC5_ROOT=$TOOLSET_ROOT/gcc-5
GCC6_ROOT=$TOOLSET_ROOT/gcc-6
GCC7_ROOT=$TOOLSET_ROOT/gcc-7
GCC8_ROOT=$TOOLSET_ROOT/gcc-8
CLANG36_ROOT=$TOOLSET_ROOT/clang-3.6
CLANG37_ROOT=$TOOLSET_ROOT/clang-3.7
CLANG38_ROOT=$TOOLSET_ROOT/clang-3.8
CLANG39_ROOT=$TOOLSET_ROOT/clang-3.9
CLANG4_ROOT=$TOOLSET_ROOT/clang-4
CLANG5_ROOT=$TOOLSET_ROOT/clang-5
CLANG6_ROOT=$TOOLSET_ROOT/clang-6
CLANG7_ROOT=$TOOLSET_ROOT/clang-7
CLANG_STDLIB="-stdlib=libc++"
CLANG37_CXXFLAGS=$CLANG_STDLIB
CLANG38_CXXFLAGS=$CLANG_STDLIB
CLANG39_CXXFLAGS=$CLANG_STDLIB
CLANG4_CXXFLAGS=$CLANG_STDLIB
CLANG5_CXXFLAGS=$CLANG_STDLIB
CLANG6_CXXFLAGS=$CLANG_STDLIB
CLANG7_CXXFLAGS="$CLANG_STDLIB -fuse-ld=lld"
LIB_ROOT=$HOME/lib
BOOST_ROOT=$LIB_ROOT/boost
GTEST_ROOT=$LIB_ROOT/gtest

----------------------------------------------------------------------
extern/libcds/build/CI/VASEx-CI/cds-libs (vendored, new file, 195 lines)

@@ -0,0 +1,195 @@
#########################################
# Generic parameters
workspace: $WORKSPACE
libcds-source: source
make-job: 10
gtest-include: $GTEST_ROOT/googletest/include
#########################################
#GCC-4.8
gcc-4.8-root: $GCC48_ROOT/bin
gcc-4.8-cxx: g++-4.8
gcc-4.8-cc: gcc-4.8
gcc-4.8-exe-ldflags: -L$GCC48_ROOT/lib64 -Wl,-rpath=$GCC48_ROOT/lib64
gcc-4.8-boost: $BOOST_ROOT
gcc-4.8-64-boost-lib: stage64-gcc4.8/lib
gcc-4.8-gtest: $GTEST_ROOT
gcc-4.8-64-gtest-lib: $GTEST_ROOT/lib-gcc4.8/libgtest.a
########################################
#GCC-4.9
gcc-4.9-root: $GCC49_ROOT/bin
gcc-4.9-cxx: g++-4.9
gcc-4.9-cc: gcc-4.9
gcc-4.9-exe-ldflags: -Wl,-rpath=$GCC49_ROOT/lib64
gcc-4.9-boost: $BOOST_ROOT
gcc-4.9-64-boost-lib: stage64-gcc4.9/lib
gcc-4.9-gtest: $GTEST_ROOT
gcc-4.9-64-gtest-lib: $GTEST_ROOT/lib-gcc4.9/libgtest.a
########################################
#GCC-5
gcc-5-root: $GCC5_ROOT/bin
gcc-5-cxx: g++-5
gcc-5-cc: gcc-5
gcc-5-boost: $BOOST_ROOT
gcc-5-exe-ldflags: -Wl,-rpath=$GCC5_ROOT/lib64
gcc-5-64-boost-lib: stage64-gcc5/lib
gcc-5-64-asan-boost-lib: stage64-gcc5-asan/lib
gcc-5-64-tsan-boost-lib: stage64-gcc5-tsan/lib
gcc-5-gtest: $GTEST_ROOT
gcc-5-64-gtest-lib: $GTEST_ROOT/lib-gcc5/libgtest.a
########################################
#GCC-6
gcc-6-root: $GCC6_ROOT/bin
gcc-6-cxx: g++-6
gcc-6-cc: gcc-6
gcc-6-boost: $BOOST_ROOT
gcc-6-cxxflags: -march=native -std=c++14
gcc-6-exe-ldflags: -Wl,-rpath=$GCC6_ROOT/lib64
gcc-6-64-boost-lib: stage64-gcc6/lib
gcc-6-64-asan-boost-lib: stage64-gcc6-asan/lib
gcc-6-64-tsan-boost-lib: stage64-gcc6-tsan/lib
gcc-6-gtest: $GTEST_ROOT
gcc-6-64-gtest-lib: $GTEST_ROOT/lib-gcc6/libgtest.a
########################################
#GCC-7
gcc-7-root: $GCC7_ROOT/bin
gcc-7-cxx: g++-7
gcc-7-cc: gcc-7
gcc-7-boost: $BOOST_ROOT
gcc-7-cxxflags: -march=native -std=c++1z
gcc-7-exe-ldflags: -Wl,-rpath=$GCC7_ROOT/lib64
gcc-7-64-boost-lib: stage64-gcc7/lib
gcc-7-64-asan-boost-lib: stage64-gcc7-asan/lib
gcc-7-64-tsan-boost-lib: stage64-gcc7-tsan/lib
gcc-7-gtest: $GTEST_ROOT
gcc-7-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a
########################################
#GCC-8
gcc-8-root: $GCC8_ROOT/bin
gcc-8-cxx: g++-8
gcc-8-cc: gcc-8
gcc-8-boost: $BOOST_ROOT
gcc-8-cxxflags: -march=native -std=c++17 -Wmultistatement-macros
gcc-8-exe-ldflags: -Wl,-rpath=$GCC8_ROOT/lib64
gcc-8-extlib: rt
gcc-8-64-boost-lib: stage64-gcc7/lib
gcc-8-64-asan-boost-lib: stage64-gcc7-asan/lib
gcc-8-64-tsan-boost-lib: stage64-gcc7-tsan/lib
gcc-8-gtest: $GTEST_ROOT
gcc-8-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a
########################################
# clang-3.6
clang-3.6-root: $CLANG36_ROOT/bin
clang-3.6-ld-lib-path: $GCC6_ROOT/lib64
clang-3.6-cxx: clang++
clang-3.6-cc: clang
clang-3.6-cxxflags: -Wdocumentation
clang-3.6-exe-ldflags: -L$GCC5_ROOT/lib64 -latomic -Wl,-rpath=$GCC5_ROOT/lib64
clang-3.6-boost: $BOOST_ROOT
clang-3.6-64-boost-lib: stage64-clang3.6/lib
clang-3.6-gtest: $GTEST_ROOT
clang-3.6-64-gtest-lib: $GTEST_ROOT/lib-clang3.6/libgtest.a
########################################
# clang-3.7
clang-3.7-root: $CLANG37_ROOT/bin
clang-3.7-cxx: clang++
clang-3.7-cc: clang
clang-3.7-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.7-exe-ldflags: -L$CLANG37_ROOT/lib -Wl,-rpath=$CLANG37_ROOT/lib -lc++abi
clang-3.7-boost: $BOOST_ROOT
clang-3.7-64-boost-lib: stage64-clang3.7/lib
clang-3.7-gtest: $GTEST_ROOT
clang-3.7-64-gtest-lib: $GTEST_ROOT/lib-clang3.7/libgtest.a
clang-3.7-cmake-flags: -DCMAKE_C_COMPILER_WORKS=1 -DCMAKE_CXX_COMPILER_WORKS=1
########################################
# clang-3.8
clang-3.8-root: $CLANG38_ROOT/bin
clang-3.8-cxx: clang++
clang-3.8-cc: clang
clang-3.8-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.8-exe-ldflags: -L$CLANG38_ROOT/lib -Wl,-rpath=$CLANG38_ROOT/lib
clang-3.8-boost: $BOOST_ROOT
clang-3.8-64-boost-lib: stage64-clang3.8/lib
clang-3.8-gtest: $GTEST_ROOT
clang-3.8-64-gtest-lib: $GTEST_ROOT/lib-clang3.8/libgtest.a
########################################
# clang-3.9
clang-3.9-root: $CLANG39_ROOT/bin
clang-3.9-cxx: clang++
clang-3.9-cc: clang
clang-3.9-cxxflags: -stdlib=libc++ -Wdocumentation
clang-3.9-exe-ldflags: -L$CLANG39_ROOT/lib -Wl,-rpath=$CLANG39_ROOT/lib
clang-3.9-boost: $BOOST_ROOT
clang-3.9-64-boost-lib: stage64-clang3.9/lib
clang-3.9-64-asan-boost-lib: stage64-clang3.9-asan/lib
clang-3.9-64-tsan-boost-lib: stage64-clang3.9-tsan/lib
clang-3.9-gtest: $GTEST_ROOT
clang-3.9-64-gtest-lib: $GTEST_ROOT/lib-clang3.9/libgtest.a
########################################
# clang-4
clang-4-root: $CLANG4_ROOT/bin
clang-4-cxx: clang++
clang-4-cc: clang
clang-4-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++14
clang-4-exe-ldflags: -L$CLANG4_ROOT/lib -Wl,-rpath=$CLANG4_ROOT/lib
clang-4-boost: $BOOST_ROOT
clang-4-64-boost-lib: stage64-clang4/lib
clang-4-64-asan-boost-lib: stage64-clang4-asan/lib
clang-4-64-tsan-boost-lib: stage64-clang4-tsan/lib
clang-4-gtest: $GTEST_ROOT
clang-4-64-gtest-lib: $GTEST_ROOT/lib-clang4/libgtest.a
########################################
# clang-5
clang-5-root: $CLANG5_ROOT/bin
clang-5-cxx: clang++
clang-5-cc: clang
clang-5-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++1z
clang-5-exe-ldflags: -L$CLANG5_ROOT/lib -Wl,-rpath=$CLANG5_ROOT/lib
clang-5-boost: $LIB_ROOT/boost_1_65_1
clang-5-64-boost-lib: stage64-clang5-std17/lib
clang-5-64-asan-boost-lib: stage64-clang5-asan/lib
clang-5-64-tsan-boost-lib: stage64-clang5-tsan/lib
clang-5-gtest: $GTEST_ROOT
clang-5-64-gtest-lib: $GTEST_ROOT/lib-clang5/libgtest.a
########################################
# clang-6
clang-6-root: $CLANG6_ROOT/bin
clang-6-cxx: clang++
clang-6-cc: clang
clang-6-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++17
clang-6-exe-ldflags: -L$CLANG6_ROOT/lib -Wl,-rpath=$CLANG6_ROOT/lib
clang-6-boost: $LIB_ROOT/boost_1_65_1
clang-6-64-boost-lib: stage64-clang6-std17/lib
clang-6-64-asan-boost-lib: stage64-clang6-asan/lib
clang-6-64-tsan-boost-lib: stage64-clang6-tsan/lib
clang-6-gtest: $GTEST_ROOT
clang-6-64-gtest-lib: $GTEST_ROOT/lib-clang6/libgtest.a

View File

@ -0,0 +1,81 @@
#! /bin/bash
# Useful envvars:
# CI_SCRIPT_PATH - path where to find scripts
# TOOLSET - toolset: x64-gcc-5, x64-clang-3.9 and so on
# BUILD_TYPE - build type: 'dbg', 'rel', 'asan', 'tsan'
# WORKSPACE - path where to build
env|sort
case "$TOOLSET" in
"x64-gcc-4.8")
echo "GCC-4.8 '$BUILD_TYPE', toolset root: $GCC48_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.8-64 $*
exit $?
;;
"x64-gcc-4.9")
echo "GCC-4.9 '$BUILD_TYPE', toolset root: $GCC49_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.9-64 $*
exit $?
;;
"x64-gcc-5")
echo "GCC-5 '$BUILD_TYPE', toolset root: $GCC5_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-5-64 $*
exit $?
;;
"x64-gcc-6")
echo "GCC-6 '$BUILD_TYPE', toolset root: $GCC6_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-6-64 $*
exit $?
;;
"x64-gcc-7")
echo "GCC-7 '$BUILD_TYPE', toolset root: $GCC7_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-7-64 $*
exit $?
;;
"x64-gcc-8")
echo "GCC-8 '$BUILD_TYPE', toolset root: $GCC8_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-8-64 $*
exit $?
;;
"x64-clang-3.6")
echo "clang-3.6 '$BUILD_TYPE', toolset root: $CLANG36_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.6-64 $*
exit $?
;;
"x64-clang-3.7")
echo "clang-3.7 '$BUILD_TYPE', toolset root: $CLANG37_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.7-64 $*
exit $?
;;
"x64-clang-3.8")
echo "clang-3.8 '$BUILD_TYPE', toolset root: $CLANG38_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.8-64 $*
exit $?
;;
"x64-clang-3.9")
echo "clang-3.9 '$BUILD_TYPE', toolset root: $CLANG39_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.9-64 $*
exit $?
;;
"x64-clang-4")
echo "clang-4 '$BUILD_TYPE', toolset root: $CLANG4_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-4-64 $*
exit $?
;;
"x64-clang-5")
echo "clang-5 '$BUILD_TYPE', toolset root: $CLANG5_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-5-64 $*
exit $?
;;
"x64-clang-6")
echo "clang-6 '$BUILD_TYPE', toolset root: $CLANG6_ROOT"
$CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-6-64 $*
exit $?
;;
* )
echo "Undefined toolset '$TOOLSET'"
exit 1
;;
esac

35
extern/libcds/build/CI/VASEx-CI/ci-env vendored Normal file
View File

@ -0,0 +1,35 @@
CMAKE_2_8_12=/home/libcds-ci/bin/cmake/cmake-2.8.12/bin
CMAKE_3_6_2=/home/libcds-ci/bin/cmake/cmake-3.6.2/bin
CMAKE3=$CMAKE_3_6_2
PATH=$CMAKE3:$PATH:$HOME/.local/bin:$HOME/bin
TOOLSET_ROOT=$HOME/bin
GCC48_ROOT=$TOOLSET_ROOT/gcc-4.8
GCC49_ROOT=$TOOLSET_ROOT/gcc-4.9
GCC5_ROOT=$TOOLSET_ROOT/gcc-5
GCC6_ROOT=$TOOLSET_ROOT/gcc-6
GCC7_ROOT=$TOOLSET_ROOT/gcc-7
GCC8_ROOT=$TOOLSET_ROOT/gcc-8
CLANG35_ROOT=$TOOLSET_ROOT/clang-3.5
CLANG36_ROOT=$TOOLSET_ROOT/clang-3.6
CLANG37_ROOT=$TOOLSET_ROOT/clang-3.7
CLANG38_ROOT=$TOOLSET_ROOT/clang-3.8
CLANG39_ROOT=$TOOLSET_ROOT/clang-3.9
CLANG4_ROOT=$TOOLSET_ROOT/clang-4
CLANG5_ROOT=$TOOLSET_ROOT/clang-5
CLANG6_ROOT=$TOOLSET_ROOT/clang-6
CLANG_STDLIB="-stdlib=libc++"
CLANG37_CXXFLAGS=$CLANG_STDLIB
CLANG38_CXXFLAGS=$CLANG_STDLIB
CLANG39_CXXFLAGS=$CLANG_STDLIB
CLANG4_CXXFLAGS=$CLANG_STDLIB
CLANG5_CXXFLAGS=$CLANG_STDLIB
CLANG6_CXXFLAGS=$CLANG_STDLIB
LIB_ROOT=$HOME/lib
BOOST_ROOT=$LIB_ROOT/boost
GTEST_ROOT=$LIB_ROOT/gtest

113
extern/libcds/build/CI/cmake-gen vendored Normal file
View File

@ -0,0 +1,113 @@
#! /usr/bin/perl
my $compiler=shift;
my $bitness =shift;
my $build =shift;
$build="rel" unless $build;
my $cmake_build="RELEASE";
$cmake_build="DEBUG" if $build eq 'dbg';
my $cds_libs="cds-libs";
# get generic props
my $workspace=get_gen_prop("workspace") || "$HOME";
my $cds_source=get_gen_prop("libcds-source") || "../libcds";
my $make_jobs=get_gen_prop("make-job") || 2;
# get compiler-specific props
my $comp_root=get_prop("root");
my $boost=get_prop( "boost" );
my $boost_libs=get_prop( "boost-lib" );
my $gtest=get_prop("gtest");
my $gtest_lib=get_prop( "gtest-lib");
my $gtest_inc=get_prop("gtest-include") || get_gen_prop("gtest-include");
my $cxx=get_prop("cxx") or $compiler;
my $cc=get_prop("cc") or $compiler;
my $cxxflags=get_prop("cxxflags");
my $ldflags=get_prop("ldflags");
my $cmake_exe_ldflags=get_prop("exe-ldflags");
my $ext_lib=get_prop("extlib");
my $ld_lib_path=get_prop("ld-lib-path");
my $sys_path=get_prop("path");
my $cmake_flags=get_prop("cmake-flags");
my $filename="cds-$build-$compiler-$bitness";
open( my $out, ">", $filename ) or die "Cannot open cds-$build-$compiler-$bitness";
print $out "#! /bin/sh\n\n";
print $out "root=$workspace\n";
print $out "CDS_SOURCE=\$root/$cds_source\n";
print $out "OBJ_ROOT=\$root/obj\n";
print $out "BIN_ROOT=\$root/bin\n";
print $out "GTEST_ROOT=$gtest\n" if $gtest;
print $out "export PATH=$sys_path:\$PATH\n" if $sys_path;
print $out "\n";
print $out "rm -fr \$OBJ_ROOT\n";
print $out "rm -fr \$BIN_ROOT\n";
print $out "mkdir -p \$OBJ_ROOT\n";
print $out "#cp -f run-ctest-rel \$OBJ_ROOT/run-ctest\n" if $build eq 'rel';
print $out "#cp -f run-ctest-dbg \$OBJ_ROOT/run-ctest\n" unless $build eq 'rel';
print $out "cd \$OBJ_ROOT\n";
print $out "\n";
print $out "LD_LIBRARY_PATH=$ld_lib_path:\$LD_LIBRARY_PATH \\\n" if $ld_lib_path;
print $out "LDFLAGS=\"$ldflags\" \\\n" if $ldflags;
print $out "cmake -G \"Unix Makefiles\" \\\n";
print $out " -DCMAKE_BUILD_TYPE=$cmake_build \\\n";
print $out " -DCMAKE_C_COMPILER=$comp_root/$cc \\\n";
print $out " -DCMAKE_CXX_COMPILER=$comp_root/$cxx \\\n";
print $out " -DCMAKE_CXX_FLAGS=\"$cxxflags\" \\\n" if $cxxflags;
print $out " -DCMAKE_EXE_LINKER_FLAGS=\"$cmake_exe_ldflags\" \\\n" if $cmake_exe_ldflags;
print $out " -DCDS_BIN_DIR=\$BIN_ROOT \\\n";
print $out " -DWITH_TESTS=ON \\\n";
print $out " -DWITH_ASAN=ON \\\n" if $build eq 'asan';
print $out " -DWITH_TSAN=ON \\\n" if $build eq 'tsan';
print $out " -DBOOST_ROOT=$boost \\\n";
print $out " -DBOOST_LIBRARYDIR=$boost/$boost_libs \\\n" if $boost_libs;
print $out " -DGTEST_INCLUDE_DIRS=$gtest_inc \\\n" if $gtest_inc;
print $out " -DGTEST_LIBRARIES=$gtest_lib \\\n" if $gtest_lib;
print $out " -DEXTERNAL_SYSTEM_LIBS=\"$ext_lib\" \\\n" if $ext_lib;
print $out " $cmake_flags \\\n" if $cmake_flags;
print $out " \$CDS_SOURCE && \\\n";
print $out "make -j $make_jobs \$* \n";
close $out;
chmod 0755, $filename;
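# Property lookup below falls back from the most specific key to the most
# generic one: "<compiler>-<bitness>-<build>-<prop>:", then
# "<compiler>-<bitness>-<prop>:", then "<compiler>-<prop>:".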
sub get_prop($@)
{
my $what=shift;
my $key="$compiler-$bitness-$build-$what:";
my $grep = `grep -P $key $cds_libs`;
if ( $grep ) {
my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/;
return $ret[0] if @ret;
}
$key = "$compiler-$bitness-$what:";
$grep = `grep -P $key $cds_libs`;
if ( $grep ) {
my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/;
return $ret[0] if @ret;
}
$key = "$compiler-$what:";
$grep = `grep -P $key $cds_libs`;
if ( $grep ) {
my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/;
return $ret[0] if @ret;
}
}
sub get_gen_prop($@)
{
my $key=shift;
$key = "$key:";
my $grep = `grep -P $key $cds_libs`;
if ( $grep ) {
my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/;
return $ret[0] if @ret;
}
}

48
extern/libcds/build/CI/gen-all vendored Normal file
View File

@ -0,0 +1,48 @@
#! /bin/sh
./cmake-gen gcc-4.8 64 dbg
./cmake-gen gcc-4.8 64 rel
./cmake-gen gcc-4.9 64 dbg
./cmake-gen gcc-4.9 64 rel
./cmake-gen gcc-5 64 dbg
./cmake-gen gcc-5 64 rel
./cmake-gen gcc-5 64 tsan
./cmake-gen gcc-5 64 asan
./cmake-gen gcc-6 64 dbg
./cmake-gen gcc-6 64 rel
./cmake-gen gcc-6 64 tsan
./cmake-gen gcc-6 64 asan
./cmake-gen gcc-7 64 dbg
./cmake-gen gcc-7 64 rel
./cmake-gen gcc-7 64 tsan
./cmake-gen gcc-7 64 asan
./cmake-gen gcc-8 64 dbg
./cmake-gen gcc-8 64 rel
./cmake-gen gcc-8 64 tsan
./cmake-gen gcc-8 64 asan
./cmake-gen clang-3.6 64 dbg
./cmake-gen clang-3.6 64 rel
./cmake-gen clang-3.7 64 dbg
./cmake-gen clang-3.7 64 rel
./cmake-gen clang-3.8 64 dbg
./cmake-gen clang-3.8 64 rel
./cmake-gen clang-3.9 64 dbg
./cmake-gen clang-3.9 64 rel
./cmake-gen clang-3.9 64 asan
./cmake-gen clang-3.9 64 tsan
./cmake-gen clang-4 64 dbg
./cmake-gen clang-4 64 rel
./cmake-gen clang-4 64 asan
./cmake-gen clang-4 64 tsan
./cmake-gen clang-5 64 dbg
./cmake-gen clang-5 64 rel
./cmake-gen clang-5 64 asan
./cmake-gen clang-5 64 tsan
./cmake-gen clang-6 64 dbg
./cmake-gen clang-6 64 rel
./cmake-gen clang-6 64 asan
./cmake-gen clang-6 64 tsan
./cmake-gen clang-7 64 dbg
./cmake-gen clang-7 64 rel
./cmake-gen clang-7 64 asan
./cmake-gen clang-7 64 tsan

View File

@ -0,0 +1,31 @@
#!/bin/bash
set -e
set -x
if [[ "$(uname -s)" == 'Darwin' ]]; then
brew update || brew update
brew outdated pyenv || brew upgrade pyenv
brew install pyenv-virtualenv
brew install cmake || true
if which pyenv > /dev/null; then
eval "$(pyenv init -)"
fi
pyenv install 2.7.10
pyenv virtualenv 2.7.10 conan
pyenv rehash
pyenv activate conan
pip install conan --upgrade
pip install conan_package_tools
conan user
exit 0
fi
pip install --user conan --upgrade
pip install --user conan_package_tools
conan user

26
extern/libcds/build/CI/travis-ci/run.sh vendored Normal file
View File

@ -0,0 +1,26 @@
#!/bin/bash
set -e
set -x
CONAN_INSTALL_FLAGS="-s compiler.libcxx=libstdc++11"
if [[ "$(uname -s)" == 'Darwin' ]]; then
if which pyenv > /dev/null; then
eval "$(pyenv init -)"
fi
pyenv activate conan
CONAN_INSTALL_FLAGS=""
fi
#export CXX=$CXX_COMPILER
#export CC=$C_COMPILER
mkdir build-test && cd build-test
conan install --build $CONAN_INSTALL_FLAGS -s build_type=$BUILD_TYPE ..
cmake -DCMAKE_PREFIX_PATH="$TRAVIS_BUILD_DIR/build-test/deps" -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_EXE_LINKER_FLAGS=$LINKER_FLAGS -DWITH_TESTS=ON ..
cmake --build . -- -j2 $TARGET
if [[ "$(uname -s)" == 'Darwin' ]]; then
export DYLD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/build-test/deps/lib
fi
ctest -VV -R $TARGET

View File

@ -0,0 +1,141 @@
# Source: https://github.com/axr/solar-cmake
# Based on the Qt 5 processor detection code, so should be very accurate
# https://qt.gitorious.org/qt/qtbase/blobs/master/src/corelib/global/qprocessordetection.h
# Currently handles arm (v5, v6, v7), x86 (32/64), ia64, and ppc (32/64)
# Regarding POWER/PowerPC, just as is noted in the Qt source,
# "There are many more known variants/revisions that we do not handle/detect."
set(archdetect_c_code "
#if defined(__arm__) || defined(__TARGET_ARCH_ARM)
#if defined(__ARM_ARCH_7__) \\
|| defined(__ARM_ARCH_7A__) \\
|| defined(__ARM_ARCH_7R__) \\
|| defined(__ARM_ARCH_7M__) \\
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 7)
#error cmake_ARCH armv7
#elif defined(__ARM_ARCH_6__) \\
|| defined(__ARM_ARCH_6J__) \\
|| defined(__ARM_ARCH_6T2__) \\
|| defined(__ARM_ARCH_6Z__) \\
|| defined(__ARM_ARCH_6K__) \\
|| defined(__ARM_ARCH_6ZK__) \\
|| defined(__ARM_ARCH_6M__) \\
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 6)
#error cmake_ARCH armv6
#elif defined(__ARM_ARCH_5TEJ__) \\
|| (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 5)
#error cmake_ARCH armv5
#else
#error cmake_ARCH arm
#endif
#elif defined(__aarch64__)
#if defined(__ARM_ARCH) && __ARM_ARCH == 8
#error cmake_ARCH armv8
#else
#error cmake_ARCH arm64
#endif
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
#error cmake_ARCH i386
#elif defined(__x86_64) || defined(__x86_64__) || defined(__amd64) || defined(_M_X64)
#error cmake_ARCH x86_64
#elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
#error cmake_ARCH ia64
#elif defined(__ppc__) || defined(__ppc) || defined(__powerpc__) \\
|| defined(_ARCH_COM) || defined(_ARCH_PWR) || defined(_ARCH_PPC) \\
|| defined(_M_MPPC) || defined(_M_PPC)
#if defined(__ppc64__) || defined(__powerpc64__) || defined(__64BIT__)
#error cmake_ARCH ppc64
#else
#error cmake_ARCH ppc
#endif
#endif
#error cmake_ARCH unknown
")
# Set ppc_support to TRUE before including this file or ppc and ppc64
# will be treated as invalid architectures since they are no longer supported by Apple
function(target_architecture output_var)
if(APPLE AND CMAKE_OSX_ARCHITECTURES)
# On OS X we use CMAKE_OSX_ARCHITECTURES *if* it was set
# First let's normalize the order of the values
# Note that it's not possible to compile PowerPC applications if you are using
# the OS X SDK version 10.6 or later - you'll need 10.4/10.5 for that, so we
# disable it by default
# See this page for more information:
# http://stackoverflow.com/questions/5333490/how-can-we-restore-ppc-ppc64-as-well-as-full-10-4-10-5-sdk-support-to-xcode-4
# Architecture defaults to i386 or ppc on OS X 10.5 and earlier, depending on the CPU type detected at runtime.
# On OS X 10.6+ the default is x86_64 if the CPU supports it, i386 otherwise.
foreach(osx_arch ${CMAKE_OSX_ARCHITECTURES})
if("${osx_arch}" STREQUAL "ppc" AND ppc_support)
set(osx_arch_ppc TRUE)
elseif("${osx_arch}" STREQUAL "i386")
set(osx_arch_i386 TRUE)
elseif("${osx_arch}" STREQUAL "x86_64")
set(osx_arch_x86_64 TRUE)
elseif("${osx_arch}" STREQUAL "ppc64" AND ppc_support)
set(osx_arch_ppc64 TRUE)
else()
message(FATAL_ERROR "Invalid OS X arch name: ${osx_arch}")
endif()
endforeach()
# Now add all the architectures in our normalized order
if(osx_arch_ppc)
list(APPEND ARCH ppc)
endif()
if(osx_arch_i386)
list(APPEND ARCH i386)
endif()
if(osx_arch_x86_64)
list(APPEND ARCH x86_64)
endif()
if(osx_arch_ppc64)
list(APPEND ARCH ppc64)
endif()
else()
file(WRITE "${CMAKE_BINARY_DIR}/arch.c" "${archdetect_c_code}")
enable_language(C)
# Detect the architecture in a rather creative way...
# This compiles a small C program which is a series of ifdefs that selects a
# particular #error preprocessor directive whose message string contains the
# target architecture. The program will always fail to compile (both because the
# file is not a valid C program, and obviously because of the presence of the
# #error preprocessor directives)... but by exploiting the preprocessor in this
# way, we can detect the correct target architecture even when cross-compiling,
# since the program itself never needs to be run (only the compiler/preprocessor)
try_run(
run_result_unused
compile_result_unused
"${CMAKE_BINARY_DIR}"
"${CMAKE_BINARY_DIR}/arch.c"
COMPILE_OUTPUT_VARIABLE ARCH
CMAKE_FLAGS CMAKE_OSX_ARCHITECTURES=${CMAKE_OSX_ARCHITECTURES}
)
# Parse the architecture name from the compiler output
string(REGEX MATCH "cmake_ARCH ([a-zA-Z0-9_]+)" ARCH "${ARCH}")
# Get rid of the value marker leaving just the architecture name
string(REPLACE "cmake_ARCH " "" ARCH "${ARCH}")
# If we are compiling with an unknown architecture this variable should
# already be set to "unknown" but in the case that it's empty (i.e. due
# to a typo in the code), then set it to unknown
if (NOT ARCH)
set(ARCH unknown)
endif()
endif()
set(${output_var} "${ARCH}" PARENT_SCOPE)
endfunction()
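# Usage sketch (from a CMakeLists.txt that includes this file):
#   target_architecture(ARCH)
#   message(STATUS "Building for architecture: ${ARCH}")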

View File

@ -0,0 +1 @@
libcds - Concurrent Data Structure C++ library

View File

@ -0,0 +1 @@
ldconfig

View File

@ -0,0 +1 @@
ldconfig

104
extern/libcds/build/cmake/readme.md vendored Normal file
View File

@ -0,0 +1,104 @@
Building library with CMake
===============
CDS supports both in-source and out-of-source cmake builds. The project now uses:
- CMake: general cross-platform building
- CTest: all unit tests can be run in a standard way by *ctest* command
- CPack: for making rpm/deb/nsys etc. packages
Compiling and testing
----------
**Building out-of-source in "RELEASE" mode ("DEBUG" is default)**
- Create an empty directory anywhere for building, for instance `libcds-debug`
- Prepare: `cmake -DCMAKE_BUILD_TYPE=RELEASE <path to the project's root directory with CMakeLists.txt>`
- Compile: `make -j4`
- As a result you'll see shared and static cds libraries in the build directory
**Warning**: We strongly recommend against using the static cds library. The static library is not tested and not maintained; use it at your own risk.
Running `cmake -L <path to the project's root directory with CMakeLists.txt>` shows some additional variables that activate extra features:
- `WITH_TESTS:BOOL=OFF`: if you want to build the library with unit-testing support, use *-DWITH_TESTS=ON* at the prepare step. Be careful with this flag: compile time increases dramatically
- `WITH_TESTS_COVERAGE:BOOL=OFF`: Analyze test coverage using gcov (only for gcc)
- `WITH_BOOST_ATOMIC:BOOL=OFF`: Use boost atomics (only for boost >= 1.54)
- `WITH_ASAN:BOOL=OFF`: compile libcds with AddressSanitizer instrumentation
- `WITH_TSAN:BOOL=OFF`: compile libcds with ThreadSanitizer instrumentation
Additional gtest hints (for unit and stress tests only):
- `GTEST_INCLUDE_DIRS=path`: gives full `path` to gtest include dir.
- `GTEST_LIBRARY=path`: gives full `path` to `libgtest.a`.
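For instance, a prepare step that points cmake at an out-of-tree gtest might look like this (a sketch; the paths are placeholders):
```
cmake -DWITH_TESTS=ON \
      -DGTEST_INCLUDE_DIRS=$HOME/lib/gtest/googletest/include \
      -DGTEST_LIBRARY=$HOME/lib/gtest/lib/libgtest.a \
      ../libcds
```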
Packaging
----------
*CPack* is used for packaging; the command *cpack -G <Generator>* creates the corresponding packages for a particular operating system. The project currently supports building the following package types:
- *RPM*: redhat-based linux distribs
- *DEB*: debian-based linux distribs
- *TGZ*: simple "*tgz*" archive with library and headers
- *NSIS*: Windows installer package (NSIS should be installed)
"Live" building and packaging example
----------
- `git clone https://github.com/khizmax/libcds.git`
- `mkdir libcds-release`
- `cd libcds-release`
- `cmake -DWITH_TESTS=ON -DCMAKE_BUILD_TYPE=RELEASE ../libcds`
```
-- The C compiler identification is GNU 4.8.3
-- The CXX compiler identification is GNU 4.8.3
...
-- Found Threads: TRUE
-- Boost version: 1.54.0
-- Found the following Boost libraries:
-- system
-- thread
Build type -- RELEASE
-- Configuring done
-- Generating done
-- Build files have been written to: <...>/libcds-release
```
- `make -j4`
```
Scanning dependencies of target cds
Scanning dependencies of target test-common
Scanning dependencies of target cds-s
Scanning dependencies of target test-hdr-offsetof
[ 1%] Building CXX object CMakeFiles/cds-s.dir/src/hp_gc.cpp.o
...
[100%] Built target test-hdr
```
- `ctest`
```
Test project /home/kel/projects_cds/libcds-debug
Start 1: test-hdr
1/7 Test #1: test-hdr ......................... Passed 1352.24 sec
Start 2: cdsu-misc
2/7 Test #2: cdsu-misc ........................ Passed 0.00 sec
Start 3: cdsu-map
...
```
- `cpack -G RPM`
```
CPack: Create package using RPM
CPack: Install projects
CPack: - Run preinstall target for: cds
CPack: - Install project: cds
CPack: - Install component: devel
CPack: - Install component: lib
CPack: Create package
CPackRPM:Debug: Adding /usr/local to builtin omit list.
CPackRPM: Will use GENERATED spec file: /home/kel/projects_cds/libcds-debug/_CPack_Packages/Linux/RPM/SPECS/cds-devel.spec
CPackRPM: Will use GENERATED spec file: /home/kel/projects_cds/libcds-debug/_CPack_Packages/Linux/RPM/SPECS/cds-lib.spec
CPack: - package: /home/kel/projects_cds/libcds-debug/cds-2.1.0-1-devel.rpm generated.
CPack: - package: /home/kel/projects_cds/libcds-debug/cds-2.1.0-1-lib.rpm generated.
```
Future development
----------
- CDash: use CI system

496
extern/libcds/cds/algo/atomic.h vendored Normal file
View File

@ -0,0 +1,496 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CXX11_ATOMIC_H
#define CDSLIB_CXX11_ATOMIC_H
#include <cds/details/defs.h>
#include <cds/user_setup/cache_line.h>
namespace cds {
/// C++11 Atomic library support
/** @anchor cds_cxx11_atomic
\p libcds can use the following implementations of the atomics:
- STL \p &lt;atomic&gt;. This is used by default
- \p boost.atomic for boost 1.54 and above. To use it you should define \p CDS_USE_BOOST_ATOMIC for
your compiler invocation, for example, for gcc specify \p -DCDS_USE_BOOST_ATOMIC
in command line
- \p libcds implementation of atomic operation according to C++11 standard as
specified in <a href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf">N3242, p.29</a>.
\p libcds implementation is not fully standard compliant: it provides only the C++ part of the standard;
for example, \p libcds has no static initialization of the atomic variables and some other C features.
However, that implementation is enough for the library's purposes. Supported architectures: x86, amd64,
ia64 (Itanium) 64bit, 64bit Sparc. To use \p libcds atomics you should define \p CDS_USE_LIBCDS_ATOMIC
in the compiler command line (\p -DCDS_USE_LIBCDS_ATOMIC for gcc/clang).
@note For the Clang compiler \p libcds doesn't use the native \p libc++ \p &lt;atomic&gt; due to some problems.
Instead, \p libcds atomics are used by default, or you can try to use \p boost.atomic.
The library defines \p atomics alias for atomic namespace:
- <tt>namespace atomics = std</tt> for STL
- <tt>namespace atomics = boost</tt> for \p boost.atomic
- <tt>namespace atomics = cds::cxx11_atomic</tt> for library-provided atomic implementation
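For example (a minimal sketch), code written against the \p atomics alias works with any of the backends:
\code
atomics::atomic<int> x( 0 );
x.store( 42, atomics::memory_order_release );
int v = x.load( atomics::memory_order_acquire );
\endcode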
*/
namespace cxx11_atomic {
}} // namespace cds::cxx11_atomic
//@cond
#if defined(CDS_USE_BOOST_ATOMIC)
// boost atomic
# include <boost/version.hpp>
# if BOOST_VERSION >= 105400
# include <boost/atomic.hpp>
namespace atomics = boost;
# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace boost {
# define CDS_CXX11_ATOMIC_END_NAMESPACE }
# else
# error "Boost version 1.54 or above is needed for boost.atomic"
# endif
#elif defined(CDS_USE_LIBCDS_ATOMIC)
// libcds atomic
# include <cds/compiler/cxx11_atomic.h>
namespace atomics = cds::cxx11_atomic;
# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomic {
# define CDS_CXX11_ATOMIC_END_NAMESPACE }}
#else
// Compiler provided C++11 atomic
# include <atomic>
namespace atomics = std;
# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace std {
# define CDS_CXX11_ATOMIC_END_NAMESPACE }
#endif
//@endcond
namespace cds {
/// Atomic primitives
/**
This namespace contains useful primitives derived from <tt>std::atomic</tt>.
*/
namespace atomicity {
/// Atomic event counter.
/**
This class is based on <tt>std::atomic_size_t</tt>.
It uses relaxed memory ordering \p memory_order_relaxed and may be used as a statistics counter.
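A minimal usage sketch (the counter is assumed to be shared between threads):
\code
cds::atomicity::event_counter nHit;
++nHit;          // relaxed fetch_add
nHit += 10;      // add a batch of events
size_t n = nHit; // relaxed load of the current value
\endcode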
*/
class event_counter
{
//@cond
atomics::atomic_size_t m_counter;
//@endcond
public:
typedef size_t value_type ; ///< Type of counter
public:
/// Initializes event counter with zero
event_counter() noexcept
: m_counter(size_t(0))
{}
/// Assign operator
/**
Returns \p n.
*/
value_type operator =(
value_type n ///< new value of the counter
) noexcept
{
m_counter.exchange( n, atomics::memory_order_relaxed );
return n;
}
/// Addition
/**
Returns new value of the atomic counter.
*/
size_t operator +=(
size_t n ///< addendum
) noexcept
{
return m_counter.fetch_add( n, atomics::memory_order_relaxed ) + n;
}
/// Subtraction
/**
Returns new value of the atomic counter.
*/
size_t operator -=(
size_t n ///< subtrahend
) noexcept
{
return m_counter.fetch_sub( n, atomics::memory_order_relaxed ) - n;
}
/// Get current value of the counter
operator size_t () const noexcept
{
return m_counter.load( atomics::memory_order_relaxed );
}
/// Preincrement
size_t operator ++() noexcept
{
return m_counter.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
}
/// Postincrement
size_t operator ++(int) noexcept
{
return m_counter.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Predecrement
size_t operator --() noexcept
{
return m_counter.fetch_sub( 1, atomics::memory_order_relaxed ) - 1;
}
/// Postdecrement
size_t operator --(int) noexcept
{
return m_counter.fetch_sub( 1, atomics::memory_order_relaxed );
}
/// Get current value of the counter
size_t get() const noexcept
{
return m_counter.load( atomics::memory_order_relaxed );
}
/// Resets the counter to 0
void reset() noexcept
{
m_counter.store( 0, atomics::memory_order_release );
}
};
/// Atomic item counter
/**
This class is a simplified interface around \p std::atomic_size_t.
The class supports getting the current value of the counter and incrementing/decrementing it.
See also: an improved version that eliminates false sharing - \p cache_friendly_item_counter.
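A minimal usage sketch:
\code
cds::atomicity::item_counter itemCount;
++itemCount;                      // an item has been inserted
--itemCount;                      // an item has been removed
size_t nSize = itemCount.value(); // current number of items
\endcode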
*/
class item_counter
{
public:
typedef atomics::atomic_size_t atomic_type; ///< atomic type used
typedef size_t counter_type; ///< Integral item counter type (size_t)
private:
//@cond
atomic_type m_Counter; ///< Atomic item counter
//@endcond
public:
/// Default ctor initializes the counter to zero.
item_counter()
: m_Counter(counter_type(0))
{}
/// Returns current value of the counter
counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const
{
return m_Counter.load( order );
}
/// Same as \ref value() with relaxed memory ordering
operator counter_type() const
{
return value();
}
/// Returns underlying atomic interface
atomic_type& getAtomic()
{
return m_Counter;
}
/// Returns underlying atomic interface (const)
const atomic_type& getAtomic() const
{
return m_Counter;
}
/// Increments the counter. Semantics: postincrement
counter_type inc(atomics::memory_order order = atomics::memory_order_relaxed )
{
return m_Counter.fetch_add( 1, order );
}
/// Increments the counter. Semantics: postincrement
counter_type inc( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed )
{
return m_Counter.fetch_add( count, order );
}
/// Decrements the counter. Semantics: postdecrement
counter_type dec(atomics::memory_order order = atomics::memory_order_relaxed)
{
return m_Counter.fetch_sub( 1, order );
}
/// Decrements the counter. Semantics: postdecrement
counter_type dec( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed )
{
return m_Counter.fetch_sub( count, order );
}
/// Preincrement
counter_type operator ++()
{
return inc() + 1;
}
/// Postincrement
counter_type operator ++(int)
{
return inc();
}
/// Predecrement
counter_type operator --()
{
return dec() - 1;
}
/// Postdecrement
counter_type operator --(int)
{
return dec();
}
/// Increment by \p count
counter_type operator +=( counter_type count )
{
return inc( count ) + count;
}
/// Decrement by \p count
counter_type operator -=( counter_type count )
{
return dec( count ) - count;
}
/// Resets count to 0
void reset(atomics::memory_order order = atomics::memory_order_relaxed)
{
m_Counter.store( 0, order );
}
};
#if CDS_COMPILER == CDS_COMPILER_CLANG
// CLang unhappy: pad1_ and pad2_ - unused private field warning
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wunused-private-field"
#endif
/// Atomic cache-friendly item counter
/**
Atomic item counter with cache-line padding to avoid false sharing.
Adding cache-line padding before and after the atomic counter eliminates contention
in the read path of many containers and can notably improve search operations in sets/maps.
*/
class cache_friendly_item_counter
{
public:
typedef atomics::atomic_size_t atomic_type; ///< atomic type used
typedef size_t counter_type; ///< Integral item counter type (size_t)
private:
//@cond
char pad1_[cds::c_nCacheLineSize];
atomic_type m_Counter; ///< Atomic item counter
char pad2_[cds::c_nCacheLineSize - sizeof( atomic_type )];
//@endcond
public:
/// Default ctor initializes the counter to zero.
cache_friendly_item_counter()
: m_Counter(counter_type(0))
{}
/// Returns current value of the counter
counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const
{
return m_Counter.load( order );
}
/// Same as \ref value() with relaxed memory ordering
operator counter_type() const
{
return value();
}
/// Returns underlying atomic interface
atomic_type& getAtomic()
{
return m_Counter;
}
/// Returns underlying atomic interface (const)
const atomic_type& getAtomic() const
{
return m_Counter;
}
/// Increments the counter. Semantics: postincrement
counter_type inc(atomics::memory_order order = atomics::memory_order_relaxed )
{
return m_Counter.fetch_add( 1, order );
}
/// Increments the counter. Semantics: postincrement
counter_type inc( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed )
{
return m_Counter.fetch_add( count, order );
}
/// Decrements the counter. Semantics: postdecrement
counter_type dec(atomics::memory_order order = atomics::memory_order_relaxed)
{
return m_Counter.fetch_sub( 1, order );
}
/// Decrements the counter. Semantics: postdecrement
counter_type dec( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed )
{
return m_Counter.fetch_sub( count, order );
}
/// Preincrement
counter_type operator ++()
{
return inc() + 1;
}
/// Postincrement
counter_type operator ++(int)
{
return inc();
}
/// Predecrement
counter_type operator --()
{
return dec() - 1;
}
/// Postdecrement
counter_type operator --(int)
{
return dec();
}
/// Increment by \p count
counter_type operator +=( counter_type count )
{
return inc( count ) + count;
}
/// Decrement by \p count
counter_type operator -=( counter_type count )
{
return dec( count ) - count;
}
/// Resets count to 0
void reset(atomics::memory_order order = atomics::memory_order_relaxed)
{
m_Counter.store( 0, order );
}
};
#if CDS_COMPILER == CDS_COMPILER_CLANG
# pragma GCC diagnostic pop
#endif
/// Empty item counter
/**
This class may be used instead of \ref item_counter when you do not need the full \ref item_counter interface.
All methods of the class are empty and return 0.
An object of this class should not be used in a data structure whose behavior significantly depends on item counting
(for example, in many hash map implementations).
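A typical use is plugging it in as an item-counter policy when item counting is not needed
(a sketch; \p some_container here is a hypothetical container namespace):
\code
struct my_traits: public some_container::traits
{
typedef cds::atomicity::empty_item_counter item_counter;
};
\endcode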
*/
class empty_item_counter {
public:
typedef size_t counter_type ; ///< Counter type
public:
/// Returns 0
static counter_type value(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
{
return 0;
}
/// Same as \ref value(), always returns 0.
operator counter_type() const
{
return value();
}
/// Dummy increment. Always returns 0
static counter_type inc(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
{
return 0;
}
/// Dummy increment. Always returns 0
static counter_type inc( counter_type /*count*/, atomics::memory_order /*order*/ = atomics::memory_order_relaxed )
{
return 0;
}
/// Dummy decrement. Always returns 0
static counter_type dec(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
{
return 0;
}
/// Dummy decrement. Always returns 0
static counter_type dec( counter_type /*count*/, atomics::memory_order /*order*/ = atomics::memory_order_relaxed )
{
return 0;
}
/// Dummy pre-increment. Always returns 0
counter_type operator ++() const
{
return 0;
}
/// Dummy post-increment. Always returns 0
counter_type operator ++(int) const
{
return 0;
}
/// Dummy pre-decrement. Always returns 0
counter_type operator --() const
{
return 0;
}
/// Dummy post-decrement. Always returns 0
counter_type operator --(int) const
{
return 0;
}
/// Dummy increment by \p count, always returns 0
counter_type operator +=( counter_type count )
{
CDS_UNUSED( count );
return 0;
}
/// Dummy decrement by \p count, always returns 0
counter_type operator -=( counter_type count )
{
CDS_UNUSED( count );
return 0;
}
/// Dummy function
static void reset(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
{}
};
} // namespace atomicity
} // namespace cds
#endif // #ifndef CDSLIB_CXX11_ATOMIC_H

View File

@ -0,0 +1,439 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_BACKOFF_STRATEGY_H
#define CDSLIB_BACKOFF_STRATEGY_H
/*
Filename: backoff_strategy.h
Created 2007.03.01 by Maxim Khiszinsky
Description:
Generic back-off strategies
Editions:
2007.03.01 Maxim Khiszinsky Created
2008.10.02 Maxim Khiszinsky Backoff action transferred from constructor to operator() for all backoff schemas
2009.09.10 Maxim Khiszinsky reset() function added
*/
#include <utility> // declval
#include <thread>
#include <chrono>
#include <cds/compiler/backoff.h>
namespace cds {
/// Different backoff schemes
/**
A back-off schema may be used in lock-free algorithms when the algorithm cannot perform some action because a conflict
with another concurrent operation is encountered. In this case the current thread can do other work or can call the
processor's performance hint.
The interface of a back-off strategy is the following:
\code
struct backoff_strategy {
void operator()();
template <typename Predicate> bool operator()( Predicate pr );
void reset();
};
\endcode
The \p operator() call performs the back-off strategy's action. It is the main part of a back-off strategy.
The interruptible back-off <tt>template < typename Predicate > bool operator()( Predicate pr )</tt>
allows interrupting back-off spinning if the \p pr predicate returns \p true.
\p Predicate is a functor with the following interface:
\code
struct predicate {
bool operator()();
};
\endcode
The \p reset() function resets the internal state of the back-off strategy to its initial state. It is required for some
back-off strategies, for example, exponential back-off.
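A minimal usage sketch of an interruptible back-off (assuming a \p std::atomic<bool> flag shared between threads):
\code
std::atomic<bool> ready( false );
cds::backoff::Default bkoff;
// spin, backing off adaptively, until the predicate returns true
while ( !bkoff( [&]() { return ready.load( std::memory_order_acquire ); } ))
;
\endcode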
*/
namespace backoff {
/// Empty backoff strategy. Do nothing
struct empty {
//@cond
void operator ()() const noexcept
{}
template <typename Predicate>
bool operator()(Predicate pr) const noexcept( noexcept(std::declval<Predicate>()()))
{
return pr();
}
static void reset() noexcept
{}
//@endcond
};
/// Switch to another thread (yield). Good for thread preemption architecture.
struct yield {
//@cond
void operator ()() const noexcept
{
std::this_thread::yield();
}
template <typename Predicate>
bool operator()(Predicate pr) const noexcept( noexcept(std::declval<Predicate>()()))
{
if ( pr())
return true;
operator()();
return false;
}
static void reset() noexcept
{}
//@endcond
};
/// Random pause
/**
This back-off strategy calls processor-specific pause hint instruction
if one is available for the processor architecture.
*/
struct pause {
//@cond
void operator ()() const noexcept
{
# ifdef CDS_backoff_hint_defined
platform::backoff_hint();
# endif
}
template <typename Predicate>
bool operator()(Predicate pr) const noexcept( noexcept(std::declval<Predicate>()()))
{
if ( pr())
return true;
operator()();
return false;
}
static void reset() noexcept
{}
//@endcond
};
/// Processor hint back-off
/**
This back-off schema calls the performance hint instruction if it is available for the current processor.
Otherwise, it calls \p nop.
*/
struct hint
{
//@cond
void operator ()() const noexcept
{
# if defined(CDS_backoff_hint_defined)
platform::backoff_hint();
# elif defined(CDS_backoff_nop_defined)
platform::backoff_nop();
# endif
}
template <typename Predicate>
bool operator()(Predicate pr) const noexcept(noexcept(std::declval<Predicate>()()))
{
if ( pr())
return true;
operator()();
return false;
}
static void reset() noexcept
{}
//@endcond
};
/// \p backoff::exponential const traits
struct exponential_const_traits
{
typedef hint fast_path_backoff; ///< Fast-path back-off strategy
typedef yield slow_path_backoff; ///< Slow-path back-off strategy
enum: size_t {
lower_bound = 16, ///< Minimum spinning limit
upper_bound = 16 * 1024 ///< Maximum spinning limit
};
};
/// \p backoff::exponential runtime traits
struct exponential_runtime_traits
{
typedef hint fast_path_backoff; ///< Fast-path back-off strategy
typedef yield slow_path_backoff; ///< Slow-path back-off strategy
static size_t lower_bound; ///< Minimum spinning limit, default is 16
static size_t upper_bound; ///< Maximum spinning limit, default is 16*1024
};
/// Exponential back-off
/**
This back-off strategy is composite: it consists of a fast-path (spinning) and a slow-path (yielding)
back-off strategy. At first, the strategy repeatedly applies the fast-path back-off
(spinning phase) until the internal counter of failed attempts reaches its maximum
spinning value. Then the strategy transits to the high-contention phase,
where it applies the slow-path back-off until \p reset() is called.
On each spinning iteration the internal spinning counter is doubled.
Selecting the best value for the maximum spinning limit is a platform- and application-specific task.
The limits are described by the \p Traits template parameter.
There are two types of \p Traits:
- constant traits \p exponential_const_traits - specifies the lower and upper limits
as compile-time constants; to change the limits you should recompile your application
- runtime traits \p exponential_runtime_traits - specifies the limits as the \p lower_bound
and \p upper_bound static variables which can be changed at runtime to tune the back-off strategy.
The traits class must declare two data members:
- \p lower_bound - the lower bound of the spinning loop
- \p upper_bound - the upper bound of the spinning loop
You may use the \p Traits template parameter to separate back-off implementations.
For example, you may define two \p exponential back-offs that are the best for your tasks A and B:
\code
#include <cds/algo/backoff_strategy.h>
namespace bkoff = cds::backoff;
// the best bounds for task A
struct traits_A: public bkoff::exponential_const_traits
{
static size_t lower_bound;
static size_t upper_bound;
};
size_t traits_A::lower_bound = 1024;
size_t traits_A::upper_bound = 8 * 1024;
// the best bounds for task B
struct traits_B: public bkoff::exponential_const_traits
{
static size_t lower_bound;
static size_t upper_bound;
};
size_t traits_B::lower_bound = 16;
size_t traits_B::upper_bound = 1024;
// define your back-off specialization
typedef bkoff::exponential<traits_A> expBackOffA;
typedef bkoff::exponential<traits_B> expBackOffB;
\endcode
*/
template <typename Traits = exponential_const_traits >
class exponential
{
public:
typedef Traits traits; ///< Traits
typedef typename traits::fast_path_backoff spin_backoff ; ///< spin (fast-path) back-off strategy
typedef typename traits::slow_path_backoff yield_backoff ; ///< yield (slow-path) back-off strategy
protected:
size_t m_nExpCur ; ///< Current spin counter in range [traits::lower_bound, traits::upper_bound]
spin_backoff m_bkSpin ; ///< Spinning (fast-path) phase back-off strategy
yield_backoff m_bkYield ; ///< Yield phase back-off strategy
public:
/// Default ctor
exponential() noexcept
: m_nExpCur( traits::lower_bound )
{}
//@cond
void operator ()() noexcept(noexcept(std::declval<spin_backoff>()()) && noexcept(std::declval<yield_backoff>()()))
{
if ( m_nExpCur <= traits::upper_bound ) {
for ( size_t n = 0; n < m_nExpCur; ++n )
m_bkSpin();
m_nExpCur *= 2;
}
else
m_bkYield();
}
template <typename Predicate>
bool operator()( Predicate pr ) noexcept( noexcept(std::declval<Predicate>()()) && noexcept(std::declval<spin_backoff>()()) && noexcept(std::declval<yield_backoff>()()))
{
if ( m_nExpCur <= traits::upper_bound ) {
for ( size_t n = 0; n < m_nExpCur; ++n ) {
if ( m_bkSpin(pr))
return true;
}
m_nExpCur *= 2;
}
else
return m_bkYield(pr);
return false;
}
void reset() noexcept( noexcept( std::declval<spin_backoff>().reset()) && noexcept( std::declval<yield_backoff>().reset()))
{
m_nExpCur = traits::lower_bound;
m_bkSpin.reset();
m_bkYield.reset();
}
//@endcond
};
//@cond
template <typename FastPathBkOff, typename SlowPathBkOff>
struct make_exponential
{
struct traits: public exponential_const_traits
{
typedef FastPathBkOff fast_path_backoff;
typedef SlowPathBkOff slow_path_backoff;
};
typedef exponential<traits> type;
};
template <typename FastPathBkOff, typename SlowPathBkOff>
using make_exponential_t = typename make_exponential<FastPathBkOff, SlowPathBkOff>::type;
//@endcond
/// Constant traits for \ref delay back-off strategy
struct delay_const_traits
{
typedef std::chrono::milliseconds duration_type; ///< Timeout type
enum: unsigned {
timeout = 5 ///< Delay timeout
};
};
/// Runtime traits for \ref delay back-off strategy
struct delay_runtime_traits
{
typedef std::chrono::milliseconds duration_type; ///< Timeout type
static unsigned timeout; ///< Delay timeout, default 5
};
/// Delay back-off strategy
/**
Template arguments:
- \p Traits - a class that defines the duration type and the default timeout.
Choosing the best value for the timeout is a platform- and application-specific task.
The default value for the timeout is provided by the \p Traits class, which should
declare a \p timeout data member and a \p duration_type typedef
(default \p std::chrono::milliseconds). There are two predefined \p Traits implementations:
- \p delay_const_traits - defines \p timeout as a constant (enum).
To change the timeout you should recompile your application.
- \p delay_runtime_traits - specifies \p timeout as a static data member that can be changed
at runtime to tune the back-off strategy.
You may use the \p Traits template parameter to separate back-off implementations.
For example, you may define two \p delay back-offs for 5 and 10 ms timeouts:
\code
#include <cds/algo/backoff_strategy.h>
namespace bkoff = cds::backoff;
// 5ms delay
struct ms5
{
typedef std::chrono::milliseconds duration_type;
enum: unsigned { timeout = 5 };
};
// 10ms delay, runtime support
struct ms10
{
typedef std::chrono::milliseconds duration_type;
static unsigned timeout;
};
unsigned ms10::timeout = 10;
// define your back-off specialization
typedef bkoff::delay<ms5> delay5;
typedef bkoff::delay<ms10> delay10;
\endcode
*/
template <typename Traits = delay_const_traits>
class delay
{
public:
typedef Traits traits; ///< Traits
typedef typename Traits::duration_type duration_type; ///< Duration type (default \p std::chrono::milliseconds)
protected:
///@cond
duration_type const timeout;
///@endcond
public:
/// Default ctor takes the timeout from \p traits::timeout
delay() noexcept
: timeout( traits::timeout )
{}
/// Initializes timeout from \p nTimeout
constexpr explicit delay( unsigned int nTimeout ) noexcept
: timeout( nTimeout )
{}
//@cond
void operator()() const
{
std::this_thread::sleep_for( timeout );
}
template <typename Predicate>
bool operator()(Predicate pr) const
{
for ( unsigned int i = 0; i < traits::timeout; i += 2 ) {
if ( pr())
return true;
std::this_thread::sleep_for( duration_type( 2 ));
}
return false;
}
static void reset() noexcept
{}
//@endcond
};
//@cond
template <unsigned int Timeout, class Duration = std::chrono::milliseconds >
struct make_delay_of
{
struct traits {
typedef Duration duration_type;
enum: unsigned { timeout = Timeout };
};
typedef delay<traits> type;
};
//@endcond
/// Delay back-off strategy, template version
/**
This is a simplified version of the \p backoff::delay class.
The template parameter \p Timeout sets a delay timeout in \p Duration units.
*/
template <unsigned int Timeout, class Duration = std::chrono::milliseconds >
using delay_of = typename make_delay_of< Timeout, Duration >::type;
/// Default backoff strategy
typedef exponential<exponential_const_traits> Default;
/// Default back-off strategy for lock primitives
typedef exponential<exponential_const_traits> LockDefault;
} // namespace backoff
} // namespace cds
#endif // #ifndef CDSLIB_BACKOFF_STRATEGY_H

18
extern/libcds/cds/algo/base.h vendored Normal file
View File

@ -0,0 +1,18 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_BASE_H
#define CDSLIB_ALGO_BASE_H
#include <cds/details/defs.h>
namespace cds {
/// Different approaches and techniques for supporting high-concurrent data structure
namespace algo {}
} // namespace cds
#endif // #ifndef CDSLIB_ALGO_BASE_H

159
extern/libcds/cds/algo/bit_reversal.h vendored Normal file
View File

@ -0,0 +1,159 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_BIT_REVERSAL_H
#define CDSLIB_ALGO_BIT_REVERSAL_H
#include <cds/algo/base.h>
// Source: http://stackoverflow.com/questions/746171/best-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c
namespace cds { namespace algo {
/// Bit reversal algorithms
namespace bit_reversal {
/// SWAR algorithm (source: http://aggregate.org/MAGIC/#Bit%20Reversal)
struct swar {
/// 32bit
uint32_t operator()( uint32_t x ) const
{
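// swap adjacent bits, then 2-bit pairs, then nibbles, then bytes,
// and finally the two 16-bit halves: O(log N) passes, no lookup table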
x = ( ( ( x & 0xaaaaaaaa ) >> 1 ) | ( ( x & 0x55555555 ) << 1 ));
x = ( ( ( x & 0xcccccccc ) >> 2 ) | ( ( x & 0x33333333 ) << 2 ));
x = ( ( ( x & 0xf0f0f0f0 ) >> 4 ) | ( ( x & 0x0f0f0f0f ) << 4 ));
x = ( ( ( x & 0xff00ff00 ) >> 8 ) | ( ( x & 0x00ff00ff ) << 8 ));
return( ( x >> 16 ) | ( x << 16 ));
}
/// 64bit
uint64_t operator()( uint64_t x ) const
{
return ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x ))) << 32 ) // low 32bit
| ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x >> 32 )))); // high 32bit
}
};
/// Lookup table algorithm
struct lookup {
/// 32bit
uint32_t operator()( uint32_t x ) const
{
static uint8_t const table[] = {
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
};
static_assert( sizeof( table ) / sizeof( table[0] ) == 256, "Table size mismatch" );
return ( static_cast<uint32_t>( table[x & 0xff] ) << 24 ) |
( static_cast<uint32_t>( table[( x >> 8 ) & 0xff] ) << 16 ) |
( static_cast<uint32_t>( table[( x >> 16 ) & 0xff] ) << 8 ) |
( static_cast<uint32_t>( table[( x >> 24 ) & 0xff] ));
}
/// 64bit
uint64_t operator()( uint64_t x ) const
{
return ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x ))) << 32 ) |
static_cast<uint64_t>( operator()( static_cast<uint32_t>( x >> 32 )));
}
};
/// Mul-Div algorithm
struct muldiv {
//@cond
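// Byte-reversal building blocks: muldiv32_byte uses 32-bit multiplications
// only (cheap on 32-bit targets), while muldiv64_byte reverses a byte with
// one 64-bit multiplication and a modulus by 1023.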
static uint8_t muldiv32_byte( uint8_t b )
{
return static_cast<uint8_t>( ( ( b * 0x0802LU & 0x22110LU ) | ( b * 0x8020LU & 0x88440LU )) * 0x10101LU >> 16 );
}
static uint8_t muldiv64_byte( uint8_t b )
{
return static_cast<uint8_t>( ( b * 0x0202020202ULL & 0x010884422010ULL ) % 1023 );
}
// for 32bit architecture
static uint32_t muldiv32( uint32_t x )
{
return static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x >> 24 )))
| ( static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x >> 16 ))) << 8 )
| ( static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x >> 8 ))) << 16 )
| ( static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x ))) << 24 );
}
static uint64_t muldiv32( uint64_t x )
{
return static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 56 )))
| ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 48 ))) << 8 )
| ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 40 ))) << 16 )
| ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 32 ))) << 24 )
| ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 24 ))) << 32 )
| ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 16 ))) << 40 )
| ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 8 ))) << 48 )
| ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x ))) << 56 );
}
// for 64bit architecture
static uint32_t muldiv64( uint32_t x )
{
return static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x >> 24 )))
| ( static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x >> 16 ))) << 8 )
| ( static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x >> 8 ))) << 16 )
| ( static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x ))) << 24 );
}
static uint64_t muldiv64( uint64_t x )
{
return static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 56 )))
| ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 48 ))) << 8 )
| ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 40 ))) << 16 )
| ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 32 ))) << 24 )
| ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 24 ))) << 32 )
| ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 16 ))) << 40 )
| ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 8 ))) << 48 )
| ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x ))) << 56 );
}
//@endcond
/// 32bit
uint32_t operator()( uint32_t x ) const
{
# if CDS_BUILD_BITS == 32
return muldiv32( x );
# else
return muldiv64( x );
# endif
}
/// 64bit
uint64_t operator()( uint64_t x ) const
{
# if CDS_BUILD_BITS == 32
return muldiv32( x );
# else
return muldiv64( x );
# endif
}
};
} // namespace bit_reversal
}} // namespace cds::algo
#endif // #ifndef CDSLIB_ALGO_BIT_REVERSAL_H

143
extern/libcds/cds/algo/bitop.h vendored Normal file
View File

@ -0,0 +1,143 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_BITOP_H
#define CDSLIB_BITOP_H
/*
Different bit algorithms:
LSB get least significant bit number
MSB get most significant bit number
bswap swap byte order of word
RBO reverse bit order of word
Editions:
2007.10.08 Maxim.Khiszinsky Created
*/
#include <cds/details/defs.h>
#include <cds/compiler/bitop.h>
namespace cds {
/// Bit operations
namespace bitop {
///@cond none
namespace details {
template <int> struct BitOps;
// 32-bit bit ops
template <> struct BitOps<4> {
typedef uint32_t TUInt;
static int MSB( TUInt x ) { return bitop::platform::msb32( x ); }
static int LSB( TUInt x ) { return bitop::platform::lsb32( x ); }
static int MSBnz( TUInt x ) { return bitop::platform::msb32nz( x ); }
static int LSBnz( TUInt x ) { return bitop::platform::lsb32nz( x ); }
static int SBC( TUInt x ) { return bitop::platform::sbc32( x ) ; }
static int ZBC( TUInt x ) { return bitop::platform::zbc32( x ) ; }
static TUInt RBO( TUInt x ) { return bitop::platform::rbo32( x ); }
static bool complement( TUInt& x, int nBit ) { return bitop::platform::complement32( &x, nBit ); }
static TUInt RandXorShift(TUInt x) { return bitop::platform::RandXorShift32(x); }
};
// 64-bit bit ops
template <> struct BitOps<8> {
typedef uint64_t TUInt;
static int MSB( TUInt x ) { return bitop::platform::msb64( x ); }
static int LSB( TUInt x ) { return bitop::platform::lsb64( x ); }
static int MSBnz( TUInt x ) { return bitop::platform::msb64nz( x ); }
static int LSBnz( TUInt x ) { return bitop::platform::lsb64nz( x ); }
static int SBC( TUInt x ) { return bitop::platform::sbc64( x ) ; }
static int ZBC( TUInt x ) { return bitop::platform::zbc64( x ) ; }
static TUInt RBO( TUInt x ) { return bitop::platform::rbo64( x ); }
static bool complement( TUInt& x, int nBit ) { return bitop::platform::complement64( &x, nBit ); }
static TUInt RandXorShift(TUInt x) { return bitop::platform::RandXorShift64(x); }
};
} // namespace details
//@endcond
/// Get least significant bit (LSB) number (1..32/64), 0 if nArg == 0
template <typename T>
static inline int LSB( T nArg )
{
return details::BitOps< sizeof(T) >::LSB( (typename details::BitOps<sizeof(T)>::TUInt) nArg );
}
/// Get least significant bit (LSB) number (0..31/63)
/**
Precondition: nArg != 0
*/
template <typename T>
static inline int LSBnz( T nArg )
{
assert( nArg != 0 );
return details::BitOps< sizeof(T) >::LSBnz( (typename details::BitOps<sizeof(T)>::TUInt) nArg );
}
/// Get most significant bit (MSB) number (1..32/64), 0 if nArg == 0
template <typename T>
static inline int MSB( T nArg )
{
return details::BitOps< sizeof(T) >::MSB( (typename details::BitOps<sizeof(T)>::TUInt) nArg );
}
/// Get most significant bit (MSB) number (0..31/63)
/**
Precondition: nArg != 0
*/
template <typename T>
static inline int MSBnz( T nArg )
{
assert( nArg != 0 );
return details::BitOps< sizeof(T) >::MSBnz( (typename details::BitOps<sizeof(T)>::TUInt) nArg );
}
/// Get non-zero bit count of a word
template <typename T>
static inline int SBC( T nArg )
{
return details::BitOps< sizeof(T) >::SBC( (typename details::BitOps<sizeof(T)>::TUInt) nArg );
}
/// Get zero bit count of a word
template <typename T>
static inline int ZBC( T nArg )
{
return details::BitOps< sizeof(T) >::ZBC( (typename details::BitOps<sizeof(T)>::TUInt) nArg );
}
/// Reverse bit order of \p nArg
template <typename T>
static inline T RBO( T nArg )
{
return (T) details::BitOps< sizeof(T) >::RBO( (typename details::BitOps<sizeof(T)>::TUInt) nArg );
}
/// Complement bit \p nBit in \p nArg
template <typename T>
static inline bool complement( T& nArg, int nBit )
{
return details::BitOps< sizeof(T) >::complement( reinterpret_cast< typename details::BitOps<sizeof(T)>::TUInt& >( nArg ), nBit );
}
/// Simple random number generator
template <typename T>
static inline T RandXorShift( T x)
{
return (T) details::BitOps< sizeof(T) >::RandXorShift(x);
}
} // namespace bitop
} //namespace cds
#endif // #ifndef CDSLIB_BITOP_H
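A short hedged sketch of the generic wrappers above; the demo function name is an assumption:

// Hedged sketch: 1-based LSB/MSB indices and set-bit count of a word
#include <cds/algo/bitop.h>
#include <cassert>
#include <cstdint>

void bitop_demo()
{
    uint32_t x = 8;                      // only bit 3 (0-based) is set
    assert( cds::bitop::LSB( x ) == 4 ); // 1-based least significant set bit
    assert( cds::bitop::MSB( x ) == 4 ); // 1-based most significant set bit
    assert( cds::bitop::SBC( x ) == 1 ); // one set bit in the word
}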

extern/libcds/cds/algo/elimination.h vendored Normal file

@@ -0,0 +1,61 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_ELIMINATION_H
#define CDSLIB_ALGO_ELIMINATION_H
#include <cds/algo/elimination_tls.h>
#include <cds/algo/elimination_opt.h>
#include <cds/algo/atomic.h>
#include <cds/threading/model.h>
namespace cds { namespace algo {
/// Elimination technique
/** @anchor cds_elimination_description
Elimination technique allows highly distributed coupling and execution of operations with reverse
semantics like the pushes and pops on a stack. If a push followed by a pop are performed
on a stack, the data structure's state does not change (similarly for a pop followed by a push).
This means that if one can cause pairs of pushes and pops to meet and pair up in
separate locations, the threads can exchange values without having to touch a centralized structure
since they have anyhow "eliminated" each other's effect on it. Elimination can be implemented
by using a collision array in which threads pick random locations in order to try and collide.
Pairs of threads that "collide" in some location run through a synchronization protocol,
and all such disjoint collisions can be performed in parallel. If a thread has not met another
in the selected location or if it met a thread with an operation that cannot be eliminated
(such as two push operations), an alternative scheme must be used.
*/
namespace elimination {
/// Base class describing an operation for elimination
/**
This class contains some debugging info.
The actual operation descriptor depends on the real container and its interface.
*/
struct operation_desc
{
record * pOwner; ///< Owner of the descriptor
};
/// Acquires elimination record for the current thread
template <typename OperationDesc>
static inline record * init_record( OperationDesc& op )
{
record& rec = cds::threading::elimination_record();
assert( rec.is_free());
op.pOwner = &rec;
rec.pOp = static_cast<operation_desc *>( &op );
return &rec;
}
/// Releases elimination record for the current thread
static inline void clear_record()
{
cds::threading::elimination_record().pOp = nullptr;
}
} // namespace elimination
}} // namespace cds::algo
#endif // CDSLIB_ALGO_ELIMINATION_H
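As a hedged illustration of the record protocol above - the payload struct and function are assumptions, and the calling thread must already be attached to a libcds threading manager:

// Hedged sketch: acquiring and releasing the per-thread elimination record
#include <cds/algo/elimination.h>

struct push_desc: cds::algo::elimination::operation_desc
{
    int value; // operation payload (assumption)
};

void try_eliminated_push( int v )
{
    push_desc op;
    op.value = v;
    cds::algo::elimination::record* rec = cds::algo::elimination::init_record( op );
    // ... try to collide with a complementary pop via a collision array ...
    (void) rec;
    cds::algo::elimination::clear_record();
}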

extern/libcds/cds/algo/elimination_opt.h vendored Normal file

@@ -0,0 +1,40 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_ELIMINATION_OPT_H
#define CDSLIB_ALGO_ELIMINATION_OPT_H
#include <cds/details/defs.h>
namespace cds { namespace opt {
/// Enable \ref cds_elimination_description "elimination back-off" for the container
template <bool Enable>
struct enable_elimination {
//@cond
template <class Base> struct pack: public Base
{
static constexpr const bool enable_elimination = Enable;
};
//@endcond
};
/// \ref cds_elimination_description "Elimination back-off strategy" option setter
/**
Back-off strategy for elimination.
Usually, elimination back-off strategy is \p cds::backoff::delay.
*/
template <typename Type>
struct elimination_backoff {
//@cond
template <class Base> struct pack: public Base
{
typedef Type elimination_backoff;
};
//@endcond
};
}} // namespace cds::opt
#endif // #ifndef CDSLIB_ALGO_ELIMINATION_OPT_H
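As a hedged example of where these option setters are consumed - FCStack is a real libcds container, but the exact traits spelling below is an assumption about its interface:

// Hedged sketch: enabling elimination back-off for a flat-combining stack
#include <cds/container/fcstack.h>
#include <stack>

typedef cds::container::FCStack< int, std::stack<int>,
    cds::container::fcstack::make_traits<
        cds::opt::enable_elimination< true >
    >::type
> eliminating_stack;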

extern/libcds/cds/algo/elimination_tls.h vendored Normal file

@@ -0,0 +1,37 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_ELIMINATION_TLS_H
#define CDSLIB_ALGO_ELIMINATION_TLS_H
#include <cds/algo/base.h>
namespace cds { namespace algo { namespace elimination {
// Forwards
struct operation_desc;
/// Per-thread elimination record
/** @headerfile cds/algo/elimination.h
*/
struct record
{
operation_desc * pOp ; ///< Operation descriptor
/// Initialization
record()
: pOp( nullptr )
{}
/// Checks if the record is free
bool is_free() const
{
return pOp == nullptr;
}
};
}}} // cds::algo::elimination
#endif // #ifndef CDSLIB_ALGO_ELIMINATION_TLS_H

extern/libcds/cds/algo/flat_combining.h vendored Normal file

@@ -0,0 +1,11 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_FLAT_COMBINING_H
#define CDSLIB_ALGO_FLAT_COMBINING_H
#include <cds/algo/flat_combining/kernel.h>
#endif // #ifndef CDSLIB_ALGO_FLAT_COMBINING_H

extern/libcds/cds/algo/flat_combining/defs.h vendored Normal file

@@ -0,0 +1,67 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_FLAT_COMBINING_DEFS_H
#define CDSLIB_ALGO_FLAT_COMBINING_DEFS_H
#include <cds/algo/atomic.h>
namespace cds { namespace algo { namespace flat_combining {
/// Special values of \p publication_record::nRequest
enum request_value
{
req_EmptyRecord, ///< Publication record is empty
req_Response, ///< Operation is done
req_Operation ///< First operation id for derived classes
};
/// \p publication_record state
enum record_state {
inactive, ///< Record is inactive
active, ///< Record is active
removed ///< Record should be removed
};
/// Record of publication list
/**
Each data structure based on flat combining contains a class derived from \p %publication_record
*/
struct publication_record {
atomics::atomic<unsigned int> nRequest; ///< Request field (depends on data structure)
atomics::atomic<unsigned int> nState; ///< Record state: inactive, active, removed
atomics::atomic<unsigned int> nAge; ///< Age of the record
atomics::atomic<publication_record *> pNext; ///< Next record in active publication list
atomics::atomic<publication_record *> pNextAllocated; ///< Next record in allocated publication list
/// Initializes publication record
publication_record()
: nRequest( req_EmptyRecord )
, nAge( 0 )
, pNext( nullptr )
, pNextAllocated( nullptr )
{
nState.store( inactive, atomics::memory_order_release );
}
/// Returns the value of \p nRequest field
unsigned int op( atomics::memory_order mo = atomics::memory_order_relaxed ) const
{
return nRequest.load( mo );
}
/// Checks if the operation is done
bool is_done() const
{
return nRequest.load( atomics::memory_order_relaxed ) == req_Response;
}
};
}}} // namespace cds::algo::flat_combining
#endif // CDSLIB_ALGO_FLAT_COMBINING_DEFS_H
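A hedged sketch of the intended extension point: each flat-combining container derives its own record from \p publication_record and adds operation arguments and results; the field names below are assumptions:

// Hedged sketch: a container-specific publication record
#include <cds/algo/flat_combining/defs.h>

struct queue_publication_record: cds::algo::flat_combining::publication_record
{
    int  arg;      // argument of the operation (assumption)
    int  result;   // value returned to the caller (assumption)
    bool is_empty; // result flag for pop() on an empty container (assumption)
};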

extern/libcds/cds/algo/flat_combining/kernel.h vendored Normal file

@@ -0,0 +1,875 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H
#define CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H
#include <cds/algo/flat_combining/defs.h>
#include <cds/algo/flat_combining/wait_strategy.h>
#include <cds/sync/spinlock.h>
#include <cds/details/allocator.h>
#include <cds/opt/options.h>
#include <cds/algo/int_algo.h>
namespace cds { namespace algo {
/// @defgroup cds_flat_combining_intrusive Intrusive flat combining containers
/// @defgroup cds_flat_combining_container Non-intrusive flat combining containers
/// Flat combining
/**
@anchor cds_flat_combining_description
The flat combining (FC) technique was invented by Hendler, Incze, Shavit and Tzafrir in their paper
[2010] <i>"Flat Combining and the Synchronization-Parallelism Tradeoff"</i>.
The technique converts a sequential data structure to its concurrent implementation.
A few structures are added to the sequential implementation: a <i>global lock</i>,
a <i>count</i> of the number of combining passes, and a pointer to the <i>head</i>
of a <i>publication list</i>. The publication list is a list of thread-local records
of a size proportional to the number of threads that are concurrently accessing the shared object.
Each thread \p t accessing the structure to perform an invocation of some method \p f()
on the shared object executes the following sequence of steps:
<ol>
<li>Write the invocation opcode and parameters (if any) of the method \p f() to be applied
sequentially to the shared object in the <i>request</i> field of your thread local publication
record (there is no need to use a load-store memory barrier). The <i>request</i> field will later
be used to receive the response. If your thread local publication record is marked as active
continue to step 2, otherwise continue to step 5.</li>
<li>Check if the global lock is taken. If so (another thread is an active combiner), spin on the <i>request</i>
field waiting for a response to the invocation (one can add a yield at this point to allow other threads
on the same core to run). Once in a while, while spinning, check whether the lock is still taken and that your
record is active (you may use any of \p wait_strategy instead of spinning). If your record is inactive proceed to step 5.
Once the response is available, reset the request field to null and return the response.</li>
<li>If the lock is not taken, attempt to acquire it and become a combiner. If you fail,
return to spinning in step 2.</li>
<li>Otherwise, you hold the lock and are a combiner.
<ul>
<li>Increment the combining pass count by one.</li>
<li>Execute a \p fc_apply() by traversing the publication list from the head,
combining all non-null method call invocations, setting the <i>age</i> of each of these records
to the current <i>count</i>, applying the combined method calls to the structure D, and returning
responses to all the invocations. This traversal is guaranteed to be wait-free.</li>
<li>If the <i>count</i> is such that a cleanup needs to be performed, traverse the publication
list from the <i>head</i>. Starting from the second item (we always leave the item pointed to
by the head in the list), remove from the publication list all records whose <i>age</i> is
much smaller than the current <i>count</i>. This is done by removing the node and marking it
as inactive.</li>
<li>Release the lock.</li>
</ul>
<li>If you have no thread local publication record allocate one, marked as active. If you already
have one marked as inactive, mark it as active. Execute a store-load memory barrier. Proceed to insert
the record into the list with a successful CAS to the <i>head</i>. Then proceed to step 1.</li>
</ol>
As the test results show, the flat combining technique is suitable for non-intrusive containers
like stack, queue, deque. For intrusive concurrent containers the flat combining demonstrates
less impressive results.
\ref cds_flat_combining_container "List of FC-based containers" in libcds.
\ref cds_flat_combining_intrusive "List of intrusive FC-based containers" in libcds.
*/
namespace flat_combining {
/// Flat combining internal statistics
template <typename Counter = cds::atomicity::event_counter >
struct stat
{
typedef Counter counter_type; ///< Event counter type
counter_type m_nOperationCount ; ///< How many operations have been performed
counter_type m_nCombiningCount ; ///< Combining call count
counter_type m_nCompactPublicationList; ///< Count of publication list compacting
counter_type m_nDeactivatePubRecord; ///< How many publication records were deactivated during compacting
counter_type m_nActivatePubRecord; ///< Count of publication record activating
counter_type m_nPubRecordCreated ; ///< Count of created publication records
counter_type m_nPubRecordDeleted ; ///< Count of deleted publication records
counter_type m_nPassiveWaitCall; ///< Count of passive waiting call (\p kernel::wait_for_combining())
counter_type m_nPassiveWaitIteration;///< Count of iteration inside passive waiting
counter_type m_nPassiveWaitWakeup; ///< Count of forcing wake-up of passive wait cycle
counter_type m_nInvokeExclusive; ///< Count of call \p kernel::invoke_exclusive()
counter_type m_nWakeupByNotifying; ///< How many times the passive thread be waked up by a notification
counter_type m_nPassiveToCombiner; ///< How many times the passive thread becomes the combiner
/// Returns current combining factor
/**
Combining factor is how many operations perform in one combine pass:
<tt>combining_factor := m_nOperationCount / m_nCombiningCount</tt>
*/
double combining_factor() const
{
return m_nCombiningCount.get() ? double( m_nOperationCount.get()) / m_nCombiningCount.get() : 0.0;
}
//@cond
void onOperation() { ++m_nOperationCount; }
void onCombining() { ++m_nCombiningCount; }
void onCompactPublicationList() { ++m_nCompactPublicationList; }
void onDeactivatePubRecord() { ++m_nDeactivatePubRecord; }
void onActivatePubRecord() { ++m_nActivatePubRecord; }
void onCreatePubRecord() { ++m_nPubRecordCreated; }
void onDeletePubRecord() { ++m_nPubRecordDeleted; }
void onPassiveWait() { ++m_nPassiveWaitCall; }
void onPassiveWaitIteration() { ++m_nPassiveWaitIteration; }
void onPassiveWaitWakeup() { ++m_nPassiveWaitWakeup; }
void onInvokeExclusive() { ++m_nInvokeExclusive; }
void onWakeupByNotifying() { ++m_nWakeupByNotifying; }
void onPassiveToCombiner() { ++m_nPassiveToCombiner; }
//@endcond
};
/// Flat combining dummy internal statistics
struct empty_stat
{
//@cond
void onOperation() const {}
void onCombining() const {}
void onCompactPublicationList() const {}
void onDeactivatePubRecord() const {}
void onActivatePubRecord() const {}
void onCreatePubRecord() const {}
void onDeletePubRecord() const {}
void onPassiveWait() const {}
void onPassiveWaitIteration() const {}
void onPassiveWaitWakeup() const {}
void onInvokeExclusive() const {}
void onWakeupByNotifying() const {}
void onPassiveToCombiner() const {}
//@endcond
};
/// Type traits of \ref kernel class
/**
You can define different type traits for \ref kernel
by specifying your struct based on \p %traits
or by using \ref make_traits metafunction.
*/
struct traits
{
typedef cds::sync::spin lock_type; ///< Lock type
typedef cds::algo::flat_combining::wait_strategy::backoff< cds::backoff::delay_of<2>> wait_strategy; ///< Wait strategy
typedef CDS_DEFAULT_ALLOCATOR allocator; ///< Allocator used for TLS data (allocating \p publication_record derivatives)
typedef empty_stat stat; ///< Internal statistics
typedef opt::v::relaxed_ordering memory_model; ///< C++ memory ordering model
};
/// Metafunction converting option list to traits
/**
\p Options are:
- \p opt::lock_type - mutex type, default is \p cds::sync::spin
- \p opt::wait_strategy - wait strategy, see \p wait_strategy namespace, default is \p wait_strategy::backoff.
- \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR
- \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default)
- \p opt::memory_model - C++ memory ordering model.
List of all available memory ordering see \p opt::memory_model.
Default is \p cds::opt::v::relaxed_ordering
*/
template <typename... Options>
struct make_traits {
# ifdef CDS_DOXYGEN_INVOKED
typedef implementation_defined type ; ///< Metafunction result
# else
typedef typename cds::opt::make_options<
typename cds::opt::find_type_traits< traits, Options... >::type
,Options...
>::type type;
# endif
};
/// The kernel of flat combining
/**
Template parameters:
- \p PublicationRecord - a type derived from \ref publication_record
- \p Traits - a type traits of flat combining, default is \p flat_combining::traits.
\ref make_traits metafunction can be used to create type traits
The kernel object should be a member of a container class. The container cooperates with flat combining
kernel object. There are two ways to interact with the kernel:
- One-by-one processing of the active records of the publication list. This mode is provided by the \p combine() function:
the container acquires its publication record by \p acquire_record(), fills its fields and calls
\p combine() function of its kernel object. If the current thread becomes a combiner, the kernel
calls \p fc_apply() function of the container for each active non-empty record. Then, the container
should release its publication record by \p release_record(). Only one pass through the publication
list is possible.
- Batch processing - \p batch_combine() function. In this mode the container obtains access
to entire publication list. This mode allows the container to perform an elimination, for example,
the stack can collide \p push() and \p pop() requests. The sequence of invocations is the following:
the container acquires its publication record by \p acquire_record(), fills its fields and calls
\p batch_combine() function of its kernel object. If the current thread becomes a combiner,
the kernel calls \p fc_process() function of the container passing two iterators pointing to
the begin and the end of publication list (see \ref iterator class). The iterators allow
multiple pass through active records of publication list. For each processed record the container
should call the \p operation_done() function. At the end, the container should release
its record by \p release_record().
*/
template <
typename PublicationRecord
,typename Traits = traits
>
class kernel
{
public:
typedef Traits traits; ///< Type traits
typedef typename traits::lock_type global_lock_type; ///< Global lock type
typedef typename traits::wait_strategy wait_strategy; ///< Wait strategy type
typedef typename traits::allocator allocator; ///< Allocator type (used for allocating publication_record_type data)
typedef typename traits::stat stat; ///< Internal statistics
typedef typename traits::memory_model memory_model; ///< C++ memory model
typedef typename wait_strategy::template make_publication_record<PublicationRecord>::type publication_record_type; ///< Publication record type
protected:
//@cond
typedef cds::details::Allocator< publication_record_type, allocator > cxx11_allocator; ///< internal helper cds::details::Allocator
typedef std::lock_guard<global_lock_type> lock_guard;
//@endcond
protected:
atomics::atomic<unsigned int> m_nCount; ///< Total count of combining passes. Used as an age.
publication_record_type* m_pHead; ///< Head of active publication list
publication_record_type* m_pAllocatedHead; ///< Head of allocated publication list
boost::thread_specific_ptr< publication_record_type > m_pThreadRec; ///< Thread-local publication record
mutable global_lock_type m_Mutex; ///< Global mutex
mutable stat m_Stat; ///< Internal statistics
unsigned int const m_nCompactFactor; ///< Publication list compacting factor (the list will be compacted through \p %m_nCompactFactor combining passes)
unsigned int const m_nCombinePassCount; ///< Number of combining passes
wait_strategy m_waitStrategy; ///< Wait strategy
public:
/// Initializes the object
/**
Compact factor = 1024
Combiner pass count = 8
*/
kernel()
: kernel( 1024, 8 )
{}
/// Initializes the object
kernel(
unsigned int nCompactFactor ///< Publication list compacting factor (the list will be compacted through \p nCompactFactor combining passes)
,unsigned int nCombinePassCount ///< Number of combining passes for combiner thread
)
: m_nCount(0)
, m_pHead( nullptr )
, m_pAllocatedHead( nullptr )
, m_pThreadRec( tls_cleanup )
, m_nCompactFactor( static_cast<unsigned>( cds::beans::ceil2( static_cast<size_t>( nCompactFactor )) - 1 )) // binary mask
, m_nCombinePassCount( nCombinePassCount )
{
assert( m_pThreadRec.get() == nullptr );
publication_record_type* pRec = cxx11_allocator().New();
m_pAllocatedHead =
m_pHead = pRec;
m_pThreadRec.reset( pRec );
m_Stat.onCreatePubRecord();
}
/// Destroys the object and all publication records
~kernel()
{
m_pThreadRec.reset(); // calls tls_cleanup()
// delete all publication records
for ( publication_record* p = m_pAllocatedHead; p; ) {
publication_record * pRec = p;
p = p->pNextAllocated.load( memory_model::memory_order_relaxed );
free_publication_record( static_cast<publication_record_type *>( pRec ));
}
}
/// Gets publication list record for the current thread
/**
If there is no publication record for the current thread
the function allocates it.
*/
publication_record_type * acquire_record()
{
publication_record_type * pRec = m_pThreadRec.get();
if ( !pRec ) {
// Allocate new publication record
pRec = cxx11_allocator().New();
m_pThreadRec.reset( pRec );
m_Stat.onCreatePubRecord();
// Insert in allocated list
assert( m_pAllocatedHead != nullptr );
publication_record* p = m_pAllocatedHead->pNextAllocated.load( memory_model::memory_order_relaxed );
do {
pRec->pNextAllocated.store( p, memory_model::memory_order_release );
} while ( !m_pAllocatedHead->pNextAllocated.compare_exchange_weak( p, pRec, memory_model::memory_order_release, atomics::memory_order_acquire ));
publish( pRec );
}
else if ( pRec->nState.load( memory_model::memory_order_acquire ) != active )
publish( pRec );
assert( pRec->op() == req_EmptyRecord );
return pRec;
}
/// Marks publication record for the current thread as empty
void release_record( publication_record_type * pRec )
{
assert( pRec->is_done());
pRec->nRequest.store( req_EmptyRecord, memory_model::memory_order_release );
}
/// Trying to execute operation \p nOpId
/**
\p pRec is the publication record acquired by \ref acquire_record earlier.
\p owner is the container that owns the flat combining kernel object.
As a result the current thread can become a combiner or wait until
another combiner performs the \p pRec operation.
If the thread becomes a combiner, the kernel calls \p owner.fc_apply
for each active non-empty publication record.
*/
template <class Container>
void combine( unsigned int nOpId, publication_record_type * pRec, Container& owner )
{
assert( nOpId >= req_Operation );
assert( pRec );
pRec->nRequest.store( nOpId, memory_model::memory_order_release );
m_Stat.onOperation();
try_combining( owner, pRec );
}
/// Trying to execute operation \p nOpId in batch-combine mode
/**
\p pRec is the publication record acquired by \p acquire_record() earlier.
\p owner is a container that owns the flat combining kernel object.
As a result the current thread can become a combiner or wait until
another combiner performs the \p pRec operation.
If the thread becomes a combiner, the kernel calls \p owner.fc_process()
giving the container the full access over publication list. This function
is useful for an elimination technique if the container supports any kind of
that. The container can perform multiple pass through publication list.
\p owner.fc_process() has two arguments - forward iterators on begin and end of
publication list, see \ref iterator class. For each processed record the container
should call \p operation_done() function to mark the record as processed.
At the end of \p %batch_combine() the \p combine() function is called
to process the rest of the publication records.
*/
template <class Container>
void batch_combine( unsigned int nOpId, publication_record_type* pRec, Container& owner )
{
assert( nOpId >= req_Operation );
assert( pRec );
pRec->nRequest.store( nOpId, memory_model::memory_order_release );
m_Stat.onOperation();
try_batch_combining( owner, pRec );
}
/// Invokes \p Func in exclusive mode
/**
Some operations in flat combining containers should be called in exclusive mode,
i.e. the current thread should become the combiner to process the operation.
The typical example is the \p empty() function.
\p %invoke_exclusive() allows doing that: the current thread becomes the combiner,
invokes \p f exclusively, but unlike typical usage the thread does not process any pending request.
Instead, after the \p f call completes, the current thread wakes up a pending thread, if any.
*/
template <typename Func>
void invoke_exclusive( Func f )
{
{
lock_guard l( m_Mutex );
f();
}
m_waitStrategy.wakeup( *this );
m_Stat.onInvokeExclusive();
}
/// Marks \p rec as executed
/**
This function should be called by container if \p batch_combine() mode is used.
For usual combining (see \p combine()) this function is unnecessary.
*/
void operation_done( publication_record& rec )
{
rec.nRequest.store( req_Response, memory_model::memory_order_release );
m_waitStrategy.notify( *this, static_cast<publication_record_type&>( rec ));
}
/// Internal statistics
stat const& statistics() const
{
return m_Stat;
}
//@cond
// For container classes based on flat combining
stat& internal_statistics() const
{
return m_Stat;
}
//@endcond
/// Returns the compact factor
unsigned int compact_factor() const
{
return m_nCompactFactor + 1;
}
/// Returns number of combining passes for combiner thread
unsigned int combine_pass_count() const
{
return m_nCombinePassCount;
}
public:
/// Publication list iterator
/**
Iterators are intended for batch processing by container's
\p fc_process function.
The iterator allows iterating through the active publication list.
*/
class iterator
{
//@cond
friend class kernel;
publication_record_type * m_pRec;
//@endcond
protected:
//@cond
iterator( publication_record_type * pRec )
: m_pRec( pRec )
{
skip_inactive();
}
void skip_inactive()
{
while ( m_pRec && (m_pRec->nState.load( memory_model::memory_order_acquire ) != active
|| m_pRec->op( memory_model::memory_order_relaxed) < req_Operation ))
{
m_pRec = static_cast<publication_record_type*>(m_pRec->pNext.load( memory_model::memory_order_acquire ));
}
}
//@endcond
public:
/// Initializes an empty iterator object
iterator()
: m_pRec( nullptr )
{}
/// Copy ctor
iterator( iterator const& src )
: m_pRec( src.m_pRec )
{}
/// Pre-increment
iterator& operator++()
{
assert( m_pRec );
m_pRec = static_cast<publication_record_type *>( m_pRec->pNext.load( memory_model::memory_order_acquire ));
skip_inactive();
return *this;
}
/// Post-increment
iterator operator++(int)
{
assert( m_pRec );
iterator it(*this);
++(*this);
return it;
}
/// Dereference operator, can return \p nullptr
publication_record_type* operator ->()
{
return m_pRec;
}
/// Dereference operator, the iterator should not be an end iterator
publication_record_type& operator*()
{
assert( m_pRec );
return *m_pRec;
}
/// Iterator equality
friend bool operator==( iterator it1, iterator it2 )
{
return it1.m_pRec == it2.m_pRec;
}
/// Iterator inequality
friend bool operator!=( iterator it1, iterator it2 )
{
return !( it1 == it2 );
}
};
/// Returns an iterator to the first active publication record
iterator begin() { return iterator(m_pHead); }
/// Returns an iterator to the end of publication list. Should not be dereferenced.
iterator end() { return iterator(); }
public:
/// Gets current value of \p rec.nRequest
/**
This function is intended to be invoked from a wait strategy
*/
int get_operation( publication_record& rec )
{
return rec.op( memory_model::memory_order_acquire );
}
/// Wakes up any waiting thread
/**
This function is intended to be invoked from a wait strategy
*/
void wakeup_any()
{
publication_record* pRec = m_pHead;
while ( pRec ) {
if ( pRec->nState.load( memory_model::memory_order_acquire ) == active
&& pRec->op( memory_model::memory_order_acquire ) >= req_Operation )
{
m_waitStrategy.notify( *this, static_cast<publication_record_type&>( *pRec ));
break;
}
pRec = pRec->pNext.load( memory_model::memory_order_acquire );
}
}
private:
//@cond
static void tls_cleanup( publication_record_type* pRec )
{
// Thread done
// pRec that is TLS data should be excluded from publication list
pRec->nState.store( removed, memory_model::memory_order_release );
}
void free_publication_record( publication_record_type* pRec )
{
cxx11_allocator().Delete( pRec );
m_Stat.onDeletePubRecord();
}
void publish( publication_record_type* pRec )
{
assert( pRec->nState.load( memory_model::memory_order_relaxed ) == inactive );
pRec->nAge.store( m_nCount.load(memory_model::memory_order_relaxed), memory_model::memory_order_relaxed );
pRec->nState.store( active, memory_model::memory_order_relaxed );
// Insert record to publication list
if ( m_pHead != static_cast<publication_record *>(pRec)) {
publication_record * p = m_pHead->pNext.load( memory_model::memory_order_relaxed );
if ( p != static_cast<publication_record *>( pRec )) {
do {
pRec->pNext.store( p, memory_model::memory_order_release );
// Failed CAS changes p
} while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast<publication_record *>(pRec),
memory_model::memory_order_release, atomics::memory_order_acquire ));
m_Stat.onActivatePubRecord();
}
}
}
void republish( publication_record_type* pRec )
{
if ( pRec->nState.load( memory_model::memory_order_relaxed ) != active ) {
// The record has been excluded from publication list. Reinsert it
publish( pRec );
}
}
template <class Container>
void try_combining( Container& owner, publication_record_type* pRec )
{
if ( m_Mutex.try_lock()) {
// The thread becomes a combiner
lock_guard l( m_Mutex, std::adopt_lock_t());
// The record pRec can be excluded from publication list. Re-publish it
republish( pRec );
combining( owner );
assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response );
}
else {
// There is another combiner, wait while it executes our request
if ( !wait_for_combining( pRec )) {
// The thread becomes a combiner
lock_guard l( m_Mutex, std::adopt_lock_t());
// The record pRec can be excluded from publication list. Re-publish it
republish( pRec );
combining( owner );
assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response );
}
}
}
template <class Container>
void try_batch_combining( Container& owner, publication_record_type * pRec )
{
if ( m_Mutex.try_lock()) {
// The thread becomes a combiner
lock_guard l( m_Mutex, std::adopt_lock_t());
// The record pRec can be excluded from publication list. Re-publish it
republish( pRec );
batch_combining( owner );
assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response );
}
else {
// There is another combiner, wait while it executes our request
if ( !wait_for_combining( pRec )) {
// The thread becomes a combiner
lock_guard l( m_Mutex, std::adopt_lock_t());
// The record pRec can be excluded from publication list. Re-publish it
republish( pRec );
batch_combining( owner );
assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response );
}
}
}
template <class Container>
void combining( Container& owner )
{
// The thread is a combiner
assert( !m_Mutex.try_lock());
unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_relaxed ) + 1;
unsigned int nEmptyPassCount = 0;
unsigned int nUsefulPassCount = 0;
for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) {
if ( combining_pass( owner, nCurAge ))
++nUsefulPassCount;
else if ( ++nEmptyPassCount > nUsefulPassCount )
break;
}
m_Stat.onCombining();
if ( ( nCurAge & m_nCompactFactor ) == 0 )
compact_list( nCurAge );
}
template <class Container>
bool combining_pass( Container& owner, unsigned int nCurAge )
{
publication_record* p = m_pHead;
bool bOpDone = false;
while ( p ) {
switch ( p->nState.load( memory_model::memory_order_acquire )) {
case active:
if ( p->op( memory_model::memory_order_acquire ) >= req_Operation ) {
p->nAge.store( nCurAge, memory_model::memory_order_relaxed );
owner.fc_apply( static_cast<publication_record_type*>( p ));
operation_done( *p );
bOpDone = true;
}
break;
case inactive:
// Only m_pHead can be inactive in the publication list
assert( p == m_pHead );
break;
case removed:
// Such record will be removed on compacting phase
break;
default:
// That is impossible
assert( false );
}
p = p->pNext.load( memory_model::memory_order_acquire );
}
return bOpDone;
}
template <class Container>
void batch_combining( Container& owner )
{
// The thread is a combiner
assert( !m_Mutex.try_lock());
unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_relaxed ) + 1;
for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass )
owner.fc_process( begin(), end());
combining_pass( owner, nCurAge );
m_Stat.onCombining();
if ( ( nCurAge & m_nCompactFactor ) == 0 )
compact_list( nCurAge );
}
bool wait_for_combining( publication_record_type* pRec )
{
m_waitStrategy.prepare( *pRec );
m_Stat.onPassiveWait();
while ( pRec->op( memory_model::memory_order_acquire ) != req_Response ) {
// The record can be excluded from publication list. Reinsert it
republish( pRec );
m_Stat.onPassiveWaitIteration();
// Wait while the operation is being processed
if ( m_waitStrategy.wait( *this, *pRec ))
m_Stat.onWakeupByNotifying();
if ( m_Mutex.try_lock()) {
if ( pRec->op( memory_model::memory_order_acquire ) == req_Response ) {
// Operation is done
m_Mutex.unlock();
// Wake up pending threads
m_waitStrategy.wakeup( *this );
m_Stat.onPassiveWaitWakeup();
break;
}
// The thread becomes a combiner
m_Stat.onPassiveToCombiner();
return false;
}
}
return true;
}
void compact_list( unsigned int nCurAge )
{
// Compacts publication list
// This function is called only by combiner thread
try_again:
publication_record * pPrev = m_pHead;
for ( publication_record * p = pPrev->pNext.load( memory_model::memory_order_acquire ); p; ) {
switch ( p->nState.load( memory_model::memory_order_relaxed )) {
case active:
if ( p->nAge.load( memory_model::memory_order_relaxed ) + m_nCompactFactor < nCurAge )
{
publication_record * pNext = p->pNext.load( memory_model::memory_order_relaxed );
if ( pPrev->pNext.compare_exchange_strong( p, pNext,
memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
p->nState.store( inactive, memory_model::memory_order_release );
p = pNext;
m_Stat.onDeactivatePubRecord();
continue;
}
}
break;
case removed:
{
publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire );
if ( cds_likely( pPrev->pNext.compare_exchange_strong( p, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) {
p = pNext;
continue;
}
else {
// CAS can fail only at the beginning of the list
assert( pPrev == m_pHead );
goto try_again;
}
}
}
pPrev = p;
p = p->pNext.load( memory_model::memory_order_acquire );
}
// Iterate over allocated list to find removed records
pPrev = m_pAllocatedHead;
for ( publication_record * p = pPrev->pNextAllocated.load( memory_model::memory_order_acquire ); p; ) {
if ( p->nState.load( memory_model::memory_order_relaxed ) == removed ) {
publication_record * pNext = p->pNextAllocated.load( memory_model::memory_order_relaxed );
if ( pPrev->pNextAllocated.compare_exchange_strong( p, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
free_publication_record( static_cast<publication_record_type *>( p ));
p = pNext;
continue;
}
}
pPrev = p;
p = p->pNextAllocated.load( memory_model::memory_order_relaxed );
}
m_Stat.onCompactPublicationList();
}
//@endcond
};
//@cond
class container
{
public:
template <typename PubRecord>
void fc_apply( PubRecord * )
{
assert( false );
}
template <typename Iterator>
void fc_process( Iterator, Iterator )
{
assert( false );
}
};
//@endcond
} // namespace flat_combining
}} // namespace cds::algo
/*
CppMem model (http://svr-pes20-cppmem.cl.cam.ac.uk/cppmem/)
// Combiner thread - slave (waiting) thread
int main() {
atomic_int y = 0; // pRec->op
int x = 0; // pRec->data
{{{
{ // slave thread (not combiner)
// Op data
x = 1;
// Annotate request (op)
y.store(1, release);
// Wait while request done
y.load(acquire).readsvalue(2);
// Read result
r2=x;
}
|||
{ // Combiner thread
// Read request (op)
r1=y.load(acquire).readsvalue(1);
// Execute request - change request data
x = 2;
// store "request processed" flag (pRec->op := req_Response)
y.store(2, release);
}
}}};
return 0;
}
*/
#endif // #ifndef CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H
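To make the one-by-one protocol above concrete, here is a minimal hedged sketch of a flat-combining counter built directly on \p kernel<>; the class name and fields are assumptions, and statistics and error handling are omitted:

// Hedged sketch: a shared counter whose updates are applied only by the combiner
#include <cds/algo/flat_combining/kernel.h>

namespace fc = cds::algo::flat_combining;

class fc_counter
{
    struct pub_record: fc::publication_record
    {
        int delta;  // argument of add() (assumption)
        int result; // counter value returned to the caller (assumption)
    };
    enum: unsigned { op_add = fc::req_Operation }; // first free operation id

    fc::kernel<pub_record> kernel_;
    int value_ = 0; // the sequential structure; touched only by the combiner

public:
    int add( int delta )
    {
        auto* rec = kernel_.acquire_record();
        rec->delta = delta;
        kernel_.combine( op_add, rec, *this ); // may make this thread the combiner
        int res = rec->result;
        kernel_.release_record( rec );
        return res;
    }

    // Called by the kernel for each active non-empty record while combining
    void fc_apply( pub_record* rec )
    {
        if ( rec->op() == op_add ) {
            value_ += rec->delta;
            rec->result = value_;
        }
    }
};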

extern/libcds/cds/algo/flat_combining/wait_strategy.h vendored Normal file

@@ -0,0 +1,417 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H
#define CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H
#include <cds/algo/flat_combining/defs.h>
#include <cds/algo/backoff_strategy.h>
#include <mutex>
#include <condition_variable>
#include <boost/thread/tss.hpp> // thread_specific_ptr
namespace cds { namespace opt {
/// Wait strategy option for \p flat_combining::kernel
template <typename Strategy>
struct wait_strategy {
//@cond
template <typename Base> struct pack: public Base
{
typedef Strategy wait_strategy;
};
//@endcond
};
}} // namespace cds::opt
namespace cds { namespace algo { namespace flat_combining {
/// Wait strategies for \p flat_combining technique
/**
Wait strategy specifies how a thread waits until its request is performed by the combiner.
See \p wait_strategy::empty wait strategy to explain the interface.
*/
namespace wait_strategy {
/// Empty wait strategy
/**
The empty wait strategy just spins on the request field.
All functions are empty.
*/
struct empty
{
/// Metafunction for defining a publication record for flat combining technique
/**
Any wait strategy may expand the publication record for storing
its own private data.
\p PublicationRecord is the type specified by \p flat_combining::kernel.
- If the strategy has no thread-private data, it should typedef \p PublicationRecord
as the return \p type of the metafunction.
- Otherwise, if the strategy wants to store anything in thread-local data,
it should expand \p PublicationRecord, for example:
\code
template <typename PublicationRecord>
struct make_publication_record {
struct type: public PublicationRecord
{
int strategy_data;
};
};
\endcode
*/
template <typename PublicationRecord>
struct make_publication_record {
typedef PublicationRecord type; ///< Metafunction result
};
/// Prepares the strategy
/**
This function is called before entering the waiting cycle.
Some strategies need to prepare their thread-local data in \p rec.
\p PublicationRecord is thread's publication record of type \p make_publication_record::type
*/
template <typename PublicationRecord>
void prepare( PublicationRecord& rec )
{
CDS_UNUSED( rec );
}
/// Waits for the combiner
/**
The thread calls this function to wait until the combiner processes
the request.
The function returns \p true if the thread was woken up by the combiner;
otherwise it returns \p false.
\p FCKernel is a \p flat_combining::kernel object,
\p PublicationRecord is thread's publication record of type \p make_publication_record::type
*/
template <typename FCKernel, typename PublicationRecord>
bool wait( FCKernel& fc, PublicationRecord& rec )
{
CDS_UNUSED( fc );
CDS_UNUSED( rec );
return false;
}
/// Wakes up the thread
/**
The combiner calls \p %notify() when it has processed the request.
\p FCKernel is a \p flat_combining::kernel object,
\p PublicationRecord is thread's publication record of type \p make_publication_record::type
*/
template <typename FCKernel, typename PublicationRecord>
void notify( FCKernel& fc, PublicationRecord& rec )
{
CDS_UNUSED( fc );
CDS_UNUSED( rec );
}
/// Moves control to other thread
/**
This function is called when the thread becomes the combiner
but its request has already been processed.
The strategy may call \p fc.wakeup_any(), which instructs the kernel
to wake up any pending thread.
\p FCKernel is a \p flat_combining::kernel object,
*/
template <typename FCKernel>
void wakeup( FCKernel& fc )
{
CDS_UNUSED( fc );
}
};
/// Back-off wait strategy
/**
Template argument \p BackOff specifies the back-off strategy, default is \p cds::backoff::delay_of<2>
*/
template <typename BackOff = cds::backoff::delay_of<2>>
struct backoff
{
typedef BackOff back_off; ///< Back-off strategy
/// Incorporates back-off strategy into publication record
template <typename PublicationRecord>
struct make_publication_record
{
//@cond
struct type: public PublicationRecord
{
back_off bkoff;
};
//@endcond
};
/// Resets back-off strategy in \p rec
template <typename PublicationRecord>
void prepare( PublicationRecord& rec )
{
rec.bkoff.reset();
}
/// Calls back-off strategy
template <typename FCKernel, typename PublicationRecord>
bool wait( FCKernel& /*fc*/, PublicationRecord& rec )
{
rec.bkoff();
return false;
}
/// Does nothing
template <typename FCKernel, typename PublicationRecord>
void notify( FCKernel& /*fc*/, PublicationRecord& /*rec*/ )
{}
/// Does nothing
template <typename FCKernel>
void wakeup( FCKernel& )
{}
};
/// Wait strategy based on the single mutex and the condition variable
/**
The strategy shares the mutex and condition variable among all threads.
Template parameter \p Milliseconds specifies waiting duration;
the minimal value is 1.
*/
template <int Milliseconds = 2>
class single_mutex_single_condvar
{
//@cond
std::mutex m_mutex;
std::condition_variable m_condvar;
bool m_wakeup;
typedef std::unique_lock< std::mutex > unique_lock;
//@endcond
public:
enum {
c_nWaitMilliseconds = Milliseconds < 1 ? 1 : Milliseconds ///< Waiting duration
};
/// Empty metafunction
template <typename PublicationRecord>
struct make_publication_record {
typedef PublicationRecord type; ///< publication record type
};
/// Default ctor
single_mutex_single_condvar()
: m_wakeup( false )
{}
/// Does nothing
template <typename PublicationRecord>
void prepare( PublicationRecord& /*rec*/ )
{}
/// Sleeps on condition variable waiting for notification from combiner
template <typename FCKernel, typename PublicationRecord>
bool wait( FCKernel& fc, PublicationRecord& rec )
{
if ( fc.get_operation( rec ) >= req_Operation ) {
unique_lock lock( m_mutex );
if ( fc.get_operation( rec ) >= req_Operation ) {
if ( m_wakeup ) {
m_wakeup = false;
return true;
}
bool ret = m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout;
m_wakeup = false;
return ret;
}
}
return false;
}
/// Calls condition variable function \p notify_all()
template <typename FCKernel, typename PublicationRecord>
void notify( FCKernel& fc, PublicationRecord& /*rec*/ )
{
wakeup( fc );
}
/// Calls condition variable function \p notify_all()
template <typename FCKernel>
void wakeup( FCKernel& /*fc*/ )
{
unique_lock lock( m_mutex );
m_wakeup = true;
m_condvar.notify_all();
}
};
/// Wait strategy based on the single mutex and thread-local condition variables
/**
The strategy shares the mutex, but each thread has its own condition variable
Template parameter \p Milliseconds specifies waiting duration;
the minimal value is 1.
*/
template <int Milliseconds = 2>
class single_mutex_multi_condvar
{
//@cond
std::mutex m_mutex;
bool m_wakeup;
typedef std::unique_lock< std::mutex > unique_lock;
//@endcond
public:
enum {
c_nWaitMilliseconds = Milliseconds < 1 ? 1 : Milliseconds ///< Waiting duration
};
/// Incorporates a condition variable into \p PublicationRecord
template <typename PublicationRecord>
struct make_publication_record {
/// Metafunction result
struct type: public PublicationRecord
{
//@cond
std::condition_variable m_condvar;
//@endcond
};
};
/// Default ctor
single_mutex_multi_condvar()
: m_wakeup( false )
{}
/// Does nothing
template <typename PublicationRecord>
void prepare( PublicationRecord& /*rec*/ )
{}
/// Sleeps on condition variable waiting for notification from combiner
template <typename FCKernel, typename PublicationRecord>
bool wait( FCKernel& fc, PublicationRecord& rec )
{
if ( fc.get_operation( rec ) >= req_Operation ) {
unique_lock lock( m_mutex );
if ( fc.get_operation( rec ) >= req_Operation ) {
if ( m_wakeup ) {
m_wakeup = false;
return true;
}
bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout;
m_wakeup = false;
return ret;
}
}
return false;
}
/// Calls condition variable function \p notify_one()
template <typename FCKernel, typename PublicationRecord>
void notify( FCKernel& /*fc*/, PublicationRecord& rec )
{
unique_lock lock( m_mutex );
m_wakeup = true;
rec.m_condvar.notify_one();
}
/// Calls \p fc.wakeup_any() to wake up any pending thread
template <typename FCKernel>
void wakeup( FCKernel& fc )
{
fc.wakeup_any();
}
};
/// Wait strategy where each thread has a mutex and a condition variable
/**
Template parameter \p Milliseconds specifies waiting duration;
the minimal value is 1.
*/
template <int Milliseconds = 2>
class multi_mutex_multi_condvar
{
//@cond
typedef std::unique_lock< std::mutex > unique_lock;
//@endcond
public:
enum {
c_nWaitMilliseconds = Milliseconds < 1 ? 1 : Milliseconds ///< Waiting duration
};
/// Incorporates a condition variable and a mutex into \p PublicationRecord
template <typename PublicationRecord>
struct make_publication_record {
/// Metafunction result
struct type: public PublicationRecord
{
//@cond
std::mutex m_mutex;
std::condition_variable m_condvar;
bool m_wakeup;
type()
: m_wakeup( false )
{}
//@endcond
};
};
/// Does nothing
template <typename PublicationRecord>
void prepare( PublicationRecord& /*rec*/ )
{}
/// Sleeps on condition variable waiting for notification from combiner
template <typename FCKernel, typename PublicationRecord>
bool wait( FCKernel& fc, PublicationRecord& rec )
{
if ( fc.get_operation( rec ) >= req_Operation ) {
unique_lock lock( rec.m_mutex );
if ( fc.get_operation( rec ) >= req_Operation ) {
if ( rec.m_wakeup ) {
rec.m_wakeup = false;
return true;
}
bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout;
rec.m_wakeup = false;
return ret;
}
}
return false;
}
/// Calls condition variable function \p notify_one()
template <typename FCKernel, typename PublicationRecord>
void notify( FCKernel& /*fc*/, PublicationRecord& rec )
{
unique_lock lock( rec.m_mutex );
rec.m_wakeup = true;
rec.m_condvar.notify_one();
}
/// Calls \p fc.wakeup_any() to wake up any pending thread
template <typename FCKernel>
void wakeup( FCKernel& fc )
{
fc.wakeup_any();
}
};
} // namespace wait_strategy
}}} // namespace cds::algo::flat_combining
#endif //CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H
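A hedged sketch of selecting a non-default wait strategy through \p make_traits; the record and kernel typedef names are assumptions:

// Hedged sketch: a kernel whose waiting threads sleep on a shared condition variable
#include <cds/algo/flat_combining/kernel.h>

namespace fc = cds::algo::flat_combining;

struct my_record: fc::publication_record {};

typedef fc::kernel<
    my_record,
    fc::make_traits<
        cds::opt::wait_strategy< fc::wait_strategy::single_mutex_single_condvar<2>>
    >::type
> my_kernel;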

extern/libcds/cds/algo/int_algo.h vendored Normal file

@@ -0,0 +1,147 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_INT_ALGO_H
#define CDSLIB_INT_ALGO_H
#include <cds/algo/bitop.h>
namespace cds { namespace beans {
/// Returns largest previous integer for <tt>log2( n )</tt>
static inline size_t log2floor( size_t n )
{
return n ? cds::bitop::MSBnz( n ) : 0;
}
/// Returns smallest following integer for <tt>log2( n )</tt>
static inline size_t log2ceil( size_t n )
{
size_t i = log2floor( n );
return ( size_t( 1 ) << i ) < n ? i + 1 : i;
}
/// Returns largest previous power of 2 for \p n
/**
Examples:
\code
floor2(0) == 1 // !!!
floor2(1) == 1
floor2(2) == 2
floor2(3) == 2
floor2(4) == 4
floor2(15) == 8
floor2(16) == 16
floor2(17) == 16
\endcode
*/
static inline size_t floor2( size_t n )
{
return size_t(1) << log2floor( n );
}
/// Returns smallest following power of 2 for \p n
/**
Examples:
\code
ceil2(0) == 1 // !!!
ceil2(1) == 1
ceil2(2) == 2
ceil2(3) == 4
ceil2(4) == 4
ceil2(15) == 16
ceil2(16) == 16
ceil2(17) == 32
\endcode
*/
static inline size_t ceil2( size_t n )
{
return size_t(1) << log2ceil( n );
}
/// Checks if \p n is power of 2
constexpr static inline bool is_power2( size_t n ) noexcept
{
return (n & (n - 1)) == 0 && n;
}
/// Returns binary logarithm of \p n if \p n is power of two, otherwise returns 0
static inline size_t log2( size_t n )
{
return is_power2(n) ? log2floor(n) : 0;
}
#if CDS_BUILD_BITS == 32
//@cond
// 64bit specializations
/// Returns largest previous integer for <tt>log2( n )</tt>
static inline uint64_t log2floor( uint64_t n )
{
return n ? cds::bitop::MSBnz( n ) : 0;
}
/// Returns smallest following integer for <tt>log2( n )</tt>
static inline uint64_t log2ceil( uint64_t n )
{
uint64_t i = log2floor( n );
return (uint64_t( 1 ) << i) < n ? i + 1 : i;
}
/// Returns largest previous power of 2 for \p n
/**
Examples:
\code
floor2(0) == 1 // !!!
floor2(1) == 1
floor2(2) == 2
floor2(3) == 2
floor2(4) == 4
floor2(15) == 8
floor2(16) == 16
floor2(17) == 16
\endcode
*/
static inline uint64_t floor2( uint64_t n )
{
return uint64_t( 1 ) << log2floor( n );
}
/// Returns smallest following power of 2 for \p n
/**
Examples:
\code
ceil2(0) == 1 // !!!
ceil2(1) == 1
ceil2(2) == 2
ceil2(3) == 4
ceil2(4) == 4
ceil2(15) == 16
ceil2(16) == 16
ceil2(17) == 32
\endcode
*/
static inline uint64_t ceil2( uint64_t n )
{
return uint64_t( 1 ) << log2ceil( n );
}
/// Checks if \p n is power of 2
constexpr static inline bool is_power2( uint64_t n ) noexcept
{
return (n & (n - 1)) == 0 && n;
}
/// Returns binary logarithm of \p n if \p n is power of two, otherwise returns 0
static inline uint64_t log2( uint64_t n )
{
return is_power2( n ) ? log2floor( n ) : 0;
}
//@endcond
#endif //#if CDS_BUILD_BITS == 32
}} // namespace cds::beans
#endif // #ifndef CDSLIB_INT_ALGO_H
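A few hedged sanity checks reproducing the documented examples above; the demo function name is an assumption:

// Hedged sketch: the power-of-two helpers on values from the doc comments
#include <cds/algo/int_algo.h>
#include <cassert>

void int_algo_demo()
{
    assert( cds::beans::floor2( 17 ) == 16 ); // largest previous power of 2
    assert( cds::beans::ceil2( 17 ) == 32 );  // smallest following power of 2
    assert( cds::beans::log2( 16 ) == 4 );    // exact log2 of a power of two
    static_assert( cds::beans::is_power2( 16 ), "16 is a power of two" );
}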

extern/libcds/cds/algo/split_bitstring.h vendored Normal file

@@ -0,0 +1,445 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_ALGO_SPLIT_BITSTRING_H
#define CDSLIB_ALGO_SPLIT_BITSTRING_H
#include <cds/algo/base.h>
namespace cds { namespace algo {
/// Cuts a bit sequence from fixed-size bit-string
/**
The splitter can be used as an iterator over bit-string.
Each call of \p cut() or \p safe_cut() cuts the bit count specified
and keeps the position inside bit-string for the next call.
The splitter stores a const reference to bit-string, not a copy.
The maximum count of bits that can be cut in a single call is <tt> sizeof(UInt) * 8 </tt>.
The splitter keeps byte order.
Template parameters:
- \p BitString - a fixed-sized type that interprets as bit string
- \p BitStringSize - the size of \p BitString in bytes, default is <tt>sizeof( BitString )</tt>.
You can specify 0 for default.
- \p UInt - an unsigned integer, return type for \p cut(), default is \p unsigned
There are specialized splitters:
- a simplified \p byte_splitter algorithm that is suitable when count is a multiple of 8.
- \p number_splitter algorithm is suitable for a number
*/
template <typename BitString, size_t BitStringSize = sizeof( BitString ), typename UInt = unsigned >
class split_bitstring
{
public:
typedef BitString bitstring; ///< Bit-string type
typedef UInt uint_type; ///< Result type of \p cut() function
static constexpr size_t const c_bitstring_size = BitStringSize ? BitStringSize : sizeof( BitString ); ///< size of \p BitString in bytes
//@cond
static constexpr unsigned const c_nBitPerByte = 8;
//@endcond
public:
/// Initializes the splitter with reference to \p h and zero start bit offset
explicit split_bitstring( bitstring const& h )
: cur_( reinterpret_cast<uint8_t const*>( &h ))
, offset_( 0 )
, first_( cur_ )
, last_( cur_ + c_bitstring_size )
{}
/// Initializes the splitter with reference to \p h and start bit offset \p nBitOffset
split_bitstring( bitstring const& h, size_t nBitOffset )
: cur_( reinterpret_cast<uint8_t const*>( &h ) + nBitOffset / c_nBitPerByte )
, offset_( nBitOffset % c_nBitPerByte )
, first_( reinterpret_cast<uint8_t const*>( &h ))
, last_( first_ + c_bitstring_size )
{}
/// Returns \p true if end-of-string is not reached yet
explicit operator bool() const
{
return !eos();
}
/// Returns \p true if end-of-stream encountered
bool eos() const
{
return cur_ >= last_;
}
/// Cuts next \p count bits from bit-string
/**
For performance reasons, the function does not check the out-of-bounds condition.
To control that use \p safe_cut().
*/
uint_type cut( unsigned count )
{
assert( !eos());
uint_type result = 0;
# if defined( CDS_ARCH_LITTLE_ENDIAN )
for ( unsigned done = 0; done < count; ) {
assert( cur_ < last_ );
unsigned bits = count - done;
if ( bits > c_nBitPerByte - offset_ )
bits = c_nBitPerByte - offset_;
result |= static_cast<uint_type>(( *cur_ >> offset_ ) & (( 1 << bits ) - 1 )) << done;
offset_ += bits;
assert( offset_ <= c_nBitPerByte );
if ( offset_ == c_nBitPerByte ) {
offset_ = 0;
++cur_;
}
done += bits;
}
# else
while ( count ) {
assert( cur_ < last_ );
unsigned bits = count <= ( c_nBitPerByte - offset_ ) ? count : c_nBitPerByte - offset_;
result = ( result << bits ) | (( *cur_ >> offset_ ) & ( ( 1 << bits ) - 1 ));
offset_ += bits;
assert( offset_ <= c_nBitPerByte );
if ( offset_ == c_nBitPerByte ) {
offset_ = 0;
++cur_;
}
count -= bits;
}
# endif
return result;
}
/// Cuts up to \p count from the bit-string
/**
Safe analog of \p cut() but if \p count is more than the rest of bit-string,
only the rest is returned.
When \p eos() condition is met the function returns 0.
*/
uint_type safe_cut( unsigned count )
{
if ( eos())
return 0;
unsigned const rest = static_cast<unsigned>( last_ - cur_ - 1 ) * c_nBitPerByte + ( c_nBitPerByte - offset_ );
if ( rest < count )
count = rest;
return count ? cut( count ) : 0;
}
/// Resets the splitter
void reset() noexcept
{
cur_ = first_;
offset_ = 0;
}
/// Returns pointer to source bitstring
bitstring const * source() const
{
return reinterpret_cast<bitstring const *>( first_ );
}
/// Returns current bit offset from beginning of bit-string
size_t bit_offset() const
{
return offset_ + (cur_ - first_) * c_nBitPerByte;
}
/// Returns how many bits remain
size_t rest_count() const
{
return c_bitstring_size * c_nBitPerByte - bit_offset();
}
/// Returns \p true for any argument
static constexpr bool is_correct( unsigned /*count*/ )
{
return true;
}
private:
//@cond
uint8_t const* cur_;
unsigned offset_;
uint8_t const* const first_;
uint8_t const* const last_;
//@endcond
};
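Before the specialized splitters, a hedged usage sketch of \p split_bitstring; the 128-bit struct and function names are assumptions:

// Hedged sketch: iterating over a 128-bit value in 10-bit chunks
#include <cds/algo/split_bitstring.h>
#include <cstdint>

struct hash128 { uint64_t lo; uint64_t hi; }; // a fixed-size "bit-string" (assumption)

unsigned xor_of_chunks( hash128 const& h )
{
    cds::algo::split_bitstring< hash128 > splitter( h );
    unsigned acc = 0;
    while ( splitter )                  // operator bool(): not yet at end-of-string
        acc ^= splitter.safe_cut( 10 ); // up to 10 bits per call, 0 at eos
    return acc;
}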
/// Simplified \p split_bitstring algorithm when \p count is multiple of 8
template <typename BitString, size_t BitStringSize = sizeof( BitString ), typename UInt = unsigned >
class byte_splitter
{
public:
typedef BitString bitstring; ///< Bit-string type
typedef UInt uint_type; ///< Result type of \p cut() function
static constexpr size_t const c_bitstring_size = BitStringSize ? BitStringSize : sizeof( BitString ); ///< size of \p BitString in bytes
//@cond
static constexpr unsigned const c_nBitPerByte = 8;
//@endcond
public:
/// Initializes the splitter with reference to \p h and zero start bit offset
explicit byte_splitter( bitstring const& h )
: cur_( reinterpret_cast<uint8_t const*>( &h ))
, first_( cur_ )
, last_( cur_ + c_bitstring_size )
{}
/// Initializes the splitter with reference to \p h and start bit offset \p nBitOffset
byte_splitter( bitstring const& h, size_t nBitOffset )
: cur_( reinterpret_cast<uint8_t const*>( &h ) + nBitOffset / c_nBitPerByte )
, first_( reinterpret_cast<uint8_t const*>( &h ))
, last_( first_ + c_bitstring_size )
{
assert( is_correct( static_cast<unsigned>( nBitOffset )));
assert( !eos());
}
/// Returns \p true if end-of-string is not reached yet
explicit operator bool() const
{
return !eos();
}
/// Returns \p true if end-of-stream encountered
bool eos() const
{
return cur_ >= last_;
}
/// Cuts next \p count bits (must be a multiple of 8) from bit-string
/**
For performance reason, the function does not manage out-of-bound condition.
To control that use \p safe_cut().
*/
uint_type cut( unsigned count )
{
assert( !eos());
assert( is_correct( count ));
uint_type result = 0;
# if defined( CDS_ARCH_LITTLE_ENDIAN )
for ( unsigned i = 0; i < count; i += c_nBitPerByte ) {
result |= static_cast<uint_type>( *cur_ ) << i;
++cur_;
}
# else
for ( ; count; count -= c_nBitPerByte ) {
result = ( result << c_nBitPerByte ) | *cur_;
++cur_;
}
# endif
return result;
}
/// Cuts up to \p count bits from the bit-string
/**
Safe analog of \p cut(): if \p count is more than the rest of the bit-string,
only the rest is returned.
When the \p eos() condition is met, the function returns 0.
*/
uint_type safe_cut( unsigned count )
{
if ( eos())
return 0;
unsigned const rest = static_cast<unsigned>( last_ - cur_ ) * c_nBitPerByte; // all remaining bytes; unlike split_bitstring there is no sub-byte offset to subtract
if ( rest < count )
count = rest;
return count ? cut( count ) : 0;
}
/// Resets the splitter
void reset() noexcept
{
cur_ = first_;
}
/// Returns pointer to source bitstring
bitstring const* source() const
{
return reinterpret_cast<bitstring const *>( first_ );
}
/// Returns current bit offset from beginning of bit-string
size_t bit_offset() const
{
return (cur_ - first_) * c_nBitPerByte;
}
/// Returns how many bits remain
size_t rest_count() const
{
return c_bitstring_size * c_nBitPerByte - bit_offset();
}
/// Checks if \p count is multiple of 8
static constexpr bool is_correct( unsigned count )
{
return count % 8 == 0;
}
private:
//@cond
uint8_t const* cur_;
uint8_t const* const first_;
uint8_t const* const last_;
//@endcond
};
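
A corresponding hand-written sketch for \p byte_splitter (the 16-bit cut satisfies is_correct(), i.e. it is a multiple of 8):

#include <cds/algo/split_bitstring.h>
#include <cstdint>

// Extract the two low-order bytes of a 64-bit hash in one cut.
inline unsigned first_two_bytes( uint64_t h )
{
    cds::algo::byte_splitter<uint64_t> splitter( h );
    return splitter.cut( 16 );   // 16 bits = 2 bytes; whole-byte cuts skip the bit bookkeeping
}
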
/// Cuts a bit sequence from a number
/**
The splitter can be used as an iterator over bit representation of the number of type \p Int.
Each call of \p cut() or \p safe_cut() cuts the bit count specified
and keeps the position inside the number for the next call.
*/
template <typename Int>
class number_splitter
{
public:
typedef Int int_type; ///< Number type
typedef Int uint_type; ///< Result type of \p cut() function
//@cond
static constexpr unsigned const c_nBitPerByte = 8;
//@endcond
public:
/// Initializes the splitter with number \p n and initial bit offset 0
explicit number_splitter( int_type n )
: number_( n )
, shift_( 0 )
{}
/// Initializes the splitter with number \p n and initial bit offset \p initial_offset
number_splitter( int_type n, size_t initial_offset )
: number_( n )
, shift_( static_cast<unsigned>( initial_offset ))
{
assert( initial_offset < sizeof( int_type ) * c_nBitPerByte );
}
/// Returns \p true if end-of-string is not reached yet
explicit operator bool() const
{
return !eos();
}
/// Returns \p true if end-of-stream encountered
bool eos() const
{
return shift_ >= sizeof( int_type ) * c_nBitPerByte;
}
/// Cuts the next \p count bits (must be less than the bit width of \p int_type, see \p is_correct()) from the number
/**
For performance reasons, the function does not check the out-of-bound condition.
Use \p safe_cut() when that check is needed.
*/
int_type cut( unsigned count )
{
assert( !eos());
assert( is_correct( count ));
int_type result = ( number_ >> shift_ ) & (( static_cast<int_type>( 1 ) << count ) - 1 ); // widen the literal before shifting: count may exceed 31 for 64-bit int_type
shift_ += count;
return result;
}
/// Cuts up to \p count bits from the number
/**
Safe analog of \p cut(): if \p count is more than the rest of \p int_type,
only the rest is returned.
When the \p eos() condition is met, the function returns 0.
*/
int_type safe_cut( unsigned count )
{
if ( eos())
return 0;
unsigned rest = static_cast<unsigned>( rest_count());
if ( rest < count )
count = rest;
return count ? cut( count ) : 0;
}
/// Resets the splitter
void reset() noexcept
{
shift_ = 0;
}
/// Returns initial number
int_type source() const
{
return number_;
}
/// Returns current bit offset from beginning of the number
size_t bit_offset() const
{
return shift_;
}
/// Returns how many bits remain
size_t rest_count() const
{
return sizeof( int_type ) * c_nBitPerByte - shift_;
}
/// Checks that \p count is less than the bit width of \p int_type
static constexpr bool is_correct( unsigned count )
{
return count < sizeof( int_type ) * c_nBitPerByte;
}
private:
//@cond
int_type const number_;
unsigned shift_;
//@endcond
};
/// Metafunction to select the most suitable splitter for type \p BitString of size \p BitStringSize
template <typename BitString, size_t BitStringSize >
struct select_splitter
{
typedef split_bitstring< BitString, BitStringSize > type; ///< metafunction result
};
//@cond
# define CDS_SELECT_NUMBER_SPLITTER( num_type ) \
template <> struct select_splitter<num_type, sizeof(num_type)> { typedef number_splitter<num_type> type; }
CDS_SELECT_NUMBER_SPLITTER( int );
CDS_SELECT_NUMBER_SPLITTER( unsigned );
CDS_SELECT_NUMBER_SPLITTER( short );
CDS_SELECT_NUMBER_SPLITTER( unsigned short );
CDS_SELECT_NUMBER_SPLITTER( long );
CDS_SELECT_NUMBER_SPLITTER( unsigned long );
CDS_SELECT_NUMBER_SPLITTER( long long );
CDS_SELECT_NUMBER_SPLITTER( unsigned long long );
# undef CDS_SELECT_NUMBER_SPLITTER
//@endcond
}} // namespace cds::algo
#endif // #ifndef CDSLIB_ALGO_SPLIT_BITSTRING_H
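
The effect of \p select_splitter can be checked at compile time; a sketch (the Hash128 struct is illustrative only):

#include <cds/algo/split_bitstring.h>
#include <type_traits>

// Plain integers resolve to the cheap shift-based number_splitter...
static_assert( std::is_same<
    cds::algo::select_splitter<unsigned long long, sizeof( unsigned long long )>::type,
    cds::algo::number_splitter<unsigned long long>>::value, "integer -> number_splitter" );

// ...while any other bit-string falls back to the generic splitter.
struct Hash128 { unsigned char b[16]; };
static_assert( std::is_same<
    cds::algo::select_splitter<Hash128, sizeof( Hash128 )>::type,
    cds::algo::split_bitstring<Hash128, sizeof( Hash128 )>>::value, "struct -> split_bitstring" );
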

extern/libcds/cds/compiler/backoff.h vendored Normal file
@ -0,0 +1,39 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_BACKOFF_IMPL_H
#define CDSLIB_COMPILER_BACKOFF_IMPL_H
#include <cds/details/defs.h>
#if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS)
# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86
# include <cds/compiler/vc/x86/backoff.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64
# include <cds/compiler/vc/amd64/backoff.h>
# else
# error "MS VC++ compiler: unsupported processor architecture"
# endif
#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL
# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86
# include <cds/compiler/gcc/x86/backoff.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64
# include <cds/compiler/gcc/amd64/backoff.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64
# include <cds/compiler/gcc/ia64/backoff.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC
# include <cds/compiler/gcc/sparc/backoff.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64
# include <cds/compiler/gcc/ppc64/backoff.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM7
# include <cds/compiler/gcc/arm7/backoff.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM8
# include <cds/compiler/gcc/arm8/backoff.h>
# endif
#else
# error "Undefined compiler"
#endif
#endif // #ifndef CDSLIB_COMPILER_BACKOFF_IMPL_H
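
Each per-architecture header only has to provide backoff_nop()/backoff_hint(); the typical consumer is a spin loop. A hand-written sketch of the idea, assuming a platform (e.g. x86/amd64) that defines backoff_hint():

#include <atomic>
#include <cds/compiler/backoff.h>

// Spin until `flag` becomes true, easing pipeline pressure with the
// platform pause hint on every failed probe.
inline void spin_wait( std::atomic<bool>& flag )
{
    while ( !flag.load( std::memory_order_acquire ))
        cds::backoff::platform::backoff_hint();   // "pause" on x86/amd64
}
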

extern/libcds/cds/compiler/bitop.h vendored Normal file
@ -0,0 +1,43 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_BITOP_H
#define CDSLIB_COMPILER_BITOP_H
// Choose appropriate header for current architecture and compiler
#if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS)
/************************************************************************/
/* MS Visual C++ */
/************************************************************************/
# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86
# include <cds/compiler/vc/x86/bitop.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64
# include <cds/compiler/vc/amd64/bitop.h>
# endif
#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL
/************************************************************************/
/* GCC */
/************************************************************************/
# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86
# include <cds/compiler/gcc/x86/bitop.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64
# include <cds/compiler/gcc/amd64/bitop.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC
# include <cds/compiler/gcc/sparc/bitop.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64
# include <cds/compiler/gcc/ia64/bitop.h>
# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64
# include <cds/compiler/gcc/ppc64/bitop.h>
# endif
#endif // Compiler choice
// Generic (C) implementation
#include <cds/details/bitop_generic.h>
#endif // #ifndef CDSLIB_COMPILER_BITOP_H

extern/libcds/cds/compiler/clang/defs.h vendored Normal file
@ -0,0 +1,127 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_CLANG_DEFS_H
#define CDSLIB_COMPILER_CLANG_DEFS_H
// Compiler version
#define CDS_COMPILER_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
// Compiler name
#define CDS_COMPILER__NAME ("clang " __clang_version__)
#define CDS_COMPILER__NICK "clang"
#if CDS_COMPILER_VERSION < 30600
# error "Compiler version error. Clang version 3.6.0 and above is supported"
#endif
#if __cplusplus < CDS_CPLUSPLUS_11
# error C++11 and above is required
#endif
#if defined(_LIBCPP_VERSION) && !defined(CDS_USE_BOOST_ATOMIC) && CDS_COMPILER_VERSION < 30700
// Note: Clang libc++ atomic leads to program crash.
// So, we use libcds atomic implementation
# define CDS_USE_LIBCDS_ATOMIC
#endif
// clang for Windows
#if defined( _MSC_VER )
# define CDS_OS_INTERFACE CDS_OSI_WINDOWS
# if defined(_WIN64)
# define CDS_OS_TYPE CDS_OS_WIN64
# define CDS_OS__NAME "Win64"
# define CDS_OS__NICK "Win64"
# elif defined(_WIN32)
# define CDS_OS_TYPE CDS_OS_WIN32
# define CDS_OS__NAME "Win32"
# define CDS_OS__NICK "Win32"
# endif
#endif
#include <cds/compiler/gcc/compiler_macro.h>
#define alignof __alignof__
// C++11 thread_local keyword
#if !(CDS_OS_TYPE == CDS_OS_OSX && CDS_COMPILER_VERSION < 30600)
// OS X error?
// See http://stackoverflow.com/questions/23791060/c-thread-local-storage-clang-503-0-40-mac-osx
// http://stackoverflow.com/questions/28094794/why-does-apple-clang-disallow-c11-thread-local-when-official-clang-supports
// clang 3.6 ok?..
# define CDS_CXX11_THREAD_LOCAL_SUPPORT
#endif
// Attributes
#if CDS_COMPILER_VERSION >= 30600
# if __cplusplus == CDS_CPLUSPLUS_11 // C++11
# define CDS_DEPRECATED( reason ) [[gnu::deprecated(reason)]]
# else // C++14
# define CDS_DEPRECATED( reason ) [[deprecated(reason)]]
# endif
#endif
#define CDS_NORETURN __attribute__((__noreturn__))
// *************************************************
// Features
#if defined(__has_feature) && __has_feature(thread_sanitizer)
# ifndef CDS_THREAD_SANITIZER_ENABLED
# define CDS_THREAD_SANITIZER_ENABLED
# endif
#endif
#if defined(__has_feature) && __has_feature(address_sanitizer)
# ifndef CDS_ADDRESS_SANITIZER_ENABLED
# define CDS_ADDRESS_SANITIZER_ENABLED
# endif
#endif
// *************************************************
// Alignment macro
#define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n)))
#define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n)))
#define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n)))
// likely/unlikely
#define cds_likely( expr ) __builtin_expect( !!( expr ), 1 )
#define cds_unlikely( expr ) __builtin_expect( !!( expr ), 0 )
// Exceptions
#if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1
# define CDS_EXCEPTION_ENABLED
#endif
// double-width CAS support - only for libc++
// You can manually suppress wide-atomic support by defining in compiler command line:
// for 64bit platform: -DCDS_DISABLE_128BIT_ATOMIC
// for 32bit platform: -DCDS_DISABLE_64BIT_ATOMIC
#ifdef _LIBCPP_VERSION
# if CDS_BUILD_BITS == 64
# if !defined( CDS_DISABLE_128BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 )
# define CDS_DCAS_SUPPORT
# endif
# else
# if !defined( CDS_DISABLE_64BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 )
# define CDS_DCAS_SUPPORT
# endif
# endif
#endif
//if constexpr support (C++17)
#ifndef constexpr_if
# if defined( __cpp_if_constexpr ) && __cpp_if_constexpr >= 201606
# define constexpr_if if constexpr
# endif
#endif
#include <cds/compiler/gcc/compiler_barriers.h>
#endif // #ifndef CDSLIB_COMPILER_CLANG_DEFS_H

extern/libcds/cds/compiler/cxx11_atomic.h vendored Normal file
File diff suppressed because it is too large
extern/libcds/cds/compiler/defs.h vendored Normal file
@ -0,0 +1,51 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_DEFS_H
#define CDSLIB_COMPILER_DEFS_H
// __cplusplus values
#define CDS_CPLUSPLUS_11 201103L
#define CDS_CPLUSPLUS_14 201402L
#define CDS_CPLUSPLUS_17 201703L
// VC 2017 is not fully C++11-compatible yet
//#if __cplusplus < CDS_CPLUSPLUS_11
//# error C++11 and above is required
//#endif
#if CDS_COMPILER == CDS_COMPILER_MSVC
# include <cds/compiler/vc/defs.h>
#elif CDS_COMPILER == CDS_COMPILER_GCC
# include <cds/compiler/gcc/defs.h>
#elif CDS_COMPILER == CDS_COMPILER_INTEL
# include <cds/compiler/icl/defs.h>
#elif CDS_COMPILER == CDS_COMPILER_CLANG
# include <cds/compiler/clang/defs.h>
#elif CDS_COMPILER == CDS_COMPILER_UNKNOWN
# error Unknown compiler. Compilation aborted
#else
# error Unknown value of CDS_COMPILER macro
#endif
#ifndef CDS_EXPORT_API
# define CDS_EXPORT_API
#endif
#ifndef cds_likely
# define cds_likely( expr ) expr
# define cds_unlikely( expr ) expr
#endif
//if constexpr support (C++17)
#ifndef constexpr_if
# define constexpr_if if
#endif
// Features
#include <cds/compiler/feature_tsan.h>
#endif // #ifndef CDSLIB_COMPILER_DEFS_H

extern/libcds/cds/compiler/feature_tsan.h vendored Normal file
@ -0,0 +1,91 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_FEATURE_TSAN_H
#define CDSLIB_COMPILER_FEATURE_TSAN_H
// Thread Sanitizer annotations.
// From http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/annotate_happens_before.cc?view=markup
//@cond
#ifdef CDS_THREAD_SANITIZER_ENABLED
# define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr) AnnotateHappensBefore(__FILE__, __LINE__, reinterpret_cast<void*>(addr))
# define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr) AnnotateHappensAfter(__FILE__, __LINE__, reinterpret_cast<void*>(addr))
# define CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
# define CDS_TSAN_ANNOTATE_IGNORE_READS_END AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
# define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN \
CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN; \
CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN
# define CDS_TSAN_ANNOTATE_IGNORE_RW_END \
CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;\
CDS_TSAN_ANNOTATE_IGNORE_READS_END
# define CDS_TSAN_ANNOTATE_NEW_MEMORY( addr, sz ) AnnotateNewMemory( __FILE__, __LINE__, reinterpret_cast<void *>(addr), sz )
// Publish/unpublish - DEPRECATED
#if 0
# define CDS_TSAN_ANNOTATE_PUBLISH_MEMORY_RANGE( addr, sz ) AnnotatePublishMemoryRange( __FILE__, __LINE__, reinterpret_cast<void *>(addr), sz )
# define CDS_TSAN_ANNOTATE_UNPUBLISH_MEMORY_RANGE( addr, sz ) AnnotateUnpublishMemoryRange( __FILE__, __LINE__, reinterpret_cast<void *>(addr), sz )
#endif
# define CDS_TSAN_ANNOTATE_MUTEX_CREATE( addr ) AnnotateRWLockCreate( __FILE__, __LINE__, reinterpret_cast<void *>(addr))
# define CDS_TSAN_ANNOTATE_MUTEX_DESTROY( addr ) AnnotateRWLockDestroy( __FILE__, __LINE__, reinterpret_cast<void *>(addr))
// must be called after actual acquire
# define CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( addr ) AnnotateRWLockAcquired( __FILE__, __LINE__, reinterpret_cast<void *>(addr), 1 )
// must be called before actual release
# define CDS_TSAN_ANNOTATE_MUTEX_RELEASED( addr ) AnnotateRWLockReleased( __FILE__, __LINE__, reinterpret_cast<void *>(addr), 1 )
// provided by TSan
extern "C" {
void AnnotateHappensBefore(const char *f, int l, void *addr);
void AnnotateHappensAfter(const char *f, int l, void *addr);
void AnnotateIgnoreReadsBegin(const char *f, int l);
void AnnotateIgnoreReadsEnd(const char *f, int l);
void AnnotateIgnoreWritesBegin(const char *f, int l);
void AnnotateIgnoreWritesEnd(const char *f, int l);
#if 0
void AnnotatePublishMemoryRange( const char *f, int l, void * mem, size_t size );
void AnnotateUnpublishMemoryRange( const char *f, int l, void * addr, size_t size );
#endif
void AnnotateNewMemory( const char *f, int l, void * mem, size_t size );
void AnnotateRWLockCreate( const char *f, int l, void* m );
void AnnotateRWLockDestroy( const char *f, int l, void* m );
void AnnotateRWLockAcquired( const char *f, int l, void *m, long is_w );
void AnnotateRWLockReleased( const char *f, int l, void *m, long is_w );
}
#else // CDS_THREAD_SANITIZER_ENABLED
# define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr)
# define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr)
# define CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN
# define CDS_TSAN_ANNOTATE_IGNORE_READS_END
# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN
# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END
# define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN
# define CDS_TSAN_ANNOTATE_IGNORE_RW_END
#if 0
# define CDS_TSAN_ANNOTATE_PUBLISH_MEMORY_RANGE( addr, sz )
# define CDS_TSAN_ANNOTATE_UNPUBLISH_MEMORY_RANGE( addr, sz )
#endif
# define CDS_TSAN_ANNOTATE_NEW_MEMORY( addr, sz )
# define CDS_TSAN_ANNOTATE_MUTEX_CREATE( addr )
# define CDS_TSAN_ANNOTATE_MUTEX_DESTROY( addr )
# define CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( addr )
# define CDS_TSAN_ANNOTATE_MUTEX_RELEASED( addr )
#endif
//@endcond
#endif // #ifndef CDSLIB_COMPILER_FEATURE_TSAN_H
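
A hand-written sketch of how such annotations are typically used: declaring a happens-before edge around a synchronization that TSan cannot deduce on its own (the publish/consume functions and the Node type are illustrative, not from the library):

struct Node;

// Writer side: announce the ordering point, then do the racy publish.
inline void publish( Node* volatile & slot, Node* n )
{
    CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &slot );
    slot = n;              // ordered by means TSan cannot see (e.g. inline asm)
}

// Reader side: the matching happens-after edge silences the false positive.
inline Node* consume( Node* volatile & slot )
{
    Node* n = slot;
    CDS_TSAN_ANNOTATE_HAPPENS_AFTER( &slot );
    return n;
}

Since the macros expand to nothing when TSan is disabled, the sketch compiles either way.
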

extern/libcds/cds/compiler/gcc/amd64/backoff.h vendored Normal file
@ -0,0 +1,35 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_AMD64_BACKOFF_H
#define CDSLIB_COMPILER_GCC_AMD64_BACKOFF_H
//@cond none
namespace cds { namespace backoff {
namespace gcc { namespace amd64 {
# define CDS_backoff_nop_defined
static inline void backoff_nop()
{
asm volatile ( "nop;" );
}
# define CDS_backoff_hint_defined
static inline void backoff_hint()
{
asm volatile ( "pause;" );
}
}} // namespace gcc::amd64
namespace platform {
using namespace gcc::amd64;
}
}} // namespace cds::backoff
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_BACKOFF_H

extern/libcds/cds/compiler/gcc/amd64/bitop.h vendored Normal file
@ -0,0 +1,159 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_AMD64_BITOP_H
#define CDSLIB_COMPILER_GCC_AMD64_BITOP_H
//@cond none
namespace cds {
namespace bitop { namespace platform { namespace gcc { namespace amd64 {
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
static inline int msb32( uint32_t nArg )
{
int nRet;
__asm__ __volatile__ (
"bsrl %[nArg], %[nRet] ;\n\t"
"jnz 1f ;\n\t"
"xorl %[nRet], %[nRet] ;\n\t"
"subl $1, %[nRet] ;\n\t"
"1:"
"addl $1, %[nRet] ;\n\t"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return nRet;
}
# define cds_bitop_msb32nz_DEFINED
static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
int nRet;
__asm__ __volatile__ (
"bsrl %[nArg], %[nRet] ;"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return nRet;
}
// LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_lsb32_DEFINED
static inline int lsb32( uint32_t nArg )
{
int nRet;
__asm__ __volatile__ (
"bsfl %[nArg], %[nRet] ;"
"jnz 1f ;"
"xorl %[nRet], %[nRet] ;"
"subl $1, %[nRet] ;"
"1:"
"addl $1, %[nRet] ;"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return nRet;
}
// LSB - return index (0..31) of least significant bit in nArg.
// Condition: nArg != 0
# define cds_bitop_lsb32nz_DEFINED
static inline int lsb32nz( uint32_t nArg )
{
assert( nArg != 0 );
int nRet;
__asm__ __volatile__ (
"bsfl %[nArg], %[nRet] ;"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return nRet;
}
# define cds_bitop_msb64_DEFINED
static inline int msb64( uint64_t nArg )
{
uint64_t nRet;
asm volatile (
"bsrq %[nArg], %[nRet] ;\n\t"
"jnz 1f ;\n\t"
"xorq %[nRet], %[nRet] ;\n\t"
"subq $1, %[nRet] ;\n\t"
"1:"
"addq $1, %[nRet] ;\n\t"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return (int) nRet;
}
# define cds_bitop_msb64nz_DEFINED
static inline int msb64nz( uint64_t nArg )
{
assert( nArg != 0 );
uint64_t nRet;
__asm__ __volatile__ (
"bsrq %[nArg], %[nRet] ;"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return (int) nRet;
}
// LSB - return index (1..64) of least significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_lsb64_DEFINED
static inline int lsb64( uint64_t nArg )
{
uint64_t nRet;
__asm__ __volatile__ (
"bsfq %[nArg], %[nRet] ;"
"jnz 1f ;"
"xorq %[nRet], %[nRet] ;"
"subq $1, %[nRet] ;"
"1:"
"addq $1, %[nRet] ;"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return (int) nRet;
}
// LSB - return index (0..63) of least significant bit in nArg.
// Condition: nArg != 0
# define cds_bitop_lsb64nz_DEFINED
static inline int lsb64nz( uint64_t nArg )
{
assert( nArg != 0 );
uint64_t nRet;
__asm__ __volatile__ (
"bsfq %[nArg], %[nRet] ;"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return (int) nRet;
}
}} // namespace gcc::amd64
using namespace gcc::amd64;
}}} // namespace cds::bitop::platform
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_BITOP_H
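
Since msb32()/msb64() report a 1-based index and return 0 for a zero argument, floor(log2) is simply the index minus one. A hand-written sketch (these platform functions are normally reached through the <cds/algo/bitop.h> wrappers, which the sketch assumes is included):

#include <cassert>
#include <cstdint>
#include <cds/algo/bitop.h>

// floor(log2(x)) via the BSR-based msb32() above; x must be non-zero.
inline unsigned floor_log2( uint32_t x )
{
    assert( x != 0 );
    return static_cast<unsigned>( cds::bitop::platform::msb32( x )) - 1;
}
// floor_log2( 1 ) == 0, floor_log2( 0x80000000u ) == 31
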

extern/libcds/cds/compiler/gcc/amd64/cxx11_atomic.h vendored Normal file
@ -0,0 +1,203 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H
#define CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H
#include <cstdint>
#include <cds/compiler/gcc/x86/cxx11_atomic32.h>
//@cond
namespace cds { namespace cxx11_atomic {
namespace platform { inline namespace gcc { inline namespace amd64 {
//-----------------------------------------------------------------------------
// 64bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 8 ));
T prev = expected;
fence_before(mo_success);
__asm__ __volatile__ (
"lock ; cmpxchgq %[desired], %[pDest]"
: [prev] "+a" (prev), [pDest] "+m" (*pDest)
: [desired] "r" (desired)
);
bool success = (prev == expected);
expected = prev;
if (success)
fence_after(mo_success);
else
fence_after(mo_fail);
return success;
}
template <typename T>
static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas64_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T load64( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
assert( cds::details::is_aligned( pSrc, 8 ));
T v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 8 ));
fence_before(order);
__asm__ __volatile__ (
"xchgq %[v], %[pDest]"
: [v] "+r" (v), [pDest] "+m" (*pDest)
);
fence_after(order);
return v;
}
template <typename T>
static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
assert( cds::details::is_aligned( pDest, 8 ));
if (order != memory_order_seq_cst) {
fence_before(order);
*pDest = val;
}
else {
exchange64( pDest, val, order);
}
}
# define CDS_ATOMIC_fetch64_add_defined
template <typename T>
static inline T fetch64_add( T volatile * pDest, T v, memory_order order) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 8 ));
fence_before(order);
__asm__ __volatile__ (
"lock ; xaddq %[v], %[pDest]"
: [v] "+r" (v), [pDest] "+m" (*pDest)
);
fence_after(order);
return v;
}
# define CDS_ATOMIC_fetch64_sub_defined
template <typename T>
static inline T fetch64_sub( T volatile * pDest, T v, memory_order order) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 8 ));
fence_before(order);
__asm__ __volatile__ (
"negq %[v] ; \n"
"lock ; xaddq %[v], %[pDest]"
: [v] "+r" (v), [pDest] "+m" (*pDest)
);
fence_after(order);
return v;
}
//-----------------------------------------------------------------------------
// pointer primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order );
}
template <typename T>
static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange_ptr( pDest, src, order );
}
}
template <typename T>
static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
T * v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast<uint64_t *>( &expected ), (uint64_t) desired, mo_success, mo_fail );
}
template <typename T>
static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail );
}
}} // namespace gcc::amd64
} // namespace platform
}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H
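
These primitives back the cds::cxx11_atomic front end; the canonical consumption pattern is a CAS loop. A hand-written sketch against cas64_strong() (direct use of the platform layer is for illustration only, and assumes this header and its memory_order enumerators are in scope):

#include <cstdint>

// Atomically replace the low byte of *p, preserving the other 56 bits.
inline void set_low_byte( uint64_t volatile * p, uint8_t b )
{
    using namespace cds::cxx11_atomic;           // memory_order enumerators
    uint64_t expected = platform::load64( p, memory_order_acquire );
    uint64_t desired;
    do {
        desired = ( expected & ~uint64_t( 0xFF )) | b;
        // on failure, cas64_strong reloads `expected` with the current value
    } while ( !platform::cas64_strong( p, expected, desired,
                                       memory_order_release, memory_order_relaxed ));
}
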

extern/libcds/cds/compiler/gcc/arm7/backoff.h vendored Normal file
@ -0,0 +1,27 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_ARM7_BACKOFF_H
#define CDSLIB_COMPILER_GCC_ARM7_BACKOFF_H
//@cond none
namespace cds { namespace backoff {
namespace gcc { namespace arm7 {
# define CDS_backoff_hint_defined
static inline void backoff_hint()
{
asm volatile( "yield" ::: "memory" );
}
}} // namespace gcc::arm7
namespace platform {
using namespace gcc::arm7;
}
}} // namespace cds::backoff
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_ARM7_BACKOFF_H

extern/libcds/cds/compiler/gcc/arm8/backoff.h vendored Normal file
@ -0,0 +1,27 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_ARM8_BACKOFF_H
#define CDSLIB_COMPILER_GCC_ARM8_BACKOFF_H
//@cond none
namespace cds { namespace backoff {
namespace gcc { namespace arm8 {
# define CDS_backoff_hint_defined
static inline void backoff_hint()
{
asm volatile( "yield" ::: "memory" );
}
}} // namespace gcc::arm8
namespace platform {
using namespace gcc::arm8;
}
}} // namespace cds::backoff
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_ARM8_BACKOFF_H

extern/libcds/cds/compiler/gcc/compiler_barriers.h vendored Normal file
@ -0,0 +1,13 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_COMPILER_BARRIERS_H
#define CDSLIB_COMPILER_GCC_COMPILER_BARRIERS_H
#define CDS_COMPILER_RW_BARRIER __asm__ __volatile__ ( "" ::: "memory" )
#define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER
#define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER
#endif // #ifndef CDSLIB_COMPILER_GCC_COMPILER_BARRIERS_H

extern/libcds/cds/compiler/gcc/compiler_macro.h vendored Normal file
@ -0,0 +1,170 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_COMPILER_MACRO_H
#define CDSLIB_COMPILER_GCC_COMPILER_MACRO_H
// OS interface && OS name
#ifndef CDS_OS_TYPE
# if defined( __linux__ )
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_LINUX
# define CDS_OS__NAME "linux"
# define CDS_OS__NICK "linux"
# elif defined( __sun__ )
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_SUN_SOLARIS
# define CDS_OS__NAME "Sun Solaris"
# define CDS_OS__NICK "sun"
# elif defined( __hpux__ )
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_HPUX
# define CDS_OS__NAME "HP-UX"
# define CDS_OS__NICK "hpux"
# elif defined( _AIX )
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_AIX
# define CDS_OS__NAME "AIX"
# define CDS_OS__NICK "aix"
# elif defined( __FreeBSD__ )
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_FREE_BSD
# define CDS_OS__NAME "FreeBSD"
# define CDS_OS__NICK "freebsd"
# elif defined( __OpenBSD__ )
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_OPEN_BSD
# define CDS_OS__NAME "OpenBSD"
# define CDS_OS__NICK "openbsd"
# elif defined( __NetBSD__ )
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_NET_BSD
# define CDS_OS__NAME "NetBSD"
# define CDS_OS__NICK "netbsd"
# elif defined(__MINGW32__) || defined( __MINGW64__)
# define CDS_OS_INTERFACE CDS_OSI_WINDOWS
# define CDS_OS_TYPE CDS_OS_MINGW
# define CDS_OS__NAME "MinGW"
# define CDS_OS__NICK "mingw"
# elif defined(__MACH__)
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_OSX
# define CDS_OS__NAME "OS X"
# define CDS_OS__NICK "osx"
# else
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_PTHREAD
# define CDS_OS__NAME "pthread"
# define CDS_OS__NICK "pthread"
# endif
#endif // #ifndef CDS_OS_TYPE
// Processor architecture
#if defined(__arm__) && !defined(__ARM_ARCH)
// GCC 4.6 does not define __ARM_ARCH
# if defined(__ARM_ARCH_8A__) || defined(__ARM_ARCH_8S__) || defined(__aarch64__) || defined(__ARM_ARCH_ISA_A64)
# define __ARM_ARCH 8
# elif defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7S__)
# define __ARM_ARCH 7
# else
# define __ARM_ARCH 5
# endif
#endif
#if defined(__x86_64__) || defined(__amd64__) || defined(__amd64)
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64
# define CDS_BUILD_BITS 64
# define CDS_PROCESSOR__NAME "Intel x86-64"
# define CDS_PROCESSOR__NICK "amd64"
#elif defined(__i386__)
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86
# define CDS_BUILD_BITS 32
# define CDS_PROCESSOR__NAME "Intel x86"
# define CDS_PROCESSOR__NICK "x86"
#elif defined(sparc) || defined (__sparc__)
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_SPARC
# define CDS_PROCESSOR__NAME "Sparc"
# define CDS_PROCESSOR__NICK "sparc"
# ifdef __arch64__
# define CDS_BUILD_BITS 64
# else
# error Sparc 32bit is not supported
# endif
#elif defined( __ia64__)
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_IA64
# define CDS_BUILD_BITS 64
# define CDS_PROCESSOR__NAME "Intel IA64"
# define CDS_PROCESSOR__NICK "ia64"
#elif defined(_ARCH_PPC64)
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_PPC64
# define CDS_BUILD_BITS 64
# define CDS_PROCESSOR__NAME "IBM PowerPC64"
# define CDS_PROCESSOR__NICK "ppc64"
#elif defined(__arm__) && __SIZEOF_POINTER__ == 4 && __ARM_ARCH >= 7 && __ARM_ARCH < 8
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_ARM7
# define CDS_BUILD_BITS 32
# define CDS_PROCESSOR__NAME "ARM v7"
# define CDS_PROCESSOR__NICK "arm7"
#elif ( defined(__arm__) || defined(__aarch64__)) && __ARM_ARCH >= 8
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_ARM8
# define CDS_BUILD_BITS 64
# define CDS_PROCESSOR__NAME "ARM v8"
# define CDS_PROCESSOR__NICK "arm8"
#elif defined(__arm__) || defined(__aarch64__)
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_ARM8
# define CDS_PROCESSOR__NAME "ARM"
# define CDS_PROCESSOR__NICK "arm"
# if __SIZEOF_POINTER__ == 8
# define CDS_BUILD_BITS 64
# else
# define CDS_BUILD_BITS 32
# endif
#else
# if defined(CDS_USE_LIBCDS_ATOMIC)
# error "Libcds does not support atomic implementation for the processor architecture. Try to use C++11-compatible compiler and remove CDS_USE_LIBCDS_ATOMIC flag from compiler command line"
# else
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN
# define CDS_BUILD_BITS 32
# define CDS_PROCESSOR__NAME "unknown"
# define CDS_PROCESSOR__NICK "unknown"
# endif
#endif
#if CDS_OS_TYPE == CDS_OS_MINGW
# ifdef CDS_BUILD_LIB
# define CDS_EXPORT_API __declspec(dllexport)
# elif !defined(CDS_BUILD_STATIC_LIB)
# define CDS_EXPORT_API __declspec(dllimport)
# endif
#else
# ifndef __declspec
# define __declspec(_x)
# endif
#endif
// Byte order
#if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN)
# ifdef __BYTE_ORDER__
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define CDS_ARCH_LITTLE_ENDIAN
# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define CDS_ARCH_BIG_ENDIAN
# endif
# else
# warning "Undefined byte order for current architecture (no __BYTE_ORDER__ preprocessor definition)"
# endif
#endif
// Sanitizer attributes
// Example: CDS_SUPPRESS_SANITIZE( "function" )
#ifdef CDS_ADDRESS_SANITIZER_ENABLED
# define CDS_SUPPRESS_SANITIZE( ... ) __attribute__(( no_sanitize( __VA_ARGS__ )))
#else
# define CDS_SUPPRESS_SANITIZE( ... )
#endif
#endif // #ifndef CDSLIB_COMPILER_GCC_COMPILER_MACRO_H

extern/libcds/cds/compiler/gcc/defs.h vendored Normal file
@ -0,0 +1,107 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_DEFS_H
#define CDSLIB_COMPILER_GCC_DEFS_H
// Compiler version
#define CDS_COMPILER_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if CDS_COMPILER_VERSION < 40800
# error "Compiler version error. GCC version 4.8.0 and above is supported"
#endif
// Compiler name
#ifdef __VERSION__
# define CDS_COMPILER__NAME ("GNU C++ " __VERSION__)
#else
# define CDS_COMPILER__NAME "GNU C++"
#endif
#define CDS_COMPILER__NICK "gcc"
#if __cplusplus < CDS_CPLUSPLUS_11
# error C++11 and above is required
#endif
#include <cds/compiler/gcc/compiler_macro.h>
#define alignof __alignof__
// ***************************************
// C++11 features
// C++11 thread_local keyword
#define CDS_CXX11_THREAD_LOCAL_SUPPORT
// *************************************************
// Features
// If you run under Thread Sanitizer, pass -DCDS_THREAD_SANITIZER_ENABLED in compiler command line
// UPD: GCC 5+ seems to have the predefined macro __SANITIZE_THREAD__, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64354
#if defined(__SANITIZE_THREAD__) && !defined(CDS_THREAD_SANITIZER_ENABLED)
# define CDS_THREAD_SANITIZER_ENABLED
#endif
// *************************************************
// Alignment macro
#define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n)))
#define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n)))
#define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n)))
// Attributes
#if CDS_COMPILER_VERSION >= 40900
# if __cplusplus == CDS_CPLUSPLUS_11 // C++11
# define CDS_DEPRECATED( reason ) [[gnu::deprecated(reason)]]
# else // C++14
# define CDS_DEPRECATED( reason ) [[deprecated(reason)]]
# endif
#else
// GCC 4.8
# define CDS_DEPRECATED( reason ) __attribute__((deprecated( reason )))
#endif
#define CDS_NORETURN __attribute__((__noreturn__))
// likely/unlikely
#define cds_likely( expr ) __builtin_expect( !!( expr ), 1 )
#define cds_unlikely( expr ) __builtin_expect( !!( expr ), 0 )
// Exceptions
#if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1
# define CDS_EXCEPTION_ENABLED
#endif
// double-width CAS support
// note: gcc-4.8 does not support double-word atomics
// gcc-4.9: a lot of crashes when use DCAS
// gcc-7: 128-bit atomic is not lock-free, see https://gcc.gnu.org/ml/gcc/2017-01/msg00167.html
// You can manually suppress wide-atomic support by defining in compiler command line:
// for 64bit platform: -DCDS_DISABLE_128BIT_ATOMIC
// for 32bit platform: -DCDS_DISABLE_64BIT_ATOMIC
#if CDS_COMPILER_VERSION >= 50000
# if CDS_BUILD_BITS == 64
# if !defined( CDS_DISABLE_128BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 ) && CDS_COMPILER_VERSION < 70000
# define CDS_DCAS_SUPPORT
# endif
# else
# if !defined( CDS_DISABLE_64BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 )
# define CDS_DCAS_SUPPORT
# endif
# endif
#endif
//if constexpr support (C++17)
#ifndef constexpr_if
# if defined( __cpp_if_constexpr ) && __cpp_if_constexpr >= 201606
# define constexpr_if if constexpr
# endif
#endif
#include <cds/compiler/gcc/compiler_barriers.h>
#endif // #ifndef CDSLIB_COMPILER_GCC_DEFS_H

extern/libcds/cds/compiler/gcc/ia64/backoff.h vendored Normal file
@ -0,0 +1,34 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_IA64_BACKOFF_H
#define CDSLIB_COMPILER_GCC_IA64_BACKOFF_H
//@cond none
namespace cds { namespace backoff {
namespace gcc { namespace ia64 {
# define CDS_backoff_hint_defined
static inline void backoff_hint()
{
asm volatile ( "hint @pause;;" );
}
# define CDS_backoff_nop_defined
static inline void backoff_nop()
{
asm volatile ( "nop;;" );
}
}} // namespace gcc::ia64
namespace platform {
using namespace gcc::ia64;
}
}} // namespace cds::backoff
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_IA64_BACKOFF_H

extern/libcds/cds/compiler/gcc/ia64/bitop.h vendored Normal file
@ -0,0 +1,65 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_IA64_BITOP_H
#define CDSLIB_COMPILER_GCC_IA64_BITOP_H
//@cond none
namespace cds {
namespace bitop { namespace platform { namespace gcc { namespace ia64 {
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
static inline int msb32( uint32_t nArg )
{
if ( !nArg )
return 0;
uint64_t x = nArg;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
uint64_t nRes;
asm __volatile__( "popcnt %0=%1\n\t" : "=r" (nRes) : "r" (x));
return (int) nRes;
}
// This does not compile on HP-UX; the reason is unclear
#if CDS_OS_TYPE != CDS_OS_HPUX
// MSB - return index (0..31) of most significant bit in nArg.
// !!! nArg != 0
# define cds_bitop_msb32nz_DEFINED
static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
long double d = nArg;
long nExp;
asm __volatile__("getf.exp %0=%1\n\t" : "=r"(nExp) : "f"(d));
return (int) (nExp - 0xffff);
}
// MSB - return index (0..63) of most significant bit in nArg.
// !!! nArg != 0
# define cds_bitop_msb64nz_DEFINED
static inline int msb64nz( uint64_t nArg )
{
assert( nArg != 0 );
long double d = nArg;
long nExp;
asm __volatile__("getf.exp %0=%1\n\t" : "=r" (nExp) : "f" (d));
return (int) (nExp - 0xffff);
}
#endif // #if CDS_OS_TYPE != CDS_OS_HPUX
}} // namespace gcc::ia64
using namespace gcc::ia64;
}}} // namespace cds::bitop::platform
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_IA64_BITOP_H

extern/libcds/cds/compiler/gcc/ia64/cxx11_atomic.h vendored Normal file
@ -0,0 +1,653 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_IA64_CXX11_ATOMIC_H
#define CDSLIB_COMPILER_GCC_IA64_CXX11_ATOMIC_H
/*
Source:
1. load/store: http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001932.html
2. Mapping to C++ Memory Model: http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
*/
#include <cstdint>
//@cond
namespace cds { namespace cxx11_atomic {
namespace platform { inline namespace gcc { inline namespace ia64 {
static inline void itanium_full_fence() noexcept
{
__asm__ __volatile__ ( "mf \n\t" ::: "memory" );
}
static inline void fence_before( memory_order order ) noexcept
{
switch(order) {
case memory_order_relaxed:
case memory_order_consume:
case memory_order_acquire:
break;
case memory_order_release:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_seq_cst:
itanium_full_fence();
break;
}
}
static inline void fence_after( memory_order order ) noexcept
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_relaxed:
case memory_order_consume:
case memory_order_release:
break;
case memory_order_seq_cst:
itanium_full_fence();
break;
}
}
//-----------------------------------------------------------------------------
// fences
//-----------------------------------------------------------------------------
static inline void thread_fence(memory_order order) noexcept
{
switch(order)
{
case memory_order_relaxed:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_seq_cst:
itanium_full_fence();
break;
default:;
}
}
static inline void signal_fence(memory_order order) noexcept
{
// C++11: 29.8.8: only compiler optimization, no hardware instructions
switch(order)
{
case memory_order_relaxed:
break;
case memory_order_consume:
case memory_order_release:
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
default:;
}
}
#define CDS_ITANIUM_ATOMIC_LOAD( n_bytes, n_bits ) \
template <typename T> \
static inline T load##n_bits( T volatile const * pSrc, memory_order order ) noexcept \
{ \
static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \
assert( order == memory_order_relaxed \
|| order == memory_order_consume \
|| order == memory_order_acquire \
|| order == memory_order_seq_cst \
) ; \
assert( pSrc ) ; \
T val ; \
__asm__ __volatile__ ( \
"ld" #n_bytes ".acq %[val] = [%[pSrc]] \n\t" \
: [val] "=r" (val) \
: [pSrc] "r" (pSrc) \
: "memory" \
) ; \
return val ; \
}
#define CDS_ITANIUM_ATOMIC_STORE( n_bytes, n_bits ) \
template <typename T> \
static inline void store##n_bits( T volatile * pDest, T val, memory_order order ) noexcept \
{ \
static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \
assert( order == memory_order_relaxed \
|| order == memory_order_release \
|| order == memory_order_seq_cst \
) ; \
assert( pDest ) ; \
if ( order == memory_order_seq_cst ) { \
__asm__ __volatile__ ( \
"st" #n_bytes ".rel [%[pDest]] = %[val] \n\t" \
"mf \n\t" \
:: [pDest] "r" (pDest), [val] "r" (val) \
: "memory" \
) ; \
} \
else { \
__asm__ __volatile__ ( \
"st" #n_bytes ".rel [%[pDest]] = %[val] \n\t" \
:: [pDest] "r" (pDest), [val] "r" (val) \
: "memory" \
) ; \
fence_after(order) ; \
} \
}
#define CDS_ITANIUM_ATOMIC_CAS( n_bytes, n_bits ) \
template <typename T> \
static inline bool cas##n_bits##_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order /*mo_fail*/ ) noexcept \
{ \
static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \
T current ; \
switch(mo_success) { \
case memory_order_relaxed: \
case memory_order_consume: \
case memory_order_acquire: \
__asm__ __volatile__ ( \
"mov ar.ccv = %[expected] ;;\n\t" \
"cmpxchg" #n_bytes ".acq %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \
: [current] "=r" (current) \
: [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \
: "ar.ccv", "memory" \
); \
break ; \
case memory_order_release: \
__asm__ __volatile__ ( \
"mov ar.ccv = %[expected] ;;\n\t" \
"cmpxchg" #n_bytes ".rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \
: [current] "=r" (current) \
: [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \
: "ar.ccv", "memory" \
); \
break ; \
case memory_order_acq_rel: \
case memory_order_seq_cst: \
__asm__ __volatile__ ( \
"mov ar.ccv = %[expected] ;;\n\t" \
"cmpxchg" #n_bytes ".rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \
"mf \n\t" \
: [current] "=r" (current) \
: [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \
: "ar.ccv", "memory" \
); \
break; \
default: \
assert(false); \
} \
bool bSuccess = expected == current ; \
expected = current ; \
return bSuccess ; \
} \
template <typename T> \
static inline bool cas##n_bits##_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept \
{ return cas##n_bits##_strong( pDest, expected, desired, mo_success, mo_fail ); }
// xchg is performed with acquire semantics
#define CDS_ITANIUM_ATOMIC_EXCHANGE( n_bytes, n_bits ) \
template <typename T> \
static inline T exchange##n_bits( T volatile * pDest, T val, memory_order order ) noexcept \
{ \
static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \
assert( pDest ) ; \
T current ; \
switch(order) \
{ \
case memory_order_relaxed: \
case memory_order_consume: \
case memory_order_acquire: \
__asm__ __volatile__ ( \
"xchg" #n_bytes " %[current] = [%[pDest]], %[val]\n\t" \
: [current] "=r" (current) \
: [pDest] "r" (pDest), [val] "r" (val) \
: "memory" \
); \
break; \
case memory_order_acq_rel: \
case memory_order_release: \
case memory_order_seq_cst: \
__asm__ __volatile__ ( \
"mf \n\t" \
"xchg" #n_bytes " %[current] = [%[pDest]], %[val]\n\t" \
: [current] "=r" (current) \
: [pDest] "r" (pDest), [val] "r" (val) \
: "memory" \
); \
break; \
default: assert(false); \
} \
return current ; \
}
#define CDS_ITANIUM_ATOMIC_FETCH_ADD( n_bytes, n_add ) \
switch (order) { \
case memory_order_relaxed: \
case memory_order_consume: \
case memory_order_acquire: \
__asm__ __volatile__ ( \
"fetchadd" #n_bytes ".acq %[cur] = [%[pDest]], " #n_add " \n\t" \
: [cur] "=r" (cur) \
: [pDest] "r" (pDest) \
: "memory" \
); \
break ; \
case memory_order_release: \
__asm__ __volatile__ ( \
"fetchadd" #n_bytes ".rel %[cur] = [%[pDest]], " #n_add " \n\t" \
: [cur] "=r" (cur) \
: [pDest] "r" (pDest) \
: "memory" \
); \
break ; \
case memory_order_acq_rel: \
case memory_order_seq_cst: \
__asm__ __volatile__ ( \
"fetchadd" #n_bytes ".rel %[cur] = [%[pDest]], " #n_add " \n\t" \
"mf \n\t" \
: [cur] "=r" (cur) \
: [pDest] "r" (pDest) \
: "memory" \
); \
break ; \
default: \
assert(false); \
}
//-----------------------------------------------------------------------------
// 8bit primitives
//-----------------------------------------------------------------------------
CDS_ITANIUM_ATOMIC_LOAD( 1, 8 )
CDS_ITANIUM_ATOMIC_STORE( 1, 8 )
CDS_ITANIUM_ATOMIC_CAS( 1, 8 )
CDS_ITANIUM_ATOMIC_EXCHANGE( 1, 8 )
//-----------------------------------------------------------------------------
// 16bit primitives
//-----------------------------------------------------------------------------
CDS_ITANIUM_ATOMIC_LOAD( 2, 16 )
CDS_ITANIUM_ATOMIC_STORE( 2, 16 )
CDS_ITANIUM_ATOMIC_CAS( 2, 16 )
CDS_ITANIUM_ATOMIC_EXCHANGE( 2, 16 )
//-----------------------------------------------------------------------------
// 32bit primitives
//-----------------------------------------------------------------------------
CDS_ITANIUM_ATOMIC_LOAD( 4, 32 )
CDS_ITANIUM_ATOMIC_STORE( 4, 32 )
CDS_ITANIUM_ATOMIC_CAS( 4, 32 )
CDS_ITANIUM_ATOMIC_EXCHANGE( 4, 32 )
# define CDS_ATOMIC_fetch32_add_defined
template <typename T>
static inline T fetch32_add( T volatile * pDest, T val, memory_order order) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( pDest );
T cur;
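// Itanium's fetchadd instruction accepts only the immediates +/-1, 4, 8, 16;
// any other delta falls back to the CAS loop in the default branch below.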
switch ( val ) {
case 1:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 1 );
break;
case 4:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 4 );
break;
case 8:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 8 );
break;
case 16:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 16 );
break;
default:
cur = load32( pDest, memory_order_relaxed );
do {} while ( !cas32_strong( pDest, cur, cur + val, order, memory_order_relaxed ));
break;
}
return cur;
}
# define CDS_ATOMIC_fetch32_sub_defined
template <typename T>
static inline T fetch32_sub( T volatile * pDest, T val, memory_order order) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( pDest );
T cur;
switch ( val ) {
case 1:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -1 );
break;
case 4:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -4 );
break;
case 8:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -8 );
break;
case 16:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -16 );
break;
default:
cur = load32( pDest, memory_order_relaxed );
do {} while ( !cas32_strong( pDest, cur, cur - val, order, memory_order_relaxed ));
break;
}
return cur;
}
//-----------------------------------------------------------------------------
// 64bit primitives
//-----------------------------------------------------------------------------
CDS_ITANIUM_ATOMIC_LOAD( 8, 64 )
CDS_ITANIUM_ATOMIC_STORE( 8, 64 )
CDS_ITANIUM_ATOMIC_CAS( 8, 64 )
CDS_ITANIUM_ATOMIC_EXCHANGE( 8, 64 )
# define CDS_ATOMIC_fetch64_add_defined
template <typename T>
static inline T fetch64_add( T volatile * pDest, T val, memory_order order) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( pDest );
T cur;
switch ( val ) {
case 1:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 1 );
break;
case 4:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 4 );
break;
case 8:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 8 );
break;
case 16:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 16 );
break;
default:
cur = load64( pDest, memory_order_relaxed );
do {} while ( !cas64_strong( pDest, cur, cur + val, order, memory_order_relaxed ));
break;
}
return cur;
}
# define CDS_ATOMIC_fetch64_sub_defined
template <typename T>
static inline T fetch64_sub( T volatile * pDest, T val, memory_order order) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( pDest );
T cur;
switch ( val ) {
case 1:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -1 );
break;
case 4:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -4 );
break;
case 8:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -8 );
break;
case 16:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -16 );
break;
default:
cur = load64( pDest, memory_order_relaxed );
do {} while ( !cas64_strong( pDest, cur, cur - val, order, memory_order_relaxed ));
break;
}
return cur;
}
//-----------------------------------------------------------------------------
// pointer primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept
{
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
T * val;
__asm__ __volatile__ (
"ld8.acq %[val] = [%[pSrc]] \n\t"
: [val] "=r" (val)
: [pSrc] "r" (pSrc)
: "memory"
);
return val;
}
template <typename T>
static inline void store_ptr( T * volatile * pDest, T * val, memory_order order ) noexcept
{
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
if ( order == memory_order_seq_cst ) {
__asm__ __volatile__ (
"st8.rel [%[pDest]] = %[val] \n\t"
"mf \n\t"
:: [pDest] "r" (pDest), [val] "r" (val)
: "memory"
);
}
else {
__asm__ __volatile__ (
"st8.rel [%[pDest]] = %[val] \n\t"
:: [pDest] "r" (pDest), [val] "r" (val)
: "memory"
);
fence_after(order);
}
}
template <typename T>
static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T *) == 8, "Illegal size of operand" );
assert( pDest );
T * current;
switch(mo_success) {
case memory_order_relaxed:
case memory_order_consume:
case memory_order_acquire:
__asm__ __volatile__ (
"mov ar.ccv = %[expected] ;;\n\t"
"cmpxchg8.acq %[current] = [%[pDest]], %[desired], ar.ccv\n\t"
: [current] "=r" (current)
: [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired)
: "ar.ccv", "memory"
);
break;
case memory_order_release:
__asm__ __volatile__ (
"mov ar.ccv = %[expected] ;;\n\t"
"cmpxchg8.rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t"
: [current] "=r" (current)
: [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired)
: "ar.ccv", "memory"
);
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ (
"mov ar.ccv = %[expected] ;;\n\t"
"cmpxchg8.rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t"
"mf \n\t"
: [current] "=r" (current)
: [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired)
: "ar.ccv", "memory"
);
break;
default:
assert(false);
}
bool bSuccess = expected == current;
expected = current;
if ( !bSuccess )
fence_after( mo_fail );
return bSuccess;
}
template <typename T>
static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T * exchange_ptr( T * volatile * pDest, T * val, memory_order order ) noexcept
{
static_assert( sizeof(T *) == 8, "Illegal size of operand" );
assert( pDest );
T * current;
switch(order) {
case memory_order_relaxed:
case memory_order_consume:
case memory_order_acquire:
__asm__ __volatile__ (
"xchg8 %[current] = [%[pDest]], %[val]\n\t"
: [current] "=r" (current)
: [pDest] "r" (pDest), [val] "r" (val)
: "memory"
);
break;
case memory_order_acq_rel:
case memory_order_release:
case memory_order_seq_cst:
__asm__ __volatile__ (
"mf \n\t"
"xchg8 %[current] = [%[pDest]], %[val]\n\t"
: [current] "=r" (current)
: [pDest] "r" (pDest), [val] "r" (val)
: "memory"
);
break;
default: assert(false);
}
return current;
}
template <typename T> struct atomic_pointer_sizeof { enum { value = sizeof(T) }; };
template <> struct atomic_pointer_sizeof<void> { enum { value = 1 }; };
// It does not work properly:
// atomic.fetch_add( ... ) returns nullptr; the reason is unclear
//# define CDS_ATOMIC_fetch_ptr_add_defined
template <typename T>
static inline T * fetch_ptr_add( T * volatile * pDest, ptrdiff_t val, memory_order order) noexcept
{
static_assert( sizeof(T *) == 8, "Illegal size of operand" );
assert( pDest );
T * cur;
val *= atomic_pointer_sizeof<T>::value;
switch ( val ) {
case 1:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 1 );
break;
case 4:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 4 );
break;
case 8:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 8 );
break;
case 16:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 16 );
break;
default:
cur = load_ptr( pDest, memory_order_relaxed );
do {} while ( !cas_ptr_strong( pDest, cur, reinterpret_cast<T *>(reinterpret_cast<uint8_t *>(cur) + val), order, memory_order_relaxed ));
break;
}
return cur;
}
// It does not work properly:
// atomic.fetch_sub( ... ) returns nullptr; the reason is unclear
//# define CDS_ATOMIC_fetch_ptr_sub_defined
template <typename T>
static inline T * fetch_ptr_sub( T * volatile * pDest, ptrdiff_t val, memory_order order) noexcept
{
static_assert( sizeof(T *) == 8, "Illegal size of operand" );
assert( pDest );
T * cur;
val *= atomic_pointer_sizeof<T>::value;
switch ( val ) {
case 1:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -1 );
break;
case 4:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -4 );
break;
case 8:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -8 );
break;
case 16:
CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -16 );
break;
default:
cur = load_ptr( pDest, memory_order_relaxed );
do {} while ( !cas_ptr_strong( pDest, cur, reinterpret_cast<T *>(reinterpret_cast<uint8_t *>(cur) - val), order, memory_order_relaxed ));
break;
}
return cur;
}
//-----------------------------------------------------------------------------
// atomic flag primitives
//-----------------------------------------------------------------------------
typedef bool atomic_flag_type;
static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) noexcept
{
return exchange8( pFlag, true, order );
}
static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept
{
store8( pFlag, false, order );
}
#undef CDS_ITANIUM_ATOMIC_LOAD
#undef CDS_ITANIUM_ATOMIC_STORE
#undef CDS_ITANIUM_ATOMIC_CAS
#undef CDS_ITANIUM_ATOMIC_EXCHANGE
#undef CDS_ITANIUM_ATOMIC_FETCH_ADD
}} // namespace gcc::ia64
} // namespace platform
}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_IA64_CXX11_ATOMIC_H


@ -0,0 +1,29 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_PPC64_BACKOFF_H
#define CDSLIB_COMPILER_GCC_PPC64_BACKOFF_H
//@cond none
namespace cds { namespace backoff {
namespace gcc { namespace ppc64 {
# define CDS_backoff_hint_defined
static inline void backoff_hint()
{
// Provide a hint that performance will probably be improved
// if shared resources dedicated to the executing processor are released for use by other processors
asm volatile( "or 27,27,27 # yield" );
}
}} // namespace gcc::ppc64
namespace platform {
using namespace gcc::ppc64;
}
}} // namespace cds::backoff
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_PPC64_BACKOFF_H
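// Usage sketch (illustration only, not part of libcds; assumes the backoff
// header above is included): a contended spin loop calls backoff_hint()
// after each failed attempt so an SMT core temporarily lowers the spinning
// hardware thread's priority instead of burning issue slots.
#include <atomic>

inline void spin_lock_sketch( std::atomic_flag& flag ) noexcept
{
    while ( flag.test_and_set( std::memory_order_acquire ))
        cds::backoff::platform::backoff_hint();  // "or 27,27,27" yield hint
}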


@ -0,0 +1,20 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_PPC64_BITOP_H
#define CDSLIB_COMPILER_GCC_PPC64_BITOP_H
//@cond none
namespace cds {
namespace bitop { namespace platform { namespace gcc { namespace ppc64 {
}} // namespace gcc::ppc64
using namespace gcc::ppc64;
}}} // namespace cds::bitop::platform
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_PPC64_BITOP_H


@ -0,0 +1,29 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_SPARC_BACKOFF_H
#define CDSLIB_COMPILER_GCC_SPARC_BACKOFF_H
//@cond none
namespace cds { namespace backoff {
namespace gcc { namespace Sparc {
# define CDS_backoff_nop_defined
static inline void backoff_nop()
{
asm volatile ( "nop;" );
}
}} // namespace gcc::Sparc
namespace platform {
using namespace gcc::Sparc;
}
}} // namespace cds::backoff
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_SPARC_BACKOFF_H


@ -0,0 +1,45 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_SPARC_BITOP_H
#define CDSLIB_COMPILER_GCC_SPARC_BITOP_H
//@cond none
namespace cds {
namespace bitop { namespace platform { namespace gcc { namespace Sparc {
// MSB - return index (1..64) of most significant bit in nArg. If nArg == 0 return 0
// Source: UltraSPARC Architecture 2007
//
// Test result: this variant and its variations are about 100 times slower than the generic implementation :-(
static inline int sparc_msb64( uint64_t nArg )
{
uint64_t result;
asm volatile (
"neg %[nArg], %[result] \n\t"
"xnor %[nArg], %[result], %%g5 \n\t"
"popc %%g5, %[result] \n\t"
"movrz %[nArg], %%g0, %[result] \n\t"
: [result] "=r" (result)
: [nArg] "r" (nArg)
: "g5"
);
return result;
}
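// Note: the neg/xnor/popc sequence computes popc( ~(nArg ^ -nArg) ), the count
// of trailing zero bits plus one. Example: nArg = 0b0110 gives
// nArg ^ -nArg = ...11111100, complement 0b11, popc = 2 - the 1-based index of
// the LEAST significant set bit. Despite the header comment this appears to
// return the LSB rather than the MSB; since cds_bitop_msb64_DEFINED is never
// defined in this header, the routine is dead code and the generic
// implementation is used anyway.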
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
static inline int sparc_msb32( uint32_t nArg )
{
return sparc_msb64( (uint64_t) nArg );
}
}} // namespace gcc::Sparc
using namespace gcc::Sparc;
}}} // namespace cds::bitop::platform
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_SPARC_BITOP_H


@ -0,0 +1,610 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_SPARC_CXX11_ATOMIC_H
#define CDSLIB_COMPILER_GCC_SPARC_CXX11_ATOMIC_H
#include <cstdint>
/*
Source:
1. [Doug Lea "JSR-133 Cookbook for Compiler Writers]:
Acquire semantics: load; LoadLoad+LoadStore
Release semantics: LoadStore+StoreStore; store
2. boost::atomic library by Helge Bahman
3. OpenSparc source code
*/
#if CDS_OS_TYPE == CDS_OS_LINUX
# define CDS_SPARC_RMO_MEMORY_MODEL
#endif
#define CDS_SPARC_MB_FULL "membar #Sync \n\t"
#ifdef CDS_SPARC_RMO_MEMORY_MODEL
// RMO memory model (Linux only?..) Untested
# define CDS_SPARC_MB_LL_LS "membar #LoadLoad|#LoadStore \n\t"
# define CDS_SPARC_MB_LS_SS "membar #LoadStore|#StoreStore \n\t"
# define CDS_SPARC_MB_LL_LS_SS "membar #LoadLoad|#LoadStore|#StoreStore \n\t"
#else
// TSO memory model (default; Solaris uses this model)
# define CDS_SPARC_MB_LL_LS
# define CDS_SPARC_MB_LS_SS
# define CDS_SPARC_MB_LL_LS_SS
#endif
#define CDS_SPARC_MB_ACQ CDS_SPARC_MB_LL_LS
#define CDS_SPARC_MB_REL CDS_SPARC_MB_LS_SS
#define CDS_SPARC_MB_ACQ_REL CDS_SPARC_MB_LL_LS_SS
#define CDS_SPARC_MB_SEQ_CST CDS_SPARC_MB_FULL
//@cond
namespace cds { namespace cxx11_atomic {
namespace platform { inline namespace gcc { inline namespace Sparc {
static inline void fence_before( memory_order order ) noexcept
{
switch(order) {
case memory_order_relaxed:
case memory_order_acquire:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acq_rel:
__asm__ __volatile__ ( "" CDS_SPARC_MB_REL ::: "memory" );
break;
case memory_order_seq_cst:
__asm__ __volatile__ ( "" CDS_SPARC_MB_FULL ::: "memory" );
break;
}
}
static inline void fence_after( memory_order order ) noexcept
{
switch(order) {
case memory_order_relaxed:
case memory_order_consume:
case memory_order_release:
break;
case memory_order_acquire:
case memory_order_acq_rel:
__asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ ::: "memory" );
break;
case memory_order_seq_cst:
__asm__ __volatile__ ( "" CDS_SPARC_MB_FULL ::: "memory" );
break;
}
}
//-----------------------------------------------------------------------------
// fences
//-----------------------------------------------------------------------------
static inline void thread_fence(memory_order order) noexcept
{
switch(order)
{
case memory_order_relaxed:
case memory_order_consume:
break;
case memory_order_acquire:
__asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ ::: "memory" );
break;
case memory_order_release:
__asm__ __volatile__ ( "" CDS_SPARC_MB_REL ::: "memory" );
break;
case memory_order_acq_rel:
__asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ_REL ::: "memory" );
break;
case memory_order_seq_cst:
__asm__ __volatile__ ( "" CDS_SPARC_MB_SEQ_CST ::: "memory" );
break;
default:;
}
}
static inline void signal_fence(memory_order order) noexcept
{
// C++11: 29.8.8: only compiler optimization, no hardware instructions
switch(order)
{
case memory_order_relaxed:
break;
case memory_order_consume:
case memory_order_release:
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
default:;
}
}
//-----------------------------------------------------------------------------
// atomic flag primitives
//-----------------------------------------------------------------------------
typedef unsigned char atomic_flag_type;
static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) noexcept
{
atomic_flag_type fCur;
fence_before( order );
__asm__ __volatile__(
"ldstub [%[pFlag]], %[fCur] \n\t"
: [fCur] "=r"(fCur)
: [pFlag] "r"(pFlag)
: "memory", "cc"
);
fence_after( order );
return fCur != 0;
}
static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept
{
fence_before( order );
__asm__ __volatile__(
CDS_SPARC_MB_REL
"stub %%g0, [%[pFlag]] \n\t"
:: [pFlag] "r"(pFlag)
: "memory"
);
fence_after( order );
}
//-----------------------------------------------------------------------------
// 32bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
fence_before(order);
*pDest = src;
fence_after(order);
}
template <typename T>
static inline T load32( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
fence_before(order);
T v = *pSrc;
fence_after(order);
return v;
}
template <typename T>
static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( pDest );
fence_before( mo_success );
__asm__ __volatile__(
"cas [%[pDest]], %[expected], %[desired]"
: [desired] "+r" (desired)
: [pDest] "r" (pDest), [expected] "r" (expected)
: "memory"
);
// desired contains current value
bool bSuccess = desired == expected;
if ( bSuccess )
fence_after( mo_success );
else {
fence_after(mo_fail);
expected = desired;
}
return bSuccess;
}
template <typename T>
static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas32_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T exchange32( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( pDest );
// This primitive could be implemented via "swap" instruction but "swap" is deprecated in UltraSparc
T cur = load32( pDest, memory_order_relaxed );
do {} while ( !cas32_strong( pDest, cur, v, order, memory_order_relaxed ));
return cur;
}
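// The read-then-CAS-until-it-sticks pattern above is reused verbatim by
// exchange64/exchange8/exchange16 below; in portable C++ it reads roughly
// (sketch only):
//
//    T cur = a.load( std::memory_order_relaxed );
//    while ( !a.compare_exchange_weak( cur, v, order, std::memory_order_relaxed ))
//        ;   // on failure, cur is refreshed with the value actually observed
//    return cur;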
//-----------------------------------------------------------------------------
// 64bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T load64( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
fence_before(order);
T v = *pSrc;
fence_after(order);
return v;
}
template <typename T>
static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
fence_before(order);
*pDest = val;
fence_after(order);
}
template <typename T>
static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( pDest );
fence_before( mo_success );
__asm__ __volatile__(
"casx [%[pDest]], %[expected], %[desired]"
: [desired] "+r" (desired)
: [pDest] "r" (pDest), [expected] "r" (expected)
: "memory"
);
// desired contains current value
bool bSuccess = desired == expected;
if ( bSuccess ) {
fence_after( mo_success );
}
else {
fence_after(mo_fail);
expected = desired;
}
return bSuccess;
}
template <typename T>
static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas64_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( pDest );
T cur = load64( pDest, memory_order_relaxed );
do {} while ( !cas64_strong( pDest, cur, v, order, memory_order_relaxed ));
return cur;
}
//-----------------------------------------------------------------------------
// 8bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
fence_before( order );
*pDest = src;
fence_after( order );
}
template <typename T>
static inline T load8( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
fence_before( order );
T v = *pSrc;
fence_after( order );
return v;
}
template <typename T>
static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
assert( pDest );
union u32 {
uint32_t w;
T c[4];
};
static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" );
u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 );
size_t const nCharIdx = (size_t)( uintptr_t( pDest ) & 0x03 );
u32 uExpected;
u32 uDesired;
bool bSuccess;
for (;;) {
uExpected.w =
uDesired.w = pDest32->w;
uExpected.c[nCharIdx] = expected;
uDesired.c[nCharIdx] = desired;
bSuccess = cas32_weak( reinterpret_cast<uint32_t volatile *>(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail );
if ( bSuccess || uExpected.c[nCharIdx] != expected )
break;
}
expected = uExpected.c[nCharIdx];
return bSuccess;
}
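// The technique above widens the byte CAS to the aligned 4-byte word that
// contains it: read the word, patch the target byte into both the expected
// and desired images, then retry the 32-bit CAS while only *other* bytes of
// the word change underneath. The loop exits on success, or as soon as the
// byte itself no longer matches `expected` - a genuine CAS failure. Indexing
// through the union is endian-safe because c[nCharIdx] names the same byte
// in memory that pDest points at.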
template <typename T>
static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
assert( pDest );
union u32 {
uint32_t w;
T c[4];
};
static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" );
u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 );
size_t const nCharIdx = (size_t)( uintptr_t( pDest ) & 0x03 );
u32 uExpected;
u32 uDesired;
uExpected.w =
uDesired.w = pDest32->w;
uExpected.c[nCharIdx] = expected;
uDesired.c[nCharIdx] = desired;
bool bSuccess = cas32_weak( reinterpret_cast<uint32_t volatile *>(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail );
expected = uExpected.c[nCharIdx];
return bSuccess;
}
template <typename T>
static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
assert( pDest );
T cur = load8( pDest, memory_order_relaxed );
do {} while ( !cas8_strong( pDest, cur, v, order, memory_order_relaxed ));
return cur;
}
//-----------------------------------------------------------------------------
// 16bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T load16( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
fence_before( order );
T v = *pSrc;
fence_after( order );
return v;
}
template <typename T>
static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
fence_before(order);
*pDest = src;
fence_after(order);
}
template <typename T>
static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( pDest );
union u32 {
uint32_t w;
T c[2];
};
static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" );
u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 );
size_t const nIdx = (size_t)( (uintptr_t( pDest ) >> 1) & 0x01 );
u32 uExpected;
u32 uDesired;
bool bSuccess;
for (;;) {
uExpected.w =
uDesired.w = pDest32->w;
uExpected.c[nIdx] = expected;
uDesired.c[nIdx] = desired;
bSuccess = cas32_weak( reinterpret_cast<uint32_t volatile *>(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail );
if ( bSuccess || uExpected.c[nIdx] != expected )
break;
}
expected = uExpected.c[nIdx];
return bSuccess;
}
template <typename T>
static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( pDest );
union u32 {
uint32_t w;
T c[2];
};
static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" );
u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 );
size_t const nIdx = (size_t)( (uintptr_t( pDest ) >> 1) & 0x01 );
u32 uExpected;
u32 uDesired;
uExpected.w =
uDesired.w = pDest32->w;
uExpected.c[nIdx] = expected;
uDesired.c[nIdx] = desired;
bool bSuccess = cas32_weak( reinterpret_cast<uint32_t volatile *>(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail );
expected = uExpected.c[nIdx];
return bSuccess;
}
template <typename T>
static inline T exchange16( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( pDest );
T cur = load16( pDest, memory_order_relaxed );
do {} while ( !cas16_strong( pDest, cur, v, order, memory_order_relaxed ));
return cur;
}
//-----------------------------------------------------------------------------
// pointer primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
fence_before(order);
*pDest = src;
fence_after(order);
}
template <typename T>
static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
fence_before( order );
T * v = *pSrc;
fence_after( order );
return v;
}
template <typename T>
static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast<uint64_t *>( &expected ), (uint64_t) desired, mo_success, mo_fail );
}
template <typename T>
static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order );
}
}} // namespace gcc::Sparc
} // namespace platform
}} // namespace cds::cxx11_atomic
//@endcond
#undef CDS_SPARC_MB_ACQ
#undef CDS_SPARC_MB_REL
#undef CDS_SPARC_MB_SEQ_CST
#undef CDS_SPARC_MB_FULL
#undef CDS_SPARC_MB_LL_LS
#undef CDS_SPARC_MB_LS_SS
#undef CDS_SPARC_MB_LL_LS_SS
#endif // #ifndef CDSLIB_COMPILER_GCC_SPARC_CXX11_ATOMIC_H


@ -0,0 +1,35 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_X86_BACKOFF_H
#define CDSLIB_COMPILER_GCC_X86_BACKOFF_H
//@cond none
namespace cds { namespace backoff {
namespace gcc { namespace x86 {
# define CDS_backoff_nop_defined
static inline void backoff_nop()
{
asm volatile ( "nop;" );
}
# define CDS_backoff_hint_defined
static inline void backoff_hint()
{
asm volatile ( "pause;" );
}
}} // namespace gcc::x86
namespace platform {
using namespace gcc::x86;
}
}} // namespace cds::backoff
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_X86_BACKOFF_H


@ -0,0 +1,89 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_X86_BITOP_H
#define CDSLIB_COMPILER_GCC_X86_BITOP_H
//@cond none
namespace cds {
namespace bitop { namespace platform { namespace gcc { namespace x86 {
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
static inline int msb32( uint32_t nArg )
{
int nRet;
__asm__ __volatile__ (
"bsrl %[nArg], %[nRet] ;\n\t"
"jnz 1f ;\n\t"
"xorl %[nRet], %[nRet] ;\n\t"
"subl $1, %[nRet] ;\n\t"
"1:"
"addl $1, %[nRet] ;\n\t"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return nRet;
}
# define cds_bitop_msb32nz_DEFINED
static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
int nRet;
__asm__ __volatile__ (
"bsrl %[nArg], %[nRet] ;"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return nRet;
}
// LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_lsb32_DEFINED
static inline int lsb32( uint32_t nArg )
{
int nRet;
__asm__ __volatile__ (
"bsfl %[nArg], %[nRet] ;"
"jnz 1f ;"
"xorl %[nRet], %[nRet] ;"
"subl $1, %[nRet] ;"
"1:"
"addl $1, %[nRet] ;"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return nRet;
}
// LSB - return index (0..31) of least significant bit in nArg.
// Condition: nArg != 0
# define cds_bitop_lsb32nz_DEFINED
static inline int lsb32nz( uint32_t nArg )
{
assert( nArg != 0 );
int nRet;
__asm__ __volatile__ (
"bsfl %[nArg], %[nRet] ;"
: [nRet] "=a" (nRet)
: [nArg] "r" (nArg)
: "cc"
);
return nRet;
}
}} // namespace gcc::x86
using namespace gcc::x86;
}}} // namespace cds::bitop::platform
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_X86_BITOP_H


@ -0,0 +1,185 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC_H
#define CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC_H
#include <cstdint>
#include <cds/compiler/gcc/x86/cxx11_atomic32.h>
//@cond
namespace cds { namespace cxx11_atomic {
namespace platform { inline namespace gcc { inline namespace x86 {
//-----------------------------------------------------------------------------
// 64bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 8 ));
uint32_t ebxStore;
T prev = expected;
fence_before(mo_success);
// We must save EBX in PIC mode
__asm__ __volatile__ (
"movl %%ebx, %[ebxStore]\n"
"movl %[desiredLo], %%ebx\n"
"lock; cmpxchg8b 0(%[pDest])\n"
"movl %[ebxStore], %%ebx\n"
: [prev] "=A" (prev), [ebxStore] "=m" (ebxStore)
: [desiredLo] "D" ((int)desired), [desiredHi] "c" ((int)(desired >> 32)), [pDest] "S" (pDest), "0" (prev)
: "memory");
bool success = (prev == expected);
if (success)
fence_after(mo_success);
else {
fence_after(mo_fail);
expected = prev;
}
return success;
}
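// cmpxchg8b compares EDX:EAX with the 8-byte memory operand and, on a match,
// stores ECX:EBX into it; the "=A" constraint binds `prev` to the EDX:EAX
// pair. EBX is saved and restored by hand because in 32-bit PIC code it
// holds the GOT base pointer and cannot simply be listed as clobbered.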
template <typename T>
static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas64_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T load64( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
assert( cds::details::is_aligned( pSrc, 8 ));
CDS_UNUSED( order );
T CDS_DATA_ALIGNMENT(8) v;
__asm__ __volatile__(
"movq (%[pSrc]), %[v] ; \n\t"
: [v] "=x" (v)
: [pSrc] "r" (pSrc)
:
);
return v;
}
template <typename T>
static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 8 ));
T cur = load64( pDest, memory_order_relaxed );
do {
} while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed ));
return cur;
}
template <typename T>
static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
assert( cds::details::is_aligned( pDest, 8 ));
if ( order != memory_order_seq_cst ) {
fence_before( order );
// Atomically stores 64bit value by SSE instruction
__asm__ __volatile__(
"movq %[val], (%[pDest]) ; \n\t"
:
: [val] "x" (val), [pDest] "r" (pDest)
: "memory"
);
}
else {
exchange64( pDest, val, order );
}
}
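// The movq path relies on the x86 guarantee that a naturally aligned 8-byte
// SSE load/store is a single atomic access, so release semantics need only a
// compiler fence plus a plain store; seq_cst falls back to the locked
// exchange above to obtain the required store->load ordering.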
//-----------------------------------------------------------------------------
// pointer primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
return (T *) exchange32( (uint32_t volatile *) pDest, (uint32_t) v, order );
}
template <typename T>
static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange_ptr( pDest, src, order );
}
}
template <typename T>
static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
T * v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
return cas32_strong( (uint32_t volatile *) pDest, *reinterpret_cast<uint32_t *>( &expected ), (uint32_t) desired, mo_success, mo_fail );
}
template <typename T>
static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail );
}
}} // namespace gcc::x86
} // namespace platform
}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC_H


@ -0,0 +1,477 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H
#define CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H
#include <cstdint>
#include <cds/details/is_aligned.h>
//@cond
namespace cds { namespace cxx11_atomic {
namespace platform { inline namespace gcc { inline namespace x86 {
static inline void fence_before( memory_order order ) noexcept
{
switch(order) {
case memory_order_relaxed:
case memory_order_acquire:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
}
}
static inline void fence_after( memory_order order ) noexcept
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_relaxed:
case memory_order_consume:
case memory_order_release:
break;
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
}
}
static inline void fence_after_load(memory_order order) noexcept
{
switch(order) {
case memory_order_relaxed:
case memory_order_release:
break;
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_consume:
break;
case memory_order_seq_cst:
__asm__ __volatile__ ( "mfence" ::: "memory" );
break;
default:;
}
}
//-----------------------------------------------------------------------------
// fences
//-----------------------------------------------------------------------------
static inline void thread_fence(memory_order order) noexcept
{
switch(order)
{
case memory_order_relaxed:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_seq_cst:
__asm__ __volatile__ ( "mfence" ::: "memory" );
break;
default:;
}
}
static inline void signal_fence(memory_order order) noexcept
{
// C++11: 29.8.8: only compiler optimization, no hardware instructions
switch(order)
{
case memory_order_relaxed:
break;
case memory_order_consume:
case memory_order_release:
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
default:;
}
}
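// On x86 the hardware memory model (TSO) already orders everything except
// store->load, so the acquire/release fences above reduce to a compiler-only
// barrier (CDS_COMPILER_RW_BARRIER) and only memory_order_seq_cst must emit
// an actual mfence.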
//-----------------------------------------------------------------------------
// 8bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
T prev = expected;
fence_before(mo_success);
__asm__ __volatile__ (
"lock ; cmpxchgb %[desired], %[pDest]"
: [prev] "+a" (prev), [pDest] "+m" (*pDest)
: [desired] "q" (desired)
);
bool success = (prev == expected);
expected = prev;
if (success)
fence_after(mo_success);
else
fence_after(mo_fail);
return success;
}
template <typename T>
static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas8_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
fence_before(order);
__asm__ __volatile__ (
"xchgb %[v], %[pDest]"
: [v] "+q" (v), [pDest] "+m" (*pDest)
);
fence_after(order);
return v;
}
template <typename T>
static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest != NULL );
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange8( pDest, src, order );
}
}
template <typename T>
static inline T load8( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc != NULL );
T v = *pSrc;
fence_after_load( order );
return v;
}
# define CDS_ATOMIC_fetch8_add_defined
template <typename T>
static inline T fetch8_add( T volatile * pDest, T val, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
fence_before(order);
__asm__ __volatile__ (
"lock ; xaddb %[val], %[pDest]"
: [val] "+q" (val), [pDest] "+m" (*pDest)
);
fence_after(order);
return val;
}
# define CDS_ATOMIC_fetch8_sub_defined
template <typename T>
static inline T fetch8_sub( T volatile * pDest, T val, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
fence_before(order);
__asm__ __volatile__ (
"negb %[val] ; \n"
"lock ; xaddb %[val], %[pDest]"
: [val] "+q" (val), [pDest] "+m" (*pDest)
);
fence_after(order);
return val;
}
//-----------------------------------------------------------------------------
// atomic flag primitives
//-----------------------------------------------------------------------------
typedef bool atomic_flag_type;
static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) noexcept
{
return exchange8( pFlag, true, order );
}
static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept
{
store8( pFlag, false, order );
}
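// Together these two primitives suffice for a test-and-set spinlock
// (sketch only, not part of libcds):
//
//    atomic_flag_type lock_flag = false;
//    void lock()   { while ( atomic_flag_tas( &lock_flag, memory_order_acquire )) ; }
//    void unlock() { atomic_flag_clear( &lock_flag, memory_order_release ); }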
//-----------------------------------------------------------------------------
// 16bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T exchange16( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 2 ));
fence_before(order);
__asm__ __volatile__ (
"xchgw %[v], %[pDest]"
: [v] "+q" (v), [pDest] "+m" (*pDest)
);
fence_after(order);
return v;
}
template <typename T>
static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest != NULL );
assert( cds::details::is_aligned( pDest, 2 ));
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange16( pDest, src, order );
}
}
template <typename T>
static inline T load16( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc != NULL );
assert( cds::details::is_aligned( pSrc, 2 ));
T v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 2 ));
T prev = expected;
fence_before(mo_success);
__asm__ __volatile__ (
"lock ; cmpxchgw %[desired], %[pDest]"
: [prev] "+a" (prev), [pDest] "+m" (*pDest)
: [desired] "q" (desired)
);
bool success = prev == expected;
if (success)
fence_after(mo_success);
else {
fence_after(mo_fail);
expected = prev;
}
return success;
}
template <typename T>
static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas16_strong( pDest, expected, desired, mo_success, mo_fail );
}
# define CDS_ATOMIC_fetch16_add_defined
template <typename T>
static inline T fetch16_add( T volatile * pDest, T val, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 2 ));
fence_before(order);
__asm__ __volatile__ (
"lock ; xaddw %[val], %[pDest]"
: [val] "+q" (val), [pDest] "+m" (*pDest)
);
fence_after(order);
return val;
}
# define CDS_ATOMIC_fetch16_sub_defined
template <typename T>
static inline T fetch16_sub( T volatile * pDest, T val, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 2 ));
fence_before(order);
__asm__ __volatile__ (
"negw %[val] ; \n"
"lock ; xaddw %[val], %[pDest]"
: [val] "+q" (val), [pDest] "+m" (*pDest)
);
fence_after(order);
return val;
}
//-----------------------------------------------------------------------------
// 32bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T exchange32( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 4 ));
fence_before(order);
__asm__ __volatile__ (
"xchgl %[v], %[pDest]"
: [v] "+r" (v), [pDest] "+m" (*pDest)
);
fence_after(order);
return v;
}
template <typename T>
static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest != NULL );
assert( cds::details::is_aligned( pDest, 4 ));
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange32( pDest, src, order );
}
}
template <typename T>
static inline T load32( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc != NULL );
assert( cds::details::is_aligned( pSrc, 4 ));
T v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 4 ));
T prev = expected;
fence_before(mo_success);
__asm__ __volatile__ (
"lock ; cmpxchgl %[desired], %[pDest]"
: [prev] "+a" (prev), [pDest] "+m" (*pDest)
: [desired] "r" (desired)
);
bool success = prev == expected;
if (success)
fence_after(mo_success);
else {
fence_after(mo_fail);
expected = prev;
}
return success;
}
template <typename T>
static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas32_strong( pDest, expected, desired, mo_success, mo_fail );
}
// fetch_xxx may be emulated via cas32
// If the platform has special fetch_xxx instruction
// then it should define CDS_ATOMIC_fetch32_xxx_defined macro
# define CDS_ATOMIC_fetch32_add_defined
template <typename T>
static inline T fetch32_add( T volatile * pDest, T v, memory_order order) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 4 ));
fence_before(order);
__asm__ __volatile__ (
"lock ; xaddl %[v], %[pDest]"
: [v] "+r" (v), [pDest] "+m" (*pDest)
);
fence_after(order);
return v;
}
# define CDS_ATOMIC_fetch32_sub_defined
template <typename T>
static inline T fetch32_sub( T volatile * pDest, T v, memory_order order) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 4 ));
fence_before(order);
__asm__ __volatile__ (
"negl %[v] ; \n"
"lock ; xaddl %[v], %[pDest]"
: [v] "+r" (v), [pDest] "+m" (*pDest)
);
fence_after(order);
return v;
}
}}} // namespace platform::gcc::x86
}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H


@ -0,0 +1,30 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_ICL_COMPILER_BARRIERS_H
#define CDSLIB_COMPILER_ICL_COMPILER_BARRIERS_H
#if defined(_MSC_VER) && _MSC_VER < 1700
// VC++ up to vc10
# include <intrin.h>
# pragma intrinsic(_ReadWriteBarrier)
# pragma intrinsic(_ReadBarrier)
# pragma intrinsic(_WriteBarrier)
# define CDS_COMPILER_RW_BARRIER _ReadWriteBarrier()
# define CDS_COMPILER_R_BARRIER _ReadBarrier()
# define CDS_COMPILER_W_BARRIER _WriteBarrier()
#else
// MS VC11+, linux
# include <atomic>
# define CDS_COMPILER_RW_BARRIER std::atomic_thread_fence( std::memory_order_acq_rel )
# define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER
# define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER
#endif
#endif // #ifndef CDSLIB_COMPILER_ICL_COMPILER_BARRIERS_H

extern/libcds/cds/compiler/icl/defs.h vendored Normal file

@ -0,0 +1,133 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_ICL_DEFS_H
#define CDSLIB_COMPILER_ICL_DEFS_H
//@cond
// Compiler version
#ifdef __ICL
# define CDS_COMPILER_VERSION __ICL
#else
# define CDS_COMPILER_VERSION __INTEL_COMPILER
#endif
// Compiler name
// Supported compilers: MS VC 2008, 2010, 2012
//
# define CDS_COMPILER__NAME "Intel C++"
# define CDS_COMPILER__NICK "icl"
// OS name
#if defined(_WIN64)
# define CDS_OS_INTERFACE CDS_OSI_WINDOWS
# define CDS_OS_TYPE CDS_OS_WIN64
# define CDS_OS__NAME "Win64"
# define CDS_OS__NICK "Win64"
#elif defined(_WIN32)
# define CDS_OS_INTERFACE CDS_OSI_WINDOWS
# define CDS_OS_TYPE CDS_OS_WIN32
# define CDS_OS__NAME "Win32"
# define CDS_OS__NICK "Win32"
#elif defined( __linux__ )
# define CDS_OS_INTERFACE CDS_OSI_UNIX
# define CDS_OS_TYPE CDS_OS_LINUX
# define CDS_OS__NAME "linux"
# define CDS_OS__NICK "linux"
#endif
// Processor architecture
#if defined(_M_X64) || defined(_M_AMD64) || defined(__amd64__) || defined(__amd64)
# define CDS_BUILD_BITS 64
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64
# define CDS_PROCESSOR__NAME "AMD64"
# define CDS_PROCESSOR__NICK "amd64"
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
# define CDS_BUILD_BITS 32
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86
# define CDS_PROCESSOR__NAME "Intel x86"
# define CDS_PROCESSOR__NICK "x86"
#else
# define CDS_BUILD_BITS -1
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN
# define CDS_PROCESSOR__NAME "<<Undefined>>"
# error Intel C++ compiler is supported for x86 and amd64 only
#endif
#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
# define __attribute__( _x )
#endif
#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
# ifdef CDS_BUILD_LIB
# define CDS_EXPORT_API __declspec(dllexport)
# else
# define CDS_EXPORT_API __declspec(dllimport)
# endif
#endif
#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
# define alignof __alignof
#else
# define alignof __alignof__
#endif
// *************************************************
// Alignment macro
#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
# define CDS_TYPE_ALIGNMENT(n) __declspec( align(n))
# define CDS_DATA_ALIGNMENT(n) __declspec( align(n))
# define CDS_CLASS_ALIGNMENT(n) __declspec( align(n))
#else
# define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n)))
# define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n)))
# define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n)))
#endif
// Attributes
#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
# define CDS_DEPRECATED( reason ) __declspec(deprecated( reason ))
# define CDS_NORETURN __declspec(noreturn)
#else
# define CDS_DEPRECATED( reason ) __attribute__((deprecated( reason )))
# define CDS_NORETURN __attribute__((__noreturn__))
#endif
// Exceptions
#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
# if defined( _CPPUNWIND )
# define CDS_EXCEPTION_ENABLED
# endif
#else
# if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1
# define CDS_EXCEPTION_ENABLED
# endif
#endif
// Byte order
#if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN)
# if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
# define CDS_ARCH_LITTLE_ENDIAN
# else
# ifdef __BYTE_ORDER__
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define CDS_ARCH_LITTLE_ENDIAN
# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define CDS_ARCH_BIG_ENDIAN
# endif
# else
# warning "Undefined byte order for current architecture (no __BYTE_ORDER__ preprocessor definition)"
# endif
# endif
#endif
// Sanitizer attributes (not supported)
#define CDS_SUPPRESS_SANITIZE( ... )
#include <cds/compiler/icl/compiler_barriers.h>
//@endcond
#endif // #ifndef CDSLIB_COMPILER_ICL_DEFS_H


@ -0,0 +1,35 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_VC_AMD64_BACKOFF_H
#define CDSLIB_COMPILER_VC_AMD64_BACKOFF_H
//@cond none
#include <intrin.h>
namespace cds { namespace backoff {
namespace vc { namespace amd64 {
# define CDS_backoff_hint_defined
static inline void backoff_hint()
{
_mm_pause();
}
# define CDS_backoff_nop_defined
static inline void backoff_nop()
{
__nop();
}
}} // namespace vc::amd64
namespace platform {
using namespace vc::amd64;
}
}} // namespace cds::backoff
//@endcond
#endif // #ifndef CDSLIB_COMPILER_VC_AMD64_BACKOFF_H


@ -0,0 +1,129 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_VC_AMD64_BITOP_H
#define CDSLIB_COMPILER_VC_AMD64_BITOP_H
#if _MSC_VER == 1500
/*
VC 2008 bug:
math.h(136) : warning C4985: 'ceil': attributes not present on previous declaration.
intrin.h(142) : see declaration of 'ceil'
See http://connect.microsoft.com/VisualStudio/feedback/details/381422/warning-of-attributes-not-present-on-previous-declaration-on-ceil-using-both-math-h-and-intrin-h
*/
# pragma warning(push)
# pragma warning(disable: 4985)
# include <intrin.h>
# pragma warning(pop)
#else
# include <intrin.h>
#endif
#pragma intrinsic(_BitScanReverse)
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse64)
#pragma intrinsic(_BitScanForward64)
//@cond none
namespace cds {
namespace bitop { namespace platform { namespace vc { namespace amd64 {
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
static inline int msb32( uint32_t nArg )
{
unsigned long nIndex;
if ( _BitScanReverse( &nIndex, nArg ))
return (int) nIndex + 1;
return 0;
}
# define cds_bitop_msb32nz_DEFINED
static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
_BitScanReverse( &nIndex, nArg );
return (int) nIndex;
}
// LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_lsb32_DEFINED
static inline int lsb32( uint32_t nArg )
{
unsigned long nIndex;
if ( _BitScanForward( &nIndex, nArg ))
return (int) nIndex + 1;
return 0;
}
# define cds_bitop_lsb32nz_DEFINED
static inline int lsb32nz( uint32_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
_BitScanForward( &nIndex, nArg );
return (int) nIndex;
}
# define cds_bitop_msb64_DEFINED
static inline int msb64( uint64_t nArg )
{
unsigned long nIndex;
if ( _BitScanReverse64( &nIndex, nArg ))
return (int) nIndex + 1;
return 0;
}
# define cds_bitop_msb64nz_DEFINED
static inline int msb64nz( uint64_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
_BitScanReverse64( &nIndex, nArg );
return (int) nIndex;
}
# define cds_bitop_lsb64_DEFINED
static inline int lsb64( uint64_t nArg )
{
unsigned long nIndex;
if ( _BitScanForward64( &nIndex, nArg ))
return (int) nIndex + 1;
return 0;
}
# define cds_bitop_lsb64nz_DEFINED
static inline int lsb64nz( uint64_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
_BitScanForward64( &nIndex, nArg );
return (int) nIndex;
}
# define cds_bitop_complement32_DEFINED
static inline bool complement32( uint32_t * pArg, unsigned int nBit )
{
return _bittestandcomplement( reinterpret_cast<long *>( pArg ), nBit ) != 0;
}
# define cds_bitop_complement64_DEFINED
static inline bool complement64( uint64_t * pArg, unsigned int nBit )
{
return _bittestandcomplement64( reinterpret_cast<__int64 *>( pArg ), nBit ) != 0;
}
}} // namespace vc::amd64
using namespace vc::amd64;
}}} // namespace cds::bitop::platform
//@endcond
#endif // #ifndef CDSLIB_COMPILER_VC_AMD64_BITOP_H
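// Illustration (not part of libcds): msb32nz() doubles as an integer log2, so
// a power-of-two round-up can be built on top of it. ceil_pow2_sketch is a
// hypothetical helper, shown only to demonstrate the 0-based index contract
// of the *nz variants.
#include <cassert>
#include <cstdint>

static inline uint32_t ceil_pow2_sketch( uint32_t n )
{
    assert( n > 1 && n <= 0x80000000u );
    // msb32nz returns the 0-based index of the highest set bit
    int const msb = cds::bitop::platform::msb32nz( n );
    return ( n & ( n - 1 )) == 0 ? n : 1u << ( msb + 1 );
}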


@ -0,0 +1,584 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_VC_AMD64_CXX11_ATOMIC_H
#define CDSLIB_COMPILER_VC_AMD64_CXX11_ATOMIC_H
#include <intrin.h>
#include <emmintrin.h> // for 128bit atomic load/store
#include <cds/details/is_aligned.h>
#pragma intrinsic( _InterlockedIncrement )
#pragma intrinsic( _InterlockedDecrement )
#pragma intrinsic( _InterlockedCompareExchange )
#pragma intrinsic( _InterlockedCompareExchangePointer )
#pragma intrinsic( _InterlockedCompareExchange16 )
#pragma intrinsic( _InterlockedCompareExchange64 )
#pragma intrinsic( _InterlockedExchange )
#pragma intrinsic( _InterlockedExchange64 )
#pragma intrinsic( _InterlockedExchangePointer )
#pragma intrinsic( _InterlockedExchangeAdd )
#pragma intrinsic( _InterlockedExchangeAdd64 )
//#pragma intrinsic( _InterlockedAnd )
//#pragma intrinsic( _InterlockedOr )
//#pragma intrinsic( _InterlockedXor )
//#pragma intrinsic( _InterlockedAnd64 )
//#pragma intrinsic( _InterlockedOr64 )
//#pragma intrinsic( _InterlockedXor64 )
#pragma intrinsic( _interlockedbittestandset )
#if _MSC_VER >= 1600
# pragma intrinsic( _InterlockedCompareExchange8 )
# pragma intrinsic( _InterlockedExchange8 )
# pragma intrinsic( _InterlockedExchange16 )
#endif
//@cond
namespace cds { namespace cxx11_atomic {
namespace platform { inline namespace vc { inline namespace amd64 {
static inline void fence_before( memory_order order ) noexcept
{
switch(order) {
case memory_order_relaxed:
case memory_order_acquire:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
}
}
static inline void fence_after( memory_order order ) noexcept
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_relaxed:
case memory_order_consume:
case memory_order_release:
break;
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
}
}
static inline void full_fence()
{
// 64-bit MS VC does not support inline assembler at all,
// so we use a locked InterlockedExchange as the full fence instead of an mfence instruction
long t;
_InterlockedExchange( &t, 0 );
}
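// Any locked read-modify-write on x86/amd64 acts as a full barrier, which is
// why the InterlockedExchange above is a valid substitute for mfence.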
static inline void fence_after_load(memory_order order) noexcept
{
switch(order) {
case memory_order_relaxed:
case memory_order_release:
break;
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_consume:
break;
case memory_order_seq_cst:
full_fence();
break;
default:;
}
}
//-----------------------------------------------------------------------------
// fences
//-----------------------------------------------------------------------------
static inline void thread_fence(memory_order order) noexcept
{
switch(order)
{
case memory_order_relaxed:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_seq_cst:
full_fence();
break;
default:;
}
}
static inline void signal_fence(memory_order order) noexcept
{
// C++11: 29.8.8: only compiler optimization, no hardware instructions
switch(order)
{
case memory_order_relaxed:
break;
case memory_order_consume:
case memory_order_release:
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
default:;
}
}
//-----------------------------------------------------------------------------
// atomic flag primitives
//-----------------------------------------------------------------------------
typedef unsigned char atomic_flag_type;
static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order /*order*/ ) noexcept
{
return _interlockedbittestandset( (long volatile *) pFlag, 0 ) != 0;
}
static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept
{
assert( order != memory_order_acquire
&& order != memory_order_acq_rel
);
fence_before( order );
*pFlag = 0;
fence_after( order );
}
//-----------------------------------------------------------------------------
// 8bit primitives
//-----------------------------------------------------------------------------
#if _MSC_VER >= 1600
# pragma warning(push)
// Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning)
# pragma warning( disable: 4800 )
#endif
template <typename T>
static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
# if _MSC_VER >= 1600
// VC 2010 +
T prev = expected;
expected = (T) _InterlockedCompareExchange8( (char volatile*) pDest, (char) desired, (char) expected );
return expected == prev;
# else
// VC 2008
unsigned int * pnDest = (unsigned int *)( ((unsigned __int64) pDest) & ~((unsigned __int64) 3));
unsigned int nOffset = ((unsigned __int64) pDest) & 3;
unsigned int nExpected;
unsigned int nDesired;
for (;;) {
nExpected =
nDesired = *pnDest;
memcpy( reinterpret_cast<T *>(&nExpected) + nOffset, &expected, sizeof(T));
memcpy( reinterpret_cast<T *>(&nDesired) + nOffset, &desired, sizeof(T));
unsigned int nPrev = (unsigned int) _InterlockedCompareExchange( (long *) pnDest, (long) nDesired, (long) nExpected );
if ( nPrev == nExpected )
return true;
T nByte;
memcpy( &nByte, reinterpret_cast<T *>(&nPrev) + nOffset, sizeof(T));
if ( nByte != expected ) {
expected = nByte;
return false;
}
}
# endif
}
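// The VC 2008 fallback above uses the same word-widening trick as the SPARC
// byte CAS: patch the byte into 32-bit images of the containing word and
// retry _InterlockedCompareExchange until either the word CAS succeeds or
// the byte itself is observed to differ from `expected`.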
#if _MSC_VER >= 1600
# pragma warning(pop)
#endif
template <typename T>
static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas8_strong( pDest, expected, desired, mo_success, mo_fail );
}
#if _MSC_VER >= 1600
# pragma warning(push)
// Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning)
# pragma warning( disable: 4800 )
#endif
template <typename T>
static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
# if _MSC_VER >= 1600
CDS_UNUSED(order);
return (T) _InterlockedExchange8( (char volatile *) pDest, (char) v );
# else
T expected = *pDest;
do {} while ( !cas8_strong( pDest, expected, v, order, memory_order_relaxed ));
return expected;
# endif
}
#if _MSC_VER >= 1600
# pragma warning(pop)
#endif
template <typename T>
static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange8( pDest, src, order );
}
}
template <typename T>
static inline T load8( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
T v = *pSrc;
fence_after_load( order );
return v;
}
//-----------------------------------------------------------------------------
// 16bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 2 ));
// _InterlockedCompareExchange behaves as a read-write memory barrier
T prev = expected;
expected = (T) _InterlockedCompareExchange16( (short *) pDest, (short) desired, (short) expected );
return expected == prev;
}
template <typename T>
static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas16_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T exchange16( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 2 ));
# if _MSC_VER >= 1600
CDS_UNUSED(order);
return (T) _InterlockedExchange16( (short volatile *) pDest, (short) v );
# else
T expected = *pDest;
do {} while ( !cas16_strong( pDest, expected, v, order, memory_order_relaxed ));
return expected;
# endif
}
template <typename T>
static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
assert( cds::details::is_aligned( pDest, 2 ));
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange16( pDest, src, order );
}
}
template <typename T>
static inline T load16( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
assert( cds::details::is_aligned( pSrc, 2 ));
T v = *pSrc;
fence_after_load( order );
return v;
}
//-----------------------------------------------------------------------------
// 32bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T exchange32( T volatile * pDest, T v, memory_order /*order*/ ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 4 ));
return (T) _InterlockedExchange( (long *) pDest, (long) v );
}
template <typename T>
static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
assert( cds::details::is_aligned( pDest, 4 ));
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange32( pDest, src, order );
}
}
template <typename T>
static inline T load32( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
assert( cds::details::is_aligned( pSrc, 4 ));
T v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 4 ));
// _InterlockedCompareExchange behaves as a read-write memory barrier
T prev = expected;
expected = (T) _InterlockedCompareExchange( (long *) pDest, (long) desired, (long) expected );
return expected == prev;
}
template <typename T>
static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas32_strong( pDest, expected, desired, mo_success, mo_fail );
}
// fetch_xxx may be emulated via cas32
// If the platform has special fetch_xxx instruction
// then it should define CDS_ATOMIC_fetch32_xxx_defined macro
# define CDS_ATOMIC_fetch32_add_defined
template <typename T>
static inline T fetch32_add( T volatile * pDest, T v, memory_order /*order*/) noexcept
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 4 ));
// _InterlockedExchangeAdd behaves as a read-write memory barrier
return (T) _InterlockedExchangeAdd( (long *) pDest, (long) v );
}
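// Where no dedicated fetch intrinsic exists, the emulation mentioned in the
// comment above is a plain CAS retry loop. A hedged sketch of a hypothetical
// fetch-and helper, built only on the primitives defined in this header:
template <typename T>
static inline T fetch32_and_emulated( T volatile * pDest, T v, memory_order order ) noexcept
{
    static_assert( sizeof(T) == 4, "Illegal size of operand" );
    T cur = load32( pDest, memory_order_relaxed );
    // cas32_weak refreshes 'cur' with the observed value whenever it fails
    while ( !cas32_weak( pDest, cur, (T)( cur & v ), order, memory_order_relaxed ))
        ;
    return cur;     // the previous value, matching the fetch_xxx contract
}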
//-----------------------------------------------------------------------------
// 64bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 8 ));
// _InterlockedCompareExchange behaves as a read-write memory barrier
T prev = expected;
expected = (T) _InterlockedCompareExchange64( (__int64 *) pDest, (__int64) desired, (__int64) expected );
return expected == prev;
}
template <typename T>
static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas64_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T load64( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
assert( cds::details::is_aligned( pSrc, 8 ));
T v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
T cur = load64( pDest, memory_order_relaxed );
do {
} while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed ));
return cur;
}
template <typename T>
static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
assert( cds::details::is_aligned( pDest, 8 ));
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = val;
}
else {
exchange64( pDest, val, order );
}
}
# define CDS_ATOMIC_fetch64_add_defined
template <typename T>
static inline T fetch64_add( T volatile * pDest, T v, memory_order /*order*/) noexcept
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
assert( cds::details::is_aligned( pDest, 8 ));
// _InterlockedExchangeAdd64 behaves as a read-write memory barrier
return (T) _InterlockedExchangeAdd64( (__int64 *) pDest, (__int64) v );
}
//-----------------------------------------------------------------------------
// pointer primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order /*order*/ ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
return (T *) _InterlockedExchangePointer( (void * volatile *) pDest, reinterpret_cast<void *>(v));
}
template <typename T>
static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange_ptr( pDest, src, order );
}
}
template <typename T>
static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
T * v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
// _InterlockedCompareExchangePointer behaves as a read-write memory barrier
T * prev = expected;
expected = (T *) _InterlockedCompareExchangePointer( (void * volatile *) pDest, (void *) desired, (void *) expected );
return expected == prev;
}
template <typename T>
static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail );
}
}} // namespace vc::amd64
} // namespace platform
}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef CDSLIB_COMPILER_VC_AMD64_CXX11_ATOMIC_H


@ -0,0 +1,19 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H
#define CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H
#include <intrin.h>
#pragma intrinsic(_ReadWriteBarrier)
#pragma intrinsic(_ReadBarrier)
#pragma intrinsic(_WriteBarrier)
#define CDS_COMPILER_RW_BARRIER _ReadWriteBarrier()
#define CDS_COMPILER_R_BARRIER _ReadBarrier()
#define CDS_COMPILER_W_BARRIER _WriteBarrier()
#endif // #ifndef CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H
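// These macros are compiler-only barriers: they forbid the optimizer to move
// memory accesses across the marked point but emit no CPU fence instruction,
// so they order code on the current thread only. An illustrative use (not
// libcds code; cross-thread publication still needs a hardware fence or an
// interlocked operation, which the cxx11_atomic headers add where required):
int          g_payload;     // ordinary variable: the optimizer could otherwise sink this store
volatile int g_ready;

void publish( int v )
{
    g_payload = v;
    CDS_COMPILER_W_BARRIER;  // _WriteBarrier(): optimizer fence only, no instruction emitted
    g_ready = 1;
}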

extern/libcds/cds/compiler/vc/defs.h vendored Normal file

@ -0,0 +1,143 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_VC_DEFS_H
#define CDSLIB_COMPILER_VC_DEFS_H
//@cond
// Compiler version
#define CDS_COMPILER_VERSION _MSC_VER
// Compiler name
// Supported compilers: MS VC 2015 +
// C++ compiler versions:
#define CDS_COMPILER_MSVC14 1900 // 2015 vc14
#define CDS_COMPILER_MSVC14_1 1910 // 2017 vc14.1
#define CDS_COMPILER_MSVC14_1_3 1911 // 2017 vc14.1 (VS 15.3)
#define CDS_COMPILER_MSVC14_1_5 1912 // 2017 vc14.1 (VS 15.5)
#define CDS_COMPILER_MSVC15 2000 // next Visual Studio
#if CDS_COMPILER_VERSION < CDS_COMPILER_MSVC14
# error "Only MS Visual C++ 14 (2015) and above is supported"
#endif
#if _MSC_VER == 1900
# define CDS_COMPILER__NAME "MS Visual C++ 2015"
# define CDS_COMPILER__NICK "vc14"
# define CDS_COMPILER_LIBCDS_SUFFIX "vcv140"
#elif _MSC_VER < 2000
# define CDS_COMPILER__NAME "MS Visual C++ 2017"
# define CDS_COMPILER__NICK "vc141"
# define CDS_COMPILER_LIBCDS_SUFFIX "vcv141"
#else
# define CDS_COMPILER__NAME "MS Visual C++"
# define CDS_COMPILER__NICK "msvc"
# define CDS_COMPILER_LIBCDS_SUFFIX "vc"
#endif
// OS interface
#define CDS_OS_INTERFACE CDS_OSI_WINDOWS
// OS name
#if defined(_WIN64)
# define CDS_OS_TYPE CDS_OS_WIN64
# define CDS_OS__NAME "Win64"
# define CDS_OS__NICK "Win64"
#elif defined(_WIN32)
# define CDS_OS_TYPE CDS_OS_WIN32
# define CDS_OS__NAME "Win32"
# define CDS_OS__NICK "Win32"
#endif
// Processor architecture
#ifdef _M_IX86
# define CDS_BUILD_BITS 32
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86
# define CDS_PROCESSOR__NAME "Intel x86"
# define CDS_PROCESSOR__NICK "x86"
#elif _M_X64
# define CDS_BUILD_BITS 64
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64
# define CDS_PROCESSOR__NAME "AMD64"
# define CDS_PROCESSOR__NICK "amd64"
#else
# define CDS_BUILD_BITS -1
# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN
# define CDS_PROCESSOR__NAME "<<Undefined>>"
# error Microsoft Visual C++ compiler is supported for x86 and amd64 only
#endif
#define __attribute__( _x )
#ifndef CDS_BUILD_STATIC_LIB
# ifdef CDS_BUILD_LIB
# define CDS_EXPORT_API __declspec(dllexport)
# else
# define CDS_EXPORT_API __declspec(dllimport)
# endif
#else
# define CDS_EXPORT_API
#endif
#define alignof __alignof
// Memory leaks detection (debug build only)
#ifdef _DEBUG
# define _CRTDBG_MAP_ALLOC
# define _CRTDBG_MAPALLOC
# include <stdlib.h>
# include <crtdbg.h>
# define CDS_MSVC_MEMORY_LEAKS_DETECTING_ENABLED
#endif
// *************************************************
// Alignment macro
#define CDS_TYPE_ALIGNMENT(n) __declspec( align(n))
#define CDS_DATA_ALIGNMENT(n) __declspec( align(n))
#define CDS_CLASS_ALIGNMENT(n) __declspec( align(n))
// Attributes
#define CDS_DEPRECATED( reason ) [[deprecated( reason )]]
#define CDS_NORETURN __declspec(noreturn)
// Exceptions
#if defined( _CPPUNWIND )
# define CDS_EXCEPTION_ENABLED
#endif
// double-width CAS support
//#define CDS_DCAS_SUPPORT
// Byte order
// It seems MSVC targets little-endian architectures only
#if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN)
# define CDS_ARCH_LITTLE_ENDIAN
#endif
//if constexpr support (C++17)
#ifndef constexpr_if
// Standard way to check if the compiler supports "if constexpr"
// Of course, MS VC doesn't support any standard way
# if defined __cpp_if_constexpr
# if __cpp_if_constexpr >= 201606
# define constexpr_if if constexpr
# endif
# elif CDS_COMPILER_VERSION >= CDS_COMPILER_MSVC14_1_3 && _MSVC_LANG > CDS_CPLUSPLUS_14
// MS-specific quirk:
// doesn't work under /std:c++17 because that switch defines _MSVC_LANG=201402 (c++14) in VC 15.3
# define constexpr_if if constexpr
# endif
#endif
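// As an illustration (assuming a plain "if" fallback definition of constexpr_if
// elsewhere, which the #ifndef guard above suggests), code written against the
// shim stays well-formed in both modes:
template <typename T>
const char* size_class()
{
    constexpr_if ( sizeof( T ) <= sizeof( void* ) )
        return "register-sized";   // branch discarded at compile time under C++17
    else
        return "large";            // an ordinary constant branch pre-C++17
}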
// Sanitizer attributes (not supported)
#define CDS_SUPPRESS_SANITIZE( ... )
#include <cds/compiler/vc/compiler_barriers.h>
//@endcond
#endif // #ifndef CDSLIB_COMPILER_VC_DEFS_H


@ -0,0 +1,35 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_VC_X86_BACKOFF_H
#define CDSLIB_COMPILER_VC_X86_BACKOFF_H
//@cond none
#include <intrin.h>
namespace cds { namespace backoff {
namespace vc { namespace x86 {
# define CDS_backoff_hint_defined
static inline void backoff_hint()
{
_mm_pause();
}
# define CDS_backoff_nop_defined
static inline void backoff_nop()
{
__nop();
}
}} // namespace vc::x86
namespace platform {
using namespace vc::x86;
}
}} // namespace cds::backoff
//@endcond
#endif // #ifndef CDSLIB_COMPILER_VC_X86_BACKOFF_H
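// A usage sketch, illustrative only: backoff_hint() executes PAUSE, which cuts
// the pipeline and power cost of busy-waiting on hyper-threaded CPUs. The
// include line is an assumption; adjust it to whatever umbrella header pulls
// this file in.
#include <atomic>

void spin_until_set( std::atomic<int>& flag )
{
    while ( flag.load( std::memory_order_acquire ) == 0 )
        cds::backoff::platform::backoff_hint();   // _mm_pause() on this platform
}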


@ -0,0 +1,86 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_VC_X86_BITOP_H
#define CDSLIB_COMPILER_VC_X86_BITOP_H
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
#pragma intrinsic(_BitScanForward)
//@cond none
namespace cds {
namespace bitop { namespace platform { namespace vc { namespace x86 {
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
static inline int msb32( uint32_t nArg )
{
unsigned long nIndex;
if ( _BitScanReverse( &nIndex, nArg ))
return (int) nIndex + 1;
return 0;
}
# define cds_bitop_msb32nz_DEFINED
static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
_BitScanReverse( &nIndex, nArg );
return (int) nIndex;
}
// LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_lsb32_DEFINED
static inline int lsb32( uint32_t nArg )
{
unsigned long nIndex;
if ( _BitScanForward( &nIndex, nArg ))
return (int) nIndex + 1;
return 0;
}
# define cds_bitop_lsb32nz_DEFINED
static inline int lsb32nz( uint32_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
_BitScanForward( &nIndex, nArg );
return (int) nIndex;
}
// bswap - Reverses the byte order of a 32-bit word
# define cds_bitop_bswap32_DEFINED
static inline uint32_t bswap32( uint32_t nArg )
{
__asm {
mov eax, nArg;
bswap eax;
}
}
# define cds_bitop_complement32_DEFINED
static inline bool complement32( uint32_t * pArg, unsigned int nBit )
{
return _bittestandcomplement( reinterpret_cast<long *>( pArg ), nBit ) != 0;
}
# define cds_bitop_complement64_DEFINED
static inline bool complement64( uint64_t * pArg, unsigned int nBit )
{
if ( nBit < 32 )
return _bittestandcomplement( reinterpret_cast<long *>( pArg ), nBit ) != 0;
else
return _bittestandcomplement( reinterpret_cast<long *>( pArg ) + 1, nBit - 32 ) != 0;
}
}} // namespace vc::x86
using namespace vc::x86;
}}} // namespace cds::bitop::platform
//@endcond
#endif // #ifndef CDSLIB_COMPILER_VC_X86_BITOP_H
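// Usage sketch, illustrative only: msb32() returns a 1-based bit index, which
// makes an integer floor(log2) a one-liner.
#include <cassert>
#include <cstdint>

void bitop_examples()
{
    using namespace cds::bitop::platform;
    assert( msb32( 1u ) == 1 );             // lowest bit -> index 1
    assert( msb32( 0x80000000u ) == 32 );   // highest bit -> index 32
    assert( msb32( 0u ) == 0 );             // documented zero-input result
    uint32_t n = 1000;
    assert( msb32( n ) - 1 == 9 );          // floor(log2(1000)) == 9
}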


@ -0,0 +1,556 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_COMPILER_VC_X86_CXX11_ATOMIC_H
#define CDSLIB_COMPILER_VC_X86_CXX11_ATOMIC_H
#include <intrin.h>
#include <emmintrin.h> // for 64bit atomic load/store
#include <cds/details/is_aligned.h>
#pragma intrinsic( _InterlockedIncrement )
#pragma intrinsic( _InterlockedDecrement )
#pragma intrinsic( _InterlockedCompareExchange )
//#pragma intrinsic( _InterlockedCompareExchangePointer ) // On the x86 architecture, _InterlockedCompareExchangePointer is a macro that calls _InterlockedCompareExchange
#pragma intrinsic( _InterlockedCompareExchange16 )
#pragma intrinsic( _InterlockedCompareExchange64 )
#pragma intrinsic( _InterlockedExchange )
//#pragma intrinsic( _InterlockedExchangePointer ) // On the x86 architecture, _InterlockedExchangePointer is a macro that calls _InterlockedExchange
#pragma intrinsic( _InterlockedExchangeAdd )
#pragma intrinsic( _InterlockedXor )
#pragma intrinsic( _InterlockedOr )
#pragma intrinsic( _InterlockedAnd )
#pragma intrinsic( _interlockedbittestandset )
#if _MSC_VER >= 1600
# pragma intrinsic( _InterlockedCompareExchange8 )
# pragma intrinsic( _InterlockedExchange8 )
# pragma intrinsic( _InterlockedExchange16 )
#endif
//@cond
namespace cds { namespace cxx11_atomic {
namespace platform { inline namespace vc { inline namespace x86 {
static inline void fence_before( memory_order order ) noexcept
{
switch(order) {
case memory_order_relaxed:
case memory_order_acquire:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
}
}
static inline void fence_after( memory_order order ) noexcept
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_relaxed:
case memory_order_consume:
case memory_order_release:
break;
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
}
}
static inline void fence_after_load(memory_order order) noexcept
{
switch(order) {
case memory_order_relaxed:
case memory_order_release:
break;
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_consume:
break;
case memory_order_seq_cst:
__asm { mfence };
break;
default:;
}
}
//-----------------------------------------------------------------------------
// fences
//-----------------------------------------------------------------------------
static inline void thread_fence(memory_order order) noexcept
{
switch(order)
{
case memory_order_relaxed:
case memory_order_consume:
break;
case memory_order_release:
case memory_order_acquire:
case memory_order_acq_rel:
CDS_COMPILER_RW_BARRIER;
break;
case memory_order_seq_cst:
__asm { mfence };
break;
default:;
}
}
static inline void signal_fence(memory_order order) noexcept
{
// C++11: 29.8.8: only compiler optimization, no hardware instructions
switch(order)
{
case memory_order_relaxed:
break;
case memory_order_consume:
case memory_order_release:
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
CDS_COMPILER_RW_BARRIER;
break;
default:;
}
}
//-----------------------------------------------------------------------------
// atomic flag primitives
//-----------------------------------------------------------------------------
typedef unsigned char atomic_flag_type;
static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order /*order*/ ) noexcept
{
return _interlockedbittestandset( (long volatile *) pFlag, 0 ) != 0;
}
static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept
{
assert( order != memory_order_acquire
&& order != memory_order_acq_rel
);
fence_before( order );
*pFlag = 0;
fence_after( order );
}
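// The two flag primitives above are exactly what a test-and-set spinlock
// needs; a minimal sketch using only names defined in this header
// (illustrative, not part of libcds):
struct tiny_spinlock {
    atomic_flag_type volatile m_flag;
    tiny_spinlock() : m_flag( 0 ) {}

    void lock() noexcept
    {
        while ( atomic_flag_tas( &m_flag, memory_order_acquire ))
            ;   // spin until the previous owner clears the flag
    }
    void unlock() noexcept
    {
        atomic_flag_clear( &m_flag, memory_order_release );
    }
};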
//-----------------------------------------------------------------------------
// 8bit primitives
//-----------------------------------------------------------------------------
#if _MSC_VER >= 1600
# pragma warning(push)
// Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning)
# pragma warning( disable: 4800 )
#endif
template <typename T>
static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal operand size" );
# if _MSC_VER >= 1600
T prev = expected;
expected = (T) _InterlockedCompareExchange8( reinterpret_cast<char volatile*>(pDest), (char) desired, (char) expected );
return expected == prev;
# else
bool bRet = false;
__asm {
mov ecx, pDest;
mov edx, expected;
mov al, byte ptr [edx];
mov ah, desired;
lock cmpxchg byte ptr [ecx], ah;
mov byte ptr [edx], al;
setz bRet;
}
return bRet;
# endif
}
#if _MSC_VER >= 1600
# pragma warning(pop)
#endif
template <typename T>
static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas8_strong( pDest, expected, desired, mo_success, mo_fail );
}
#if _MSC_VER >= 1600
# pragma warning(push)
// Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning)
# pragma warning( disable: 4800 )
#endif
template <typename T>
static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal operand size" );
# if _MSC_VER >= 1600
return (T) _InterlockedExchange8( reinterpret_cast<char volatile *>(pDest), (char) v );
# else
__asm {
mov al, v;
mov ecx, pDest;
lock xchg byte ptr [ecx], al;
}
# endif
}
#if _MSC_VER >= 1600
# pragma warning(pop)
#endif
template <typename T>
static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange8( pDest, src, order );
}
}
template <typename T>
static inline T load8( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 1, "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
T v = *pSrc;
fence_after_load( order );
return v;
}
//-----------------------------------------------------------------------------
// 16bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T exchange16( T volatile * pDest, T v, memory_order /*order*/ ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal operand size" );
assert( cds::details::is_aligned( pDest, 2 ));
# if _MSC_VER >= 1600
return (T) _InterlockedExchange16( (short volatile *) pDest, (short) v );
# else
__asm {
mov ax, v;
mov ecx, pDest;
lock xchg word ptr [ecx], ax;
}
# endif
}
template <typename T>
static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
assert( cds::details::is_aligned( pDest, 2 ));
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange16( pDest, src, order );
}
}
template <typename T>
static inline T load16( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
assert( cds::details::is_aligned( pSrc, 2 ));
T v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept
{
static_assert( sizeof(T) == 2, "Illegal operand size" );
assert( cds::details::is_aligned( pDest, 2 ));
// _InterlockedCompareExchange behaves as a read-write memory barrier
T prev = expected;
expected = (T) _InterlockedCompareExchange16( (short *) pDest, (short) desired, (short) expected );
return expected == prev;
}
template <typename T>
static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas16_strong( pDest, expected, desired, mo_success, mo_fail );
}
//-----------------------------------------------------------------------------
// 32bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T exchange32( T volatile * pDest, T v, memory_order /*order*/ ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal operand size" );
assert( cds::details::is_aligned( pDest, 4 ));
return (T) _InterlockedExchange( (long *) pDest, (long) v );
}
template <typename T>
static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
assert( cds::details::is_aligned( pDest, 4 ));
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange32( pDest, src, order );
}
}
template <typename T>
static inline T load32( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
assert( cds::details::is_aligned( pSrc, 4 ));
T v( *pSrc );
fence_after_load( order );
return v;
}
template <typename T>
static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept
{
static_assert( sizeof(T) == 4, "Illegal operand size" );
assert( cds::details::is_aligned( pDest, 4 ));
// _InterlockedCompareExchange behaves as a read-write memory barrier
T prev = expected;
expected = (T) _InterlockedCompareExchange( (long *) pDest, (long) desired, (long) expected );
return expected == prev;
}
template <typename T>
static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas32_strong( pDest, expected, desired, mo_success, mo_fail );
}
// fetch_xxx may be emulated via cas32
// If the platform has special fetch_xxx instruction
// then it should define CDS_ATOMIC_fetch32_xxx_defined macro
# define CDS_ATOMIC_fetch32_add_defined
template <typename T>
static inline T fetch32_add( T volatile * pDest, T v, memory_order /*order*/) noexcept
{
static_assert( sizeof(T) == 4, "Illegal operand size" );
assert( cds::details::is_aligned( pDest, 4 ));
// _InterlockedExchangeAdd behaves as a read-write memory barrier
return (T) _InterlockedExchangeAdd( (long *) pDest, (long) v );
}
//-----------------------------------------------------------------------------
// 64bit primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal operand size" );
assert( cds::details::is_aligned( pDest, 8 ));
// _InterlockedCompareExchange behaves as a read-write memory barrier
T prev = expected;
expected = (T) _InterlockedCompareExchange64( (__int64 *) pDest, (__int64) desired, (__int64) expected );
return expected == prev;
}
template <typename T>
static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas64_strong( pDest, expected, desired, mo_success, mo_fail );
}
template <typename T>
static inline T load64( T volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
assert( cds::details::is_aligned( pSrc, 8 ));
// Atomically loads a 64-bit value via the SSE movq intrinsic
__m128i volatile v = _mm_loadl_epi64( (__m128i const *) pSrc );
fence_after_load( order );
return (T) v.m128i_i64[0];
}
template <typename T>
static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal operand size" );
T cur = load64( pDest, memory_order_relaxed );
do {
} while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed ));
return cur;
}
template <typename T>
static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept
{
static_assert( sizeof(T) == 8, "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
assert( cds::details::is_aligned( pDest, 8 ));
if ( order != memory_order_seq_cst ) {
__m128i v;
v.m128i_i64[0] = val;
fence_before( order );
_mm_storel_epi64( (__m128i *) pDest, v );
}
else {
exchange64( pDest, val, order );
}
}
//-----------------------------------------------------------------------------
// pointer primitives
//-----------------------------------------------------------------------------
template <typename T>
static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" );
return (T *) _InterlockedExchange( (long volatile *) pDest, (uintptr_t) v );
//return (T *) _InterlockedExchangePointer( (void * volatile *) pDest, reinterpret_cast<void *>(v));
}
template <typename T>
static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_release
|| order == memory_order_seq_cst
);
assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
*pDest = src;
}
else {
exchange_ptr( pDest, src, order );
}
}
template <typename T>
static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" );
assert( order == memory_order_relaxed
|| order == memory_order_consume
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
assert( pSrc );
T * v = *pSrc;
fence_after_load( order );
return v;
}
template <typename T>
static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" );
// _InterlockedCompareExchangePointer behaves as a read-write memory barrier
T * prev = expected;
expected = (T *) _InterlockedCompareExchange( (long volatile *) pDest, (uintptr_t) desired, (uintptr_t) prev );
return expected == prev;
}
template <typename T>
static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept
{
return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail );
}
}} // namespace vc::x86
} // namespace platform
}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef CDSLIB_COMPILER_VC_X86_CXX11_ATOMIC_H


@ -0,0 +1,456 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_BASKET_QUEUE_H
#define CDSLIB_CONTAINER_BASKET_QUEUE_H
#include <cds/intrusive/basket_queue.h>
#include <cds/container/details/base.h>
#include <memory>
namespace cds { namespace container {
/// BasketQueue related definitions
/** @ingroup cds_nonintrusive_helper
*/
namespace basket_queue {
/// Internal statistics
template <typename Counter = cds::intrusive::basket_queue::stat<>::counter_type >
using stat = cds::intrusive::basket_queue::stat< Counter >;
/// Dummy internal statistics
typedef cds::intrusive::basket_queue::empty_stat empty_stat;
/// BasketQueue default type traits
struct traits
{
/// Node allocator
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Back-off strategy
typedef cds::backoff::empty back_off;
/// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting
typedef atomicity::empty_item_counter item_counter;
/// Internal statistics (by default, disabled)
/**
Possible option value are: \p basket_queue::stat, \p basket_queue::empty_stat (the default),
user-provided class that supports \p %basket_queue::stat interface.
*/
typedef basket_queue::empty_stat stat;
/// C++ memory ordering model
/**
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
*/
typedef opt::v::relaxed_ordering memory_model;
/// Padding for internal critical atomic data. Default is \p opt::cache_line_padding
enum { padding = opt::cache_line_padding };
};
/// Metafunction converting option list to \p basket_queue::traits
/**
Supported \p Options are:
- \p opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR
- \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty.
- \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled)
To enable item counting use \p cds::atomicity::item_counter
- \p opt::stat - the type to gather internal statistics.
Possible statistics types are: \p basket_queue::stat, \p basket_queue::empty_stat, user-provided class that supports \p %basket_queue::stat interface.
Default is \p %basket_queue::empty_stat.
- \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
Example: declare \p %BasketQueue with item counting and internal statistics
\code
typedef cds::container::BasketQueue< cds::gc::HP, Foo,
typename cds::container::basket_queue::make_traits<
cds::opt::item_counter< cds::atomicity::item_counter >,
cds::opt::stat< cds::intrusive::basket_queue::stat<> >
>::type
> myQueue;
\endcode
*/
template <typename... Options>
struct make_traits {
# ifdef CDS_DOXYGEN_INVOKED
typedef implementation_defined type; ///< Metafunction result
# else
typedef typename cds::opt::make_options<
typename cds::opt::find_type_traits< traits, Options... >::type
, Options...
>::type type;
# endif
};
} // namespace basket_queue
//@cond
namespace details {
template <typename GC, typename T, typename Traits>
struct make_basket_queue
{
typedef GC gc;
typedef T value_type;
typedef Traits traits;
struct node_type: public intrusive::basket_queue::node< gc >
{
value_type m_value;
node_type( const value_type& val )
: m_value( val )
{}
template <typename... Args>
node_type( Args&&... args )
: m_value( std::forward<Args>(args)...)
{}
};
typedef typename std::allocator_traits< typename traits::allocator >::template rebind_alloc< node_type > allocator_type;
//typedef typename traits::allocator::template rebind<node_type>::other allocator_type;
typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator;
struct node_deallocator
{
void operator ()( node_type * pNode )
{
cxx_allocator().Delete( pNode );
}
};
struct intrusive_traits : public traits
{
typedef cds::intrusive::basket_queue::base_hook< opt::gc<gc> > hook;
typedef node_deallocator disposer;
static constexpr const cds::intrusive::opt::link_check_type link_checker = cds::intrusive::basket_queue::traits::link_checker;
};
typedef cds::intrusive::BasketQueue< gc, node_type, intrusive_traits > type;
};
}
//@endcond
/// Basket lock-free queue (non-intrusive variant)
/** @ingroup cds_nonintrusive_queue
It is a non-intrusive version of the basket queue algorithm, based on its intrusive::BasketQueue counterpart.
\par Source:
[2007] Moshe Hoffman, Ori Shalev, Nir Shavit "The Baskets Queue"
<b>Key idea</b>
In the 'basket' approach, instead of
the traditional ordered list of nodes, the queue consists of an ordered list of groups
of nodes (logical baskets). The order of nodes in each basket need not be specified, and in
fact, it is easiest to maintain them in LIFO order. The baskets fulfill the following basic
rules:
- Each basket has a time interval in which all its nodes' enqueue operations overlap.
- The baskets are ordered by the order of their respective time intervals.
- For each basket, its nodes' dequeue operations occur after its time interval.
- The dequeue operations are performed according to the order of baskets.
Two properties define the FIFO order of nodes:
- The order of nodes in a basket is not specified.
- The order of nodes in different baskets is the FIFO-order of their respective baskets.
In algorithms such as the MS-queue or optimistic
queue, threads enqueue items by applying a Compare-and-swap (CAS) operation to the
queue's tail pointer, and all the threads that fail on a particular CAS operation (and also
the winner of that CAS) overlap in time. In particular, they share the time interval of
the CAS operation itself. Hence, all the threads that fail to CAS on the tail-node of
the queue may be inserted into the same basket. By integrating the basket-mechanism
as the back-off mechanism, the time usually spent on backing-off before trying to link
onto the new tail, can now be utilized to insert the failed operations into the basket,
allowing enqueues to complete sooner. In the meantime, the next successful CAS operations
by enqueues allow new baskets to be formed down the list, and these can be
filled concurrently. Moreover, the failed operations don't retry their link attempt on the
new tail, lowering the overall contention on it. This leads to a queue
algorithm that, unlike all former concurrent queue algorithms, requires virtually no tuning
of the backoff mechanisms to reduce contention, making it an attractive
out-of-the-box queue.
In order to enqueue, just as in MSQueue, a thread first tries to link the new node to
the last node. If it failed to do so, then another thread has already succeeded. Thus it
tries to insert the new node into the new basket that was created by the winner thread.
To dequeue a node, a thread first reads the head of the queue to obtain the
oldest basket. It may then dequeue any node in the oldest basket.
Template arguments:
- \p GC - garbage collector type: \p gc::HP, \p gc::DHP
- \p T - type of value to be stored in the queue
- \p Traits - queue traits, default is \p basket_queue::traits. You can use \p basket_queue::make_traits
metafunction to make your traits or just derive your traits from \p %basket_queue::traits:
\code
struct myTraits: public cds::container::basket_queue::traits {
typedef cds::intrusive::basket_queue::stat<> stat;
typedef cds::atomicity::item_counter item_counter;
};
typedef cds::container::BasketQueue< cds::gc::HP, Foo, myTraits > myQueue;
// Equivalent make_traits example:
typedef cds::container::BasketQueue< cds::gc::HP, Foo,
typename cds::container::basket_queue::make_traits<
cds::opt::stat< cds::container::basket_queue::stat<> >,
cds::opt::item_counter< cds::atomicity::item_counter >
>::type
> myQueue;
\endcode
*/
template <typename GC, typename T, typename Traits = basket_queue::traits >
class BasketQueue:
#ifdef CDS_DOXYGEN_INVOKED
private intrusive::BasketQueue< GC, intrusive::basket_queue::node< T >, Traits >
#else
protected details::make_basket_queue< GC, T, Traits >::type
#endif
{
//@cond
typedef details::make_basket_queue< GC, T, Traits > maker;
typedef typename maker::type base_class;
//@endcond
public:
/// Rebind template arguments
template <typename GC2, typename T2, typename Traits2>
struct rebind {
typedef BasketQueue< GC2, T2, Traits2> other ; ///< Rebinding result
};
public:
typedef GC gc; ///< Garbage collector
typedef T value_type; ///< Type of value to be stored in the queue
typedef Traits traits; ///< Queue's traits
typedef typename base_class::back_off back_off; ///< Back-off strategy used
typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
typedef typename base_class::stat stat; ///< Internal statistics policy used
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm
protected:
typedef typename maker::node_type node_type; ///< queue node type (derived from intrusive::basket_queue::node)
//@cond
typedef typename maker::cxx_allocator cxx_allocator;
typedef typename maker::node_deallocator node_deallocator; // deallocate node
typedef typename base_class::node_traits node_traits;
//@endcond
protected:
///@cond
static node_type * alloc_node()
{
return cxx_allocator().New();
}
static node_type * alloc_node( const value_type& val )
{
return cxx_allocator().New( val );
}
template <typename... Args>
static node_type * alloc_node_move( Args&&... args )
{
return cxx_allocator().MoveNew( std::forward<Args>( args )... );
}
static void free_node( node_type * p )
{
node_deallocator()( p );
}
struct node_disposer {
void operator()( node_type * pNode )
{
free_node( pNode );
}
};
typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
//@endcond
public:
/// Initializes empty queue
BasketQueue()
{}
/// Destructor clears the queue
~BasketQueue()
{}
/// Enqueues \p val value into the queue.
/**
The function creates a queue node in dynamic memory, calling the copy constructor for \p val,
and then calls \p intrusive::BasketQueue::enqueue().
Returns \p true if success, \p false otherwise.
*/
bool enqueue( value_type const& val )
{
scoped_node_ptr p( alloc_node(val));
if ( base_class::enqueue( *p )) {
p.release();
return true;
}
return false;
}
/// Enqueues \p val value into the queue, move semantics
bool enqueue( value_type&& val )
{
scoped_node_ptr p( alloc_node_move( std::move( val )));
if ( base_class::enqueue( *p )) {
p.release();
return true;
}
return false;
}
/// Enqueues \p data to queue using a functor
/**
\p Func is a functor called to create node.
The functor \p f takes one argument - a reference to a new node of type \ref value_type :
\code
cds::container::BasketQueue< cds::gc::HP, Foo > myQueue;
Bar bar;
myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } );
\endcode
*/
template <typename Func>
bool enqueue_with( Func f )
{
scoped_node_ptr p( alloc_node());
f( p->m_value );
if ( base_class::enqueue( *p )) {
p.release();
return true;
}
return false;
}
/// Synonym for \p enqueue() function
bool push( value_type const& val )
{
return enqueue( val );
}
/// Synonym for \p enqueue() function, move semantics
bool push( value_type&& val )
{
return enqueue( std::move( val ));
}
/// Synonym for \p enqueue_with() function
template <typename Func>
bool push_with( Func f )
{
return enqueue_with( f );
}
/// Enqueues data of type \ref value_type constructed with <tt>std::forward<Args>(args)...</tt>
template <typename... Args>
bool emplace( Args&&... args )
{
scoped_node_ptr p( alloc_node_move( std::forward<Args>(args)...));
if ( base_class::enqueue( *p )) {
p.release();
return true;
}
return false;
}
/// Dequeues a value from the queue
/**
If the queue is not empty, the function returns \p true and \p dest contains a copy of the
dequeued value. The assignment operator for \p value_type is invoked.
If the queue is empty, the function returns \p false and \p dest is unchanged.
*/
bool dequeue( value_type& dest )
{
return dequeue_with( [&dest]( value_type& src ) {
// TSan reports a race between this read of \p src and the node_type constructor;
// we believe it is a false positive
CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
dest = std::move( src );
CDS_TSAN_ANNOTATE_IGNORE_READS_END;
});
}
/// Dequeues a value using a functor
/**
\p Func is a functor called to copy the dequeued value.
The functor takes one argument - a reference to removed node:
\code
cds::container::BasketQueue< cds::gc::HP, Foo > myQueue;
Bar bar;
myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );});
\endcode
The functor is called only if the queue is not empty.
*/
template <typename Func>
bool dequeue_with( Func f )
{
typename base_class::dequeue_result res;
if ( base_class::do_dequeue( res, true )) {
f( node_traits::to_value_ptr( *res.pNext )->m_value );
return true;
}
return false;
}
/// Synonym for \p dequeue() function
bool pop( value_type& dest )
{
return dequeue( dest );
}
/// Synonym for \p dequeue_with() function
template <typename Func>
bool pop_with( Func f )
{
return dequeue_with( f );
}
/// Checks if the queue is empty
/**
Note that this function is not \p const.
The function is based on the \p dequeue() algorithm.
*/
bool empty()
{
return base_class::empty();
}
/// Clear the queue
/**
The function repeatedly calls \ref dequeue until it returns \p false.
*/
void clear()
{
base_class::clear();
}
/// Returns queue's item count
/** \copydetails cds::intrusive::BasketQueue::size()
*/
size_t size() const
{
return base_class::size();
}
/// Returns reference to internal statistics
const stat& statistics() const
{
return base_class::statistics();
}
};
}} // namespace cds::container
#endif // #ifndef CDSLIB_CONTAINER_BASKET_QUEUE_H
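// A minimal usage sketch, assuming the usual libcds setup sequence
// (cds::Initialize(), a Hazard Pointer GC instance, per-thread attach);
// treat the exact boilerplate as an assumption to check against the
// vendored version of the library.
#include <cds/init.h>
#include <cds/gc/hp.h>
#include <cds/container/basket_queue.h>

int main()
{
    cds::Initialize();                              // library bookkeeping
    {
        cds::gc::HP hpGC;                           // Hazard Pointer GC singleton
        cds::threading::Manager::attachThread();    // every worker thread must attach

        cds::container::BasketQueue< cds::gc::HP, int > queue;
        queue.push( 1 );
        queue.push( 2 );

        int v;
        while ( queue.pop( v ))
            ;                                       // v receives 1, then 2 (FIFO)

        cds::threading::Manager::detachThread();
    }
    cds::Terminate();
    return 0;
}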


@ -0,0 +1,687 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_BRONSON_AVLTREE_MAP_RCU_H
#define CDSLIB_CONTAINER_BRONSON_AVLTREE_MAP_RCU_H
#include <functional>
#include <cds/container/impl/bronson_avltree_map_rcu.h>
namespace cds { namespace container {
namespace bronson_avltree {
//@cond
namespace details {
template < class RCU, typename Key, typename T, typename Traits>
struct make_map
{
typedef Key key_type;
typedef T mapped_type;
typedef Traits original_traits;
typedef cds::details::Allocator< mapped_type, typename original_traits::allocator > cxx_allocator;
struct traits : public original_traits
{
struct disposer {
void operator()( mapped_type * p ) const
{
cxx_allocator().Delete( p );
}
};
};
// Metafunction result
typedef BronsonAVLTreeMap< RCU, Key, mapped_type *, traits > type;
};
} // namespace details
//@endcond
} // namespace bronson_avltree
/// Bronson et al AVL-tree (RCU specialization)
/** @ingroup cds_nonintrusive_map
@ingroup cds_nonintrusive_tree
@anchor cds_container_BronsonAVLTreeMap_rcu
Source:
- [2010] N.Bronson, J.Casper, H.Chafi, K.Olukotun "A Practical Concurrent Binary Search Tree"
- <a href="http://github.com/nbronson/snaptree">Java implementation</a>
This is a concurrent AVL tree algorithm that uses hand-over-hand optimistic validation,
a concurrency control mechanism for searching and navigating a binary search tree.
This mechanism minimizes spurious retries when concurrent structural changes cannot
affect the correctness of the search or navigation result.
The algorithm is based on partially external trees, a simple scheme that simplifies deletions
by leaving a routing node in the tree when deleting a node that has two children,
then opportunistically unlinking routing nodes during rebalancing. As in external trees,
which store values only in leaf nodes, deletions can be performed locally while holding
a fixed number of locks. Partially external trees, however, require far fewer routing nodes
than an external tree for most sequences of insertions and deletions.
The algorithm uses optimistic concurrency control, but carefully manages the
tree in such a way that all atomic regions have fixed read and write sets
that are known ahead of time. This makes it possible to reduce practical overheads by embedding
the concurrency control directly. To perform tree operations using only fixed-size
atomic regions the algorithm uses the following mechanisms: search operations overlap atomic blocks as
in the hand-over-hand locking technique; mutations perform rebalancing separately;
and deletions occasionally leave a routing node in the tree.
<b>Template arguments</b>:
- \p RCU - one of \ref cds_urcu_gc "RCU type"
- \p Key - key type
- \p T - value type to be stored in tree's nodes.
- \p Traits - tree traits, default is \p bronson_avltree::traits
It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
instead of \p Traits template argument.
There is \ref cds_container_BronsonAVLTreeMap_rcu_ptr "a specialization" for "key -> value pointer" map.
@note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
*/
template <
typename RCU,
typename Key,
typename T,
# ifdef CDS_DOXYGEN_INVOKED
typename Traits = bronson_avltree::traits
#else
typename Traits
#endif
>
class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T, Traits >
#ifdef CDS_DOXYGEN_INVOKED
: private BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
#else
: private bronson_avltree::details::make_map< cds::urcu::gc<RCU>, Key, T, Traits >::type
#endif
{
//@cond
typedef bronson_avltree::details::make_map< cds::urcu::gc<RCU>, Key, T, Traits > maker;
typedef typename maker::type base_class;
//@endcond
public:
typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
typedef Key key_type; ///< type of a key stored in the map
typedef T mapped_type; ///< type of value stored in the map
typedef Traits traits; ///< Traits template parameter
typedef typename base_class::key_comparator key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
typedef typename traits::item_counter item_counter; ///< Item counting policy
typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
typedef typename traits::allocator allocator_type; ///< allocator for value
typedef typename traits::node_allocator node_allocator_type;///< allocator for maintaining internal nodes
typedef typename traits::stat stat; ///< internal statistics
typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
typedef typename traits::back_off back_off; ///< Back-off strategy
typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
/// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
static bool const c_bRelaxedInsert = traits::relaxed_insert;
/// Group of \p extract_xxx functions does not require external locking
static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal;
typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock
/// Returned pointer to \p mapped_type of extracted node
typedef typename base_class::exempt_ptr exempt_ptr;
protected:
//@cond
typedef typename base_class::node_type node_type;
typedef typename base_class::node_scoped_lock node_scoped_lock;
typedef typename maker::cxx_allocator cxx_allocator;
typedef typename base_class::update_flags update_flags;
//@endcond
public:
/// Creates empty map
BronsonAVLTreeMap()
{}
/// Destroys the map
~BronsonAVLTreeMap()
{}
/// Inserts new node with \p key and default value
/**
The function creates a node with \p key and default value, and then inserts the node created into the map.
Preconditions:
- The \p key_type should be constructible from a value of type \p K.
- The \p mapped_type should be default-constructible.
RCU \p synchronize() can be called. RCU should not be locked.
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename K>
bool insert( K const& key )
{
return base_class::do_update(key, key_comparator(),
[]( node_type * pNode ) -> mapped_type*
{
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
CDS_UNUSED( pNode );
return cxx_allocator().New();
},
update_flags::allow_insert
) == update_flags::result_inserted;
}
/// Inserts new node
/**
The function creates a node with copy of \p val value
and then inserts the node created into the map.
Preconditions:
- The \p key_type should be constructible from \p key of type \p K.
- The \p mapped_type should be constructible from \p val of type \p V.
RCU \p synchronize() method can be called. RCU should not be locked.
Returns \p true if \p val is inserted into the map, \p false otherwise.
*/
template <typename K, typename V>
bool insert( K const& key, V const& val )
{
return base_class::do_update( key, key_comparator(),
[&val]( node_type * pNode ) -> mapped_type*
{
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
CDS_UNUSED( pNode );
return cxx_allocator().New( val );
},
update_flags::allow_insert
) == update_flags::result_inserted;
}
/// Inserts new node and initialize it by a functor
/**
This function inserts new node with key \p key and if inserting is successful then it calls
\p func functor with signature
\code
struct functor {
void operator()( key_type const& key, mapped_type& item );
};
\endcode
The \p key_type should be constructible from a value of type \p K.
The function allows splitting the creation of a new item into stages:
- create the item from \p key;
- insert the new item into the map;
- if inserting is successful, initialize the value of the item by calling the \p func functor
This can be useful if complete initialization of an object of \p value_type is heavyweight and
it is preferable that the initialization is completed only if inserting is successful.
The functor is called under the node lock.
RCU \p synchronize() method can be called. RCU should not be locked.
*/
template <typename K, typename Func>
bool insert_with( K const& key, Func func )
{
return base_class::do_update( key, key_comparator(),
[&func]( node_type * pNode ) -> mapped_type*
{
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
mapped_type * pVal = cxx_allocator().New();
func( pNode->m_key, *pVal );
return pVal;
},
update_flags::allow_insert
) == update_flags::result_inserted;
}
/// For \p key inserts data of type \p mapped_type created in-place from \p args
/**
Returns \p true if inserting successful, \p false otherwise.
RCU \p synchronize() method can be called. RCU should not be locked.
*/
template <typename K, typename... Args>
bool emplace( K&& key, Args&&... args )
{
struct scoped_ptr
{
mapped_type * pVal;
scoped_ptr( mapped_type * p ): pVal( p ) {}
~scoped_ptr() { if ( pVal ) cxx_allocator().Delete( pVal ); }
void release() { pVal = nullptr; }
};
scoped_ptr p( cxx_allocator().MoveNew( std::forward<Args>( args )... ));
if ( base_class::insert( std::forward<K>( key ), p.pVal )) {
p.release();
return true;
}
return false;
}
/// Updates the value for \p key
/**
The operation performs inserting or changing data in a lock-free manner.
If \p key is not found in the map, then a new item created from \p key
will be inserted into the map iff \p bAllowInsert is \p true
(note that in this case the \ref key_type should be constructible from type \p K).
Otherwise, the functor \p func is called with item found.
The functor \p Func signature is:
\code
struct my_functor {
void operator()( bool bNew, key_type const& key, mapped_type& item );
};
\endcode
with arguments:
- \p bNew - \p true if the item has been inserted, \p false otherwise
- \p item - value
The functor may change any field of \p item; it is called under the node lock,
so the caller can safely modify the item.
RCU \p synchronize() method can be called. RCU should not be locked.
Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
\p second is \p true if a new item has been added or \p false if the item with \p key
already exists.
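Illustrative example (hypothetical \p Counter value type; the RCU setup
follows this header's note about including an RCU header first), counting
occurrences of a key:
\code
#include <cds/urcu/general_buffered.h>
#include <cds/container/bronson_avltree_map_rcu.h>

typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_gc;
struct Counter { unsigned value = 0; };

void count_occurrence( cds::container::BronsonAVLTreeMap< rcu_gc, int, Counter >& map, int key )
{
    map.update( key,
        []( bool bNew, int const& k, Counter& item ) {
            if ( bNew ) item.value = 1;   // value was just default-constructed
            else        ++item.value;     // key already present: bump the count
        });                               // bAllowInsert defaults to true
}
\endcode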
*/
template <typename K, typename Func>
std::pair<bool, bool> update( K const& key, Func func, bool bAllowInsert = true )
{
int result = base_class::do_update( key, key_comparator(),
[&func]( node_type * pNode ) -> mapped_type*
{
mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
if ( !pVal ) {
pVal = cxx_allocator().New();
func( true, pNode->m_key, *pVal );
}
else
func( false, pNode->m_key, *pVal );
return pVal;
},
(bAllowInsert ? update_flags::allow_insert : 0) | update_flags::allow_update
);
return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
}
/// Delete \p key from the map
/**
RCU \p synchronize() method can be called. RCU should not be locked.
Return \p true if \p key is found and deleted, \p false otherwise
*/
template <typename K>
bool erase( K const& key )
{
return base_class::erase( key );
}
/// Deletes the item from the map using \p pred predicate for searching
/**
The function is an analog of \p erase(K const&)
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p Less must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Less>
bool erase_with( K const& key, Less pred )
{
return base_class::erase_with( key, pred );
}
/// Delete \p key from the map
/** \anchor cds_nonintrusive_BronsonAVLTreeMap_rcu_erase_func
The function searches an item with key \p key, calls \p f functor
and deletes the item. If \p key is not found, the functor is not called.
The functor \p Func interface:
\code
struct extractor {
void operator()(key_type const& key, mapped_type& item) { ... }
};
\endcode
RCU \p synchronize method can be called. RCU should not be locked.
Return \p true if key is found and deleted, \p false otherwise
*/
template <typename K, typename Func>
bool erase( K const& key, Func f )
{
return base_class::erase( key, f );
}
/// Deletes the item from the map using \p pred predicate for searching
/**
The function is an analog of \ref cds_nonintrusive_BronsonAVLTreeMap_rcu_erase_func "erase(K const&, Func)"
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p Less must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Less, typename Func>
bool erase_with( K const& key, Less pred, Func f )
{
return base_class::erase_with( key, pred, f );
}
/// Extracts a value with minimal key from the map
/**
Returns \p exempt_ptr pointer to the leftmost item.
If the set is empty, returns empty \p exempt_ptr.
Note that the function returns only the value for minimal key.
To retrieve its key use \p extract_min( Func ) member function.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
It means that the function gets leftmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
So, the function returns the item with minimum key at the moment of tree traversing.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() member function is called.
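A minimal usage sketch (assuming the same \p map_type as above):
\code
map_type m;
// ... fill the map ...
map_type::exempt_ptr xp = m.extract_min();
if ( xp ) {
    // use *xp here; the value is disposed when xp goes out of scope
}
\endcode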
*/
exempt_ptr extract_min()
{
return base_class::extract_min();
}
/// Extracts minimal key and corresponding value
/**
Returns \p exempt_ptr to the leftmost item.
If the tree is empty, returns empty \p exempt_ptr.
\p Func functor is used to store minimal key.
\p Func has the following signature:
\code
struct functor {
void operator()( key_type const& key );
};
\endcode
If the tree is empty, \p f is not called.
Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
as \p exempt_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
It means that the function gets leftmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
So, the function returns the item with minimum key at the moment of tree traversing.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() member function is called.
*/
template <typename Func>
exempt_ptr extract_min( Func f )
{
return base_class::extract_min( f );
}
/// Extracts minimal key and corresponding value
/**
This function is a shortcut for the following call:
\code
key_type key;
exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
\endcode
\p key_type should be copy-assignable. The copy of minimal key
is returned in \p min_key argument.
*/
typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
extract_min_key( key_type& min_key )
{
return base_class::extract_min_key( min_key );
}
/// Extracts an item with maximal key from the map
/**
Returns \p exempt_ptr pointer to the rightmost item.
If the set is empty, returns empty \p exempt_ptr.
Note that the function returns only the value for maximal key.
To retrieve its key use \p extract_max( Func ) or \p extract_max_key(key_type&) member function.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
It means that the function gets rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
So, the function returns the item with maximum key at the moment of tree traversing.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() is called.
*/
exempt_ptr extract_max()
{
return base_class::extract_max();
}
/// Extracts the maximal key and corresponding value
/**
Returns \p exempt_ptr pointer to the rightmost item.
If the set is empty, returns empty \p exempt_ptr.
\p Func functor is used to store maximal key.
\p Func has the following signature:
\code
struct functor {
void operator()( key_type const& key );
};
\endcode
If the tree is empty, \p f is not called.
Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
as \p exempt_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
It means that the function gets rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
So, the function returns the item with maximum key at the moment of tree traversing.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() is called.
*/
template <typename Func>
exempt_ptr extract_max( Func f )
{
return base_class::extract_max( f );
}
/// Extracts the maximal key and corresponding value
/**
This function is a shortcut for the following call:
\code
key_type key;
exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
\endcode
\p key_type should be copy-assignable. The copy of maximal key
is returned in \p max_key argument.
*/
typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
extract_max_key( key_type& max_key )
{
return base_class::extract_max_key( max_key );
}
/// Extracts an item from the map
/**
The function searches an item with key equal to \p key in the tree,
unlinks it, and returns \p exempt_ptr pointer to a value found.
If \p key is not found the function returns an empty \p exempt_ptr.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not destroy the value found.
The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() member function is called.
*/
template <typename Q>
exempt_ptr extract( Q const& key )
{
return base_class::extract( key );
}
/// Extracts an item from the map using \p pred for searching
/**
The function is an analog of \p extract(Q const&)
but \p pred is used for key compare.
\p Less has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the map.
*/
template <typename Q, typename Less>
exempt_ptr extract_with( Q const& key, Less pred )
{
return base_class::extract_with( key, pred );
}
/// Find the key \p key
/**
The function searches the item with key equal to \p key and calls the functor \p f for item found.
The interface of \p Func functor is:
\code
struct functor {
void operator()( key_type const& key, mapped_type& val );
};
\endcode
where \p val is the item found for \p key
The functor is called under node-level lock.
The function applies RCU lock internally.
The function returns \p true if \p key is found, \p false otherwise.
*/
template <typename K, typename Func>
bool find( K const& key, Func f )
{
return base_class::find( key, f );
}
/// Finds the key \p val using \p pred predicate for searching
/**
The function is an analog of \p find(K const&, Func)
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p Less must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Less, typename Func>
bool find_with( K const& key, Less pred, Func f )
{
return base_class::find_with( key, pred, f );
}
/// Checks whether the map contains \p key
/**
The function searches the item with key equal to \p key
and returns \p true if it is found, and \p false otherwise.
The function applies RCU lock internally.
*/
template <typename K>
bool contains( K const& key )
{
return base_class::contains( key );
}
/// Checks whether the map contains \p key using \p pred predicate for searching
/**
The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p Less must imply the same element order as the comparator used for building the set.
*/
template <typename K, typename Less>
bool contains( K const& key, Less pred )
{
return base_class::contains( key, pred );
}
/// Clears the map
void clear()
{
base_class::clear();
}
/// Checks if the map is empty
bool empty() const
{
return base_class::empty();
}
/// Returns item count in the map
/**
Only leaf nodes containing user data are counted.
The value returned depends on item counter type provided by \p Traits template parameter.
If it is \p atomicity::empty_item_counter this function always returns 0.
The function is not suitable for checking the tree emptiness, use \p empty()
member function for this purpose.
*/
size_t size() const
{
return base_class::size();
}
/// Returns const reference to internal statistics
stat const& statistics() const
{
return base_class::statistics();
}
/// Returns reference to \p sync_monitor object
sync_monitor& monitor()
{
return base_class::monitor();
}
//@cond
sync_monitor const& monitor() const
{
return base_class::monitor();
}
//@endcond
/// Checks internal consistency (not atomic, not thread-safe)
/**
The debugging function to check internal consistency of the tree.
*/
bool check_consistency() const
{
return base_class::check_consistency();
}
/// Checks internal consistency (not atomic, not thread-safe)
/**
The debugging function to check internal consistency of the tree.
The functor \p Func is called if a violation of internal tree structure
is found:
\code
struct functor {
void operator()( size_t nLevel, size_t hLeft, size_t hRight );
};
\endcode
where
- \p nLevel - the level where the violation is found
- \p hLeft - the height of left subtree
- \p hRight - the height of right subtree
The functor is called for each violation found.
*/
template <typename Func>
bool check_consistency( Func f ) const
{
return base_class::check_consistency( f );
}
};
}} // namespace cds::container
#endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H

extern/libcds/cds/container/cuckoo_map.h vendored Normal file

@ -0,0 +1,747 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_CUCKOO_MAP_H
#define CDSLIB_CONTAINER_CUCKOO_MAP_H
#include <cds/container/details/cuckoo_base.h>
#include <cds/details/binary_functor_wrapper.h>
namespace cds { namespace container {
//@cond
namespace details {
template <typename Key, typename T, typename Traits>
struct make_cuckoo_map
{
typedef Key key_type; ///< key type
typedef T mapped_type; ///< type of value stored in the map
typedef std::pair<key_type const, mapped_type> value_type; ///< Pair type
typedef Traits original_traits;
typedef typename original_traits::probeset_type probeset_type;
static bool const store_hash = original_traits::store_hash;
static unsigned int const store_hash_count = store_hash ? ((unsigned int) std::tuple_size< typename original_traits::hash::hash_tuple_type >::value) : 0;
struct node_type: public intrusive::cuckoo::node<probeset_type, store_hash_count>
{
value_type m_val;
template <typename K>
node_type( K const& key )
: m_val( std::make_pair( key_type(key), mapped_type()))
{}
template <typename K, typename Q>
node_type( K const& key, Q const& v )
: m_val( std::make_pair( key_type(key), mapped_type(v)))
{}
template <typename K, typename... Args>
node_type( K&& key, Args&&... args )
: m_val( std::forward<K>(key), std::move( mapped_type(std::forward<Args>(args)...)))
{}
};
struct key_accessor {
key_type const& operator()( node_type const& node ) const
{
return node.m_val.first;
}
};
struct intrusive_traits: public original_traits
{
typedef intrusive::cuckoo::base_hook<
cds::intrusive::cuckoo::probeset_type< probeset_type >
,cds::intrusive::cuckoo::store_hash< store_hash_count >
> hook;
typedef cds::intrusive::cuckoo::traits::disposer disposer;
typedef typename std::conditional<
std::is_same< typename original_traits::equal_to, opt::none >::value
, opt::none
, cds::details::predicate_wrapper< node_type, typename original_traits::equal_to, key_accessor >
>::type equal_to;
typedef typename std::conditional<
std::is_same< typename original_traits::compare, opt::none >::value
, opt::none
, cds::details::compare_wrapper< node_type, typename original_traits::compare, key_accessor >
>::type compare;
typedef typename std::conditional<
std::is_same< typename original_traits::less, opt::none >::value
,opt::none
,cds::details::predicate_wrapper< node_type, typename original_traits::less, key_accessor >
>::type less;
typedef opt::details::hash_list_wrapper< typename original_traits::hash, node_type, key_accessor > hash;
};
typedef intrusive::CuckooSet< node_type, intrusive_traits > type;
};
} // namespace details
//@endcond
/// Cuckoo hash map
/** @ingroup cds_nonintrusive_map
Source
- [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report"
- [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming"
<b>About Cuckoo hashing</b>
[From "The Art of Multiprocessor Programming"]
<a href="https://en.wikipedia.org/wiki/Cuckoo_hashing">Cuckoo hashing</a> is a hashing algorithm in which a newly added item displaces any earlier item
occupying the same slot. For brevity, a table is a k-entry array of items. For a hash set of size
N = 2k we use a two-entry array of tables, and two independent hash functions,
<tt> h0, h1: KeyRange -> 0,...,k-1</tt>
mapping the set of possible keys to entries in the array. To test whether a value \p x is in the set,
<tt>find(x)</tt> tests whether either <tt>table[0][h0(x)]</tt> or <tt>table[1][h1(x)]</tt> is
equal to \p x. Similarly, <tt>erase(x)</tt> checks whether \p x is in either <tt>table[0][h0(x)]</tt>
or <tt>table[1][h1(x)]</tt>, and removes it if found.
The <tt>insert(x)</tt> successively "kicks out" conflicting items until every key has a slot.
To add \p x, the method swaps \p x with \p y, the current occupant of <tt>table[0][h0(x)]</tt>.
If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y
for the current occupant of <tt>table[1][h1(y)]</tt> in the same way. As before, if the prior value
was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables)
until it finds an empty slot. We might not find an empty slot, either because the table is full,
or because the sequence of displacements forms a cycle. We therefore need an upper limit on the
number of successive displacements we are willing to undertake. When this limit is exceeded,
we resize the hash table, choose new hash functions and start over.
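The displacement loop for the two-table case can be sketched as follows; this is
an illustrative sketch of the algorithm described above, not the code used by this
class, and all names (\p LIMIT, \p EMPTY, \p table0, \p table1, \p h0, \p h1) are
assumptions:
\code
#include <array>
#include <functional>
#include <utility>
static const int    EMPTY   = -1;     // assumed empty-slot marker for int keys
static const size_t K_SLOTS = 1024;   // assumed slots per table
static const int    LIMIT   = 32;     // assumed relocation bound
std::array<int, K_SLOTS> table0, table1;   // assumed pre-filled with EMPTY
size_t h0( int x ) { return std::hash<int>()( x ); }
size_t h1( int x ) { return std::hash<unsigned>()( static_cast<unsigned>( x ) * 2654435761u ); }
bool put( int x )
{
    for ( int i = 0; i < LIMIT; ++i ) {
        std::swap( x, table0[h0(x) % K_SLOTS] );   // kick out the occupant
        if ( x == EMPTY ) return true;             // found a free slot
        std::swap( x, table1[h1(x) % K_SLOTS] );   // relocate the evicted item
        if ( x == EMPTY ) return true;
    }
    return false;   // cycle or full table: resize, rehash and start over
}
\endcode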
For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of
items, we use a two-dimensional table of probe sets, where a probe set is a constant-sized set
of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm
tries to ensure that when the set is quiescent (i.e. no method call in progress) each probe set
holds no more than <tt>THRESHOLD < PROBE_SIZE</tt> items. While method calls are in-flight, a probe
set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SIZE items.
In the current implementation, a probe set can be defined either as a (singly-linked) list
or as a fixed-sized vector, optionally ordered.
In the description above, two-table cuckoo hashing (<tt>k = 2</tt>) has been considered.
We can generalize this approach for <tt>k >= 2</tt> when we have \p k hash functions
<tt>h[0], ... h[k-1]</tt> and \p k tables <tt>table[0], ... table[k-1]</tt>.
The search in a probe set is linear, with complexity <tt> O(PROBE_SIZE) </tt>.
The probe set may be ordered or not. An ordered probe set can be a little better since
the average search complexity is <tt>O(PROBE_SIZE/2)</tt>.
However, the overhead of sorting can eliminate a gain of ordered search.
The probe set is ordered if \p compare or \p less is specified in \p Traits
template parameter. Otherwise, the probe set is unordered and \p Traits must contain
\p equal_to predicate.
Template arguments:
- \p Key - key type
- \p T - the type stored in the map.
- \p Traits - map traits, default is \p cuckoo::traits.
It is possible to declare an option-based map with the \p cuckoo::make_traits metafunction
result as the \p Traits template argument.
<b>Examples</b>
Declares cuckoo mapping from \p std::string to struct \p foo.
For cuckoo hashing we should provide at least two hash functions:
\code
struct hash1 {
size_t operator()(std::string const& s) const
{
return cds::opt::v::hash<std::string>( s );
}
};
struct hash2: private hash1 {
size_t operator()(std::string const& s) const
{
size_t h = ~( hash1::operator()(s));
return ~h + 0x9e3779b9 + (h << 6) + (h >> 2);
}
};
\endcode
Cuckoo-map with list-based unordered probe set and storing hash values
\code
#include <cds/container/cuckoo_map.h>
// Declare type traits
struct my_traits: public cds::container::cuckoo::traits
{
typedef std::equal_to< std::string > equal_to;
typedef std::tuple< hash1, hash2 > hash;
static bool const store_hash = true;
};
// Declare CuckooMap type
typedef cds::container::CuckooMap< std::string, foo, my_traits > my_cuckoo_map;
// Equal option-based declaration
typedef cds::container::CuckooMap< std::string, foo,
cds::container::cuckoo::make_traits<
cds::opt::hash< std::tuple< hash1, hash2 > >
,cds::opt::equal_to< std::equal_to< std::string > >
,cds::container::cuckoo::store_hash< true >
>::type
> opt_cuckoo_map;
\endcode
If we provide \p less functor instead of \p equal_to
we get as a result a cuckoo map with ordered probe set that may improve
performance.
Example for ordered vector-based probe-set:
\code
#include <cds/container/cuckoo_map.h>
// Declare type traits
// We use a vector of capacity 4 as probe-set container and store hash values in the node
struct my_traits: public cds::container::cuckoo::traits
{
typedef std::less< std::string > less;
typedef std::tuple< hash1, hash2 > hash;
typedef cds::container::cuckoo::vector<4> probeset_type;
static bool const store_hash = true;
};
// Declare CuckooMap type
typedef cds::container::CuckooMap< std::string, foo, my_traits > my_cuckoo_map;
// Equal option-based declaration
typedef cds::container::CuckooMap< std::string, foo,
cds::container::cuckoo::make_traits<
cds::opt::hash< std::tuple< hash1, hash2 > >
,cds::opt::less< std::less< std::string > >
,cds::container::cuckoo::probeset_type< cds::container::cuckoo::vector<4> >
,cds::container::cuckoo::store_hash< true >
>::type
> opt_cuckoo_map;
\endcode
*/
template <typename Key, typename T, typename Traits = cuckoo::traits>
class CuckooMap:
#ifdef CDS_DOXYGEN_INVOKED
protected intrusive::CuckooSet< std::pair< Key const, T>, Traits>
#else
protected details::make_cuckoo_map<Key, T, Traits>::type
#endif
{
//@cond
typedef details::make_cuckoo_map<Key, T, Traits> maker;
typedef typename maker::type base_class;
//@endcond
public:
typedef Key key_type; ///< key type
typedef T mapped_type; ///< value type stored in the container
typedef std::pair<key_type const, mapped_type> value_type; ///< Key-value pair type stored in the map
typedef Traits traits; ///< Map traits
typedef typename traits::hash hash; ///< hash functor tuple wrapped for internal use
typedef typename base_class::hash_tuple_type hash_tuple_type; ///< hash tuple type
typedef typename base_class::mutex_policy mutex_policy; ///< Concurrent access policy, see \p cuckoo::traits::mutex_policy
typedef typename base_class::stat stat; ///< internal statistics type
static bool const c_isSorted = base_class::c_isSorted; ///< whether the probe set should be ordered
static size_t const c_nArity = base_class::c_nArity; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2.
typedef typename base_class::key_equal_to key_equal_to; ///< Key equality functor; used only for unordered probe-set
typedef typename base_class::key_comparator key_comparator; ///< key comparing functor based on opt::compare and opt::less option setter. Used only for ordered probe set
typedef typename base_class::allocator allocator; ///< allocator type used for internal bucket table allocations
/// Node allocator type
typedef typename std::conditional<
std::is_same< typename traits::node_allocator, opt::none >::value,
allocator,
typename traits::node_allocator
>::type node_allocator;
/// item counter type
typedef typename traits::item_counter item_counter;
protected:
//@cond
typedef typename base_class::scoped_cell_lock scoped_cell_lock;
typedef typename base_class::scoped_full_lock scoped_full_lock;
typedef typename base_class::scoped_resize_lock scoped_resize_lock;
typedef typename maker::key_accessor key_accessor;
typedef typename base_class::value_type node_type;
typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator;
//@endcond
public:
static unsigned int const c_nDefaultProbesetSize = base_class::c_nDefaultProbesetSize; ///< default probeset size
static size_t const c_nDefaultInitialSize = base_class::c_nDefaultInitialSize; ///< default initial size
static unsigned int const c_nRelocateLimit = base_class::c_nRelocateLimit; ///< Count of attempts to relocate before giving up
protected:
//@cond
template <typename K>
static node_type * alloc_node( K const& key )
{
return cxx_node_allocator().New( key );
}
template <typename K, typename... Args>
static node_type * alloc_node( K&& key, Args&&... args )
{
return cxx_node_allocator().MoveNew( std::forward<K>( key ), std::forward<Args>(args)... );
}
static void free_node( node_type * pNode )
{
cxx_node_allocator().Delete( pNode );
}
//@endcond
protected:
//@cond
struct node_disposer {
void operator()( node_type * pNode )
{
free_node( pNode );
}
};
typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
//@endcond
public:
/// Default constructor
/**
Initial size = \ref c_nDefaultInitialSize
Probe set size:
- \ref c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list
- \p Capacity if \p probeset_type is <tt> cuckoo::vector<Capacity> </tt>
Probe set threshold = probe set size - 1
*/
CuckooMap()
{}
/// Constructs an object with given probe set size and threshold
/**
If probe set type is <tt> cuckoo::vector<Capacity> </tt> vector
then \p nProbesetSize should be equal to vector's \p Capacity.
*/
CuckooMap(
size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize
, unsigned int nProbesetSize ///< probe set size
, unsigned int nProbesetThreshold = 0 ///< probe set threshold, <tt>nProbesetThreshold < nProbesetSize</tt>. If 0, nProbesetThreshold = nProbesetSize - 1
)
: base_class( nInitialSize, nProbesetSize, nProbesetThreshold )
{}
/// Constructs an object with given hash functor tuple
/**
The probe set size and threshold are set as default, see CuckooSet()
*/
CuckooMap(
hash_tuple_type const& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
: base_class( h )
{}
/// Constructs a map with given probe set properties and hash functor tuple
/**
If probe set type is <tt> cuckoo::vector<Capacity> </tt> vector
then \p nProbesetSize should be equal to vector's \p Capacity.
*/
CuckooMap(
size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize
, unsigned int nProbesetSize ///< probe set size
, unsigned int nProbesetThreshold ///< probe set threshold, <tt>nProbesetThreshold < nProbesetSize</tt>. If 0, nProbesetThreshold = nProbesetSize - 1
, hash_tuple_type const& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
: base_class( nInitialSize, nProbesetSize, nProbesetThreshold, h )
{}
/// Constructs a map with given hash functor tuple (move semantics)
/**
The probe set size and threshold are set as default, see CuckooSet()
*/
CuckooMap(
hash_tuple_type&& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
: base_class( std::forward<hash_tuple_type>(h))
{}
/// Constructs a map with given probe set properties and hash functor tuple (move semantics)
/**
If probe set type is <tt> cuckoo::vector<Capacity> </tt> vector
then \p nProbesetSize should be equal to vector's \p Capacity.
*/
CuckooMap(
size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize
, unsigned int nProbesetSize ///< probe set size
, unsigned int nProbesetThreshold ///< probe set threshold, <tt>nProbesetThreshold < nProbesetSize</tt>. If 0, nProbesetThreshold = nProbesetSize - 1
, hash_tuple_type&& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
: base_class( nInitialSize, nProbesetSize, nProbesetThreshold, std::forward<hash_tuple_type>(h))
{}
/// Destructor clears the map
~CuckooMap()
{
clear();
}
public:
/// Inserts new node with key and default value
/**
The function creates a node with \p key and default value, and then inserts the node created into the map.
Preconditions:
- The \ref key_type should be constructible from a value of type \p K.
In the trivial case, \p K is equal to \ref key_type.
- The \ref mapped_type should be default-constructible.
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename K>
bool insert( K const& key )
{
return insert_with( key, [](value_type&){} );
}
/// Inserts new node
/**
The function creates a node with copy of \p val value
and then inserts the node created into the map.
Preconditions:
- The \ref key_type should be constructible from \p key of type \p K.
- The \ref value_type should be constructible from \p val of type \p V.
Returns \p true if \p val is inserted into the set, \p false otherwise.
*/
template <typename K, typename V>
bool insert( K const& key, V const& val )
{
return insert_with( key, [&val](value_type& item) { item.second = val ; } );
}
/// Inserts new node and initialize it by a functor
/**
This function inserts new node with key \p key and if inserting is successful then it calls
\p func functor with signature
\code
struct functor {
void operator()( value_type& item );
};
\endcode
The argument \p item of user-defined functor \p func is the reference
to the map's item inserted:
- <tt>item.first</tt> is a const reference to item's key that cannot be changed.
- <tt>item.second</tt> is a reference to item's value that may be changed.
The \p key_type should be constructible from a value of type \p K.
The function allows splitting the creation of a new item into several steps:
- create item from \p key;
- insert new item into the map;
- if inserting is successful, initialize the value of item by calling \p func functor
This can be useful if complete initialization of an object of \p value_type is heavyweight and
it is preferable that the initialization be completed only if the insert is successful.
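A minimal usage sketch (assuming \p map_type names a
<tt>CuckooMap< std::string, int ></tt> instantiation):
\code
map_type m;
// item.second is assigned only if the key was actually inserted
m.insert_with( std::string( "answer" ),
    []( map_type::value_type& item ) { item.second = 42; } );
\endcode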
*/
template <typename K, typename Func>
bool insert_with( const K& key, Func func )
{
scoped_node_ptr pNode( alloc_node( key ));
if ( base_class::insert( *pNode, [&func]( node_type& item ) { func( item.m_val ); } )) {
pNode.release();
return true;
}
return false;
}
/// For key \p key inserts data of type \ref value_type constructed with <tt>std::forward<Args>(args)...</tt>
/**
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename K, typename... Args>
bool emplace( K&& key, Args&&... args )
{
scoped_node_ptr pNode( alloc_node( std::forward<K>(key), std::forward<Args>(args)... ));
if ( base_class::insert( *pNode )) {
pNode.release();
return true;
}
return false;
}
/// Updates the node
/**
The operation performs inserting or changing data in a lock-free manner.
If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true.
Otherwise, the functor \p func is called with item found.
The functor \p func signature is:
\code
struct my_functor {
void operator()( bool bNew, value_type& item );
};
\endcode
with arguments:
- \p bNew - \p true if the item has been inserted, \p false otherwise
- \p item - an item of the map for \p key
Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
i.e. the node has been inserted or updated,
\p second is \p true if new item has been added or \p false if the item with \p key
already exists.
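A minimal usage sketch (assuming the same \p map_type as above):
\code
map_type m;
std::pair<bool, bool> res = m.update( std::string( "counter" ),
    []( bool bNew, map_type::value_type& item ) {
        item.second = bNew ? 1 : item.second + 1;   // insert 1 or increment
    });
\endcode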
*/
template <typename K, typename Func>
std::pair<bool, bool> update( K const& key, Func func, bool bAllowInsert = true )
{
scoped_node_ptr pNode( alloc_node( key ));
std::pair<bool, bool> res = base_class::update( *pNode,
[&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_val ); },
bAllowInsert
);
if ( res.first && res.second )
pNode.release();
return res;
}
//@cond
template <typename K, typename Func>
CDS_DEPRECATED("ensure() is deprecated, use update()")
std::pair<bool, bool> ensure( K const& key, Func func )
{
return update( key, func, true );
}
//@endcond
/// Delete \p key from the map
/** \anchor cds_nonintrusive_CuckooMap_erase_val
Return \p true if \p key is found and deleted, \p false otherwise
*/
template <typename K>
bool erase( K const& key )
{
node_type * pNode = base_class::erase(key);
if ( pNode ) {
free_node( pNode );
return true;
}
return false;
}
/// Deletes the item from the map using \p pred predicate for searching
/**
The function is an analog of \ref cds_nonintrusive_CuckooMap_erase_val "erase(Q const&)"
but \p pred is used for key comparing.
If cuckoo map is ordered, then \p Predicate should have the interface and semantics like \p std::less.
If cuckoo map is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to.
\p Predicate must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Predicate>
bool erase_with( K const& key, Predicate pred )
{
CDS_UNUSED( pred );
node_type * pNode = base_class::erase_with(key, cds::details::predicate_wrapper<node_type, Predicate, key_accessor>());
if ( pNode ) {
free_node( pNode );
return true;
}
return false;
}
/// Delete \p key from the map
/** \anchor cds_nonintrusive_CuckooMap_erase_func
The function searches an item with key \p key, calls \p f functor
and deletes the item. If \p key is not found, the functor is not called.
The functor \p Func interface:
\code
struct extractor {
void operator()(value_type& item) { ... }
};
\endcode
Return \p true if key is found and deleted, \p false otherwise
See also: \ref erase
*/
template <typename K, typename Func>
bool erase( K const& key, Func f )
{
node_type * pNode = base_class::erase( key );
if ( pNode ) {
f( pNode->m_val );
free_node( pNode );
return true;
}
return false;
}
/// Deletes the item from the map using \p pred predicate for searching
/**
The function is an analog of \ref cds_nonintrusive_CuckooMap_erase_func "erase(Q const&, Func)"
but \p pred is used for key comparing.
If cuckoo map is ordered, then \p Predicate should have the interface and semantics like \p std::less.
If cuckoo map is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to.
\p Predicate must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Predicate, typename Func>
bool erase_with( K const& key, Predicate pred, Func f )
{
CDS_UNUSED( pred );
node_type * pNode = base_class::erase_with( key, cds::details::predicate_wrapper<node_type, Predicate, key_accessor>());
if ( pNode ) {
f( pNode->m_val );
free_node( pNode );
return true;
}
return false;
}
/// Find the key \p key
/** \anchor cds_nonintrusive_CuckooMap_find_func
The function searches the item with key equal to \p key and calls the functor \p f for item found.
The interface of \p Func functor is:
\code
struct functor {
void operator()( value_type& item );
};
\endcode
where \p item is the item found.
The functor may change \p item.second.
The function returns \p true if \p key is found, \p false otherwise.
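A minimal usage sketch (assuming the same \p map_type as above):
\code
map_type m;
bool bFound = m.find( std::string( "answer" ),
    []( map_type::value_type& item ) { ++item.second; } );
\endcode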
*/
template <typename K, typename Func>
bool find( K const& key, Func f )
{
return base_class::find( key, [&f](node_type& item, K const& ) { f( item.m_val );});
}
/// Find the key \p val using \p pred predicate for comparing
/**
The function is an analog of \ref cds_nonintrusive_CuckooMap_find_func "find(K const&, Func)"
but \p pred is used for key comparison.
If you use ordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::less.
If you use unordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::equal_to.
\p pred must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Predicate, typename Func>
bool find_with( K const& key, Predicate pred, Func f )
{
CDS_UNUSED( pred );
return base_class::find_with( key, cds::details::predicate_wrapper<node_type, Predicate, key_accessor>(),
[&f](node_type& item, K const& ) { f( item.m_val );});
}
/// Checks whether the map contains \p key
/**
The function searches the item with key equal to \p key
and returns \p true if it is found, and \p false otherwise.
*/
template <typename K>
bool contains( K const& key )
{
return base_class::contains( key );
}
//@cond
template <typename K>
CDS_DEPRECATED("the function is deprecated, use contains()")
bool find( K const& key )
{
return contains( key );
}
//@endcond
/// Checks whether the map contains \p key using \p pred predicate for searching
/**
The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p Less must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Predicate>
bool contains( K const& key, Predicate pred )
{
CDS_UNUSED( pred );
return base_class::contains( key, cds::details::predicate_wrapper<node_type, Predicate, key_accessor>());
}
//@cond
template <typename K, typename Predicate>
CDS_DEPRECATED("the function is deprecated, use contains()")
bool find_with( K const& key, Predicate pred )
{
return contains( key, pred );
}
//@endcond
/// Clears the map
void clear()
{
base_class::clear_and_dispose( node_disposer());
}
/// Checks if the map is empty
/**
Emptiness is checked by item counting: if item count is zero then the map is empty.
*/
bool empty() const
{
return base_class::empty();
}
/// Returns item count in the map
size_t size() const
{
return base_class::size();
}
/// Returns the size of hash table
/**
The hash table size is non-constant and can be increased via resizing.
*/
size_t bucket_count() const
{
return base_class::bucket_count();
}
/// Returns lock array size
/**
The lock array size is constant.
*/
size_t lock_count() const
{
return base_class::lock_count();
}
/// Returns const reference to internal statistics
stat const& statistics() const
{
return base_class::statistics();
}
/// Returns const reference to mutex policy internal statistics
typename mutex_policy::statistics_type const& mutex_policy_statistics() const
{
return base_class::mutex_policy_statistics();
}
};
}} // namespace cds::container
#endif //#ifndef CDSLIB_CONTAINER_CUCKOO_MAP_H

extern/libcds/cds/container/cuckoo_set.h vendored Normal file

@ -0,0 +1,825 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_CUCKOO_SET_H
#define CDSLIB_CONTAINER_CUCKOO_SET_H
#include <cds/container/details/cuckoo_base.h>
#include <cds/details/binary_functor_wrapper.h>
namespace cds { namespace container {
//@cond
namespace details {
template <typename T, typename Traits>
struct make_cuckoo_set
{
typedef T value_type;
typedef Traits original_traits;
typedef typename original_traits::probeset_type probeset_type;
static bool const store_hash = original_traits::store_hash;
static unsigned int const store_hash_count = store_hash ? ((unsigned int) std::tuple_size< typename original_traits::hash::hash_tuple_type >::value) : 0;
struct node_type: public intrusive::cuckoo::node<probeset_type, store_hash_count>
{
value_type m_val;
template <typename Q>
node_type( Q const& v )
: m_val(v)
{}
template <typename... Args>
node_type( Args&&... args )
: m_val( std::forward<Args>(args)...)
{}
};
struct value_accessor {
value_type const& operator()( node_type const& node ) const
{
return node.m_val;
}
};
template <typename Pred, typename ReturnValue>
using predicate_wrapper = cds::details::binary_functor_wrapper< ReturnValue, Pred, node_type, value_accessor >;
struct intrusive_traits: public original_traits
{
typedef intrusive::cuckoo::base_hook<
cds::intrusive::cuckoo::probeset_type< probeset_type >
,cds::intrusive::cuckoo::store_hash< store_hash_count >
> hook;
typedef cds::intrusive::cuckoo::traits::disposer disposer;
typedef typename std::conditional<
std::is_same< typename original_traits::equal_to, opt::none >::value
, opt::none
, predicate_wrapper< typename original_traits::equal_to, bool >
>::type equal_to;
typedef typename std::conditional<
std::is_same< typename original_traits::compare, opt::none >::value
, opt::none
, predicate_wrapper< typename original_traits::compare, int >
>::type compare;
typedef typename std::conditional<
std::is_same< typename original_traits::less, opt::none >::value
,opt::none
,predicate_wrapper< typename original_traits::less, bool >
>::type less;
typedef opt::details::hash_list_wrapper< typename original_traits::hash, node_type, value_accessor > hash;
};
typedef intrusive::CuckooSet< node_type, intrusive_traits > type;
};
} // namespace details
//@endcond
/// Cuckoo hash set
/** @ingroup cds_nonintrusive_set
Source
- [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report"
- [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming"
<b>About Cuckoo hashing</b>
[From "The Art of Multiprocessor Programming"]
<a href="https://en.wikipedia.org/wiki/Cuckoo_hashing">Cuckoo hashing</a> is a hashing algorithm in which a newly added item displaces any earlier item
occupying the same slot. For brevity, a table is a k-entry array of items. For a hash set of size
N = 2k we use a two-entry array of tables, and two independent hash functions,
<tt> h0, h1: KeyRange -> 0,...,k-1</tt>
mapping the set of possible keys to entries in the array. To test whether a value \p x is in the set,
<tt>find(x)</tt> tests whether either <tt>table[0][h0(x)]</tt> or <tt>table[1][h1(x)]</tt> is
equal to \p x. Similarly, <tt>erase(x)</tt> checks whether \p x is in either <tt>table[0][h0(x)]</tt>
or <tt>table[1][h1(x)]</tt>, and removes it if found.
The <tt>insert(x)</tt> successively "kicks out" conflicting items until every key has a slot.
To add \p x, the method swaps \p x with \p y, the current occupant of <tt>table[0][h0(x)]</tt>.
If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y
for the current occupant of <tt>table[1][h1(y)]</tt> in the same way. As before, if the prior value
was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables)
until it finds an empty slot. We might not find an empty slot, either because the table is full,
or because the sequence of displacements forms a cycle. We therefore need an upper limit on the
number of successive displacements we are willing to undertake. When this limit is exceeded,
we resize the hash table, choose new hash functions and start over.
For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of
items, we use a two-dimensional table of probe sets, where a probe set is a constant-sized set
of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm
tries to ensure that when the set is quiescent (i.e. no method call in progress) each probe set
holds no more than <tt>THRESHOLD < PROBE_SIZE</tt> items. While method calls are in-flight, a probe
set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SIZE items.
In the current implementation, a probe set can be defined either as a (singly-linked) list
or as a fixed-sized vector, optionally ordered.
In the description above, two-table cuckoo hashing (<tt>k = 2</tt>) has been considered.
We can generalize this approach for <tt>k >= 2</tt> when we have \p k hash functions
<tt>h[0], ... h[k-1]</tt> and \p k tables <tt>table[0], ... table[k-1]</tt>.
The search in a probe set is linear, with complexity <tt> O(PROBE_SIZE) </tt>.
The probe set may be ordered or not. An ordered probe set can be a little better since
the average search complexity is <tt>O(PROBE_SIZE/2)</tt>.
However, the overhead of sorting can eliminate a gain of ordered search.
The probe set is ordered if \p compare or \p less is specified in \p Traits
template parameter. Otherwise, the probe set is unordered and \p Traits must contain
\p equal_to predicate.
Template arguments:
- \p T - the type stored in the set.
- \p Traits - type traits. See cuckoo::traits for explanation.
It is possible to declare an option-based set with the cuckoo::make_traits metafunction result as the \p Traits template argument.
<b>Examples</b>
Cuckoo-set with list-based unordered probe set and storing hash values
\code
#include <cds/container/cuckoo_set.h>
// Data stored in cuckoo set
struct my_data
{
// key field
std::string strKey;
// other data
// ...
};
// Provide equal_to functor for my_data since we will use unordered probe-set
struct my_data_equal_to {
bool operator()( const my_data& d1, const my_data& d2 ) const
{
return d1.strKey.compare( d2.strKey ) == 0;
}
bool operator()( const my_data& d, const std::string& s ) const
{
return d.strKey.compare(s) == 0;
}
bool operator()( const std::string& s, const my_data& d ) const
{
return s.compare( d.strKey ) == 0;
}
};
// Provide two hash functors for my_data
struct hash1 {
size_t operator()(std::string const& s) const
{
return cds::opt::v::hash<std::string>( s );
}
size_t operator()( my_data const& d ) const
{
return (*this)( d.strKey );
}
};
struct hash2: private hash1 {
size_t operator()(std::string const& s) const
{
size_t h = ~( hash1::operator()(s));
return ~h + 0x9e3779b9 + (h << 6) + (h >> 2);
}
size_t operator()( my_data const& d ) const
{
return (*this)( d.strKey );
}
};
// Declare type traits
struct my_traits: public cds::container::cuckoo::traits
{
typedef my_data_equal_to equal_to;
typedef std::tuple< hash1, hash2 > hash;
static bool const store_hash = true;
};
// Declare CuckooSet type
typedef cds::container::CuckooSet< my_data, my_traits > my_cuckoo_set;
// Equal option-based declaration
typedef cds::container::CuckooSet< my_data,
cds::container::cuckoo::make_traits<
cds::opt::hash< std::tuple< hash1, hash2 > >
,cds::opt::equal_to< my_data_equal_to >
,cds::container::cuckoo::store_hash< true >
>::type
> opt_cuckoo_set;
\endcode
If we provide \p compare function instead of \p equal_to for \p my_data
we get as a result a cuckoo set with ordered probe set that may improve
performance.
Example for ordered vector-based probe-set:
\code
#include <cds/container/cuckoo_set.h>
// Data stored in cuckoo set
struct my_data
{
// key field
std::string strKey;
// other data
// ...
};
// Provide compare functor for my_data since we want to use ordered probe-set
struct my_data_compare {
int operator()( const my_data& d1, const my_data& d2 ) const
{
return d1.strKey.compare( d2.strKey );
}
int operator()( const my_data& d, const std::string& s ) const
{
return d.strKey.compare(s);
}
int operator()( const std::string& s, const my_data& d ) const
{
return s.compare( d.strKey );
}
};
// Provide two hash functors for my_data
struct hash1 {
size_t operator()(std::string const& s) const
{
return cds::opt::v::hash<std::string>( s );
}
size_t operator()( my_data const& d ) const
{
return (*this)( d.strKey );
}
};
struct hash2: private hash1 {
size_t operator()(std::string const& s) const
{
size_t h = ~( hash1::operator()(s));
return ~h + 0x9e3779b9 + (h << 6) + (h >> 2);
}
size_t operator()( my_data const& d ) const
{
return (*this)( d.strKey );
}
};
// Declare type traits
// We use a vector of capacity 4 as probe-set container and store hash values in the node
struct my_traits: public cds::container::cuckoo::traits
{
typedef my_data_compare compare;
typedef std::tuple< hash1, hash2 > hash;
typedef cds::container::cuckoo::vector<4> probeset_type;
static bool const store_hash = true;
};
// Declare CuckooSet type
typedef cds::container::CuckooSet< my_data, my_traits > my_cuckoo_set;
// Equal option-based declaration
typedef cds::container::CuckooSet< my_data,
cds::container::cuckoo::make_traits<
cds::opt::hash< std::tuple< hash1, hash2 > >
,cds::opt::compare< my_data_compare >
,cds::container::cuckoo::probeset_type< cds::container::cuckoo::vector<4> >
,cds::container::cuckoo::store_hash< true >
>::type
> opt_cuckoo_set;
\endcode
*/
template <typename T, typename Traits = cuckoo::traits>
class CuckooSet:
#ifdef CDS_DOXYGEN_INVOKED
protected intrusive::CuckooSet<T, Traits>
#else
protected details::make_cuckoo_set<T, Traits>::type
#endif
{
//@cond
typedef details::make_cuckoo_set<T, Traits> maker;
typedef typename maker::type base_class;
//@endcond
public:
typedef T value_type ; ///< value type stored in the container
typedef Traits traits ; ///< traits
typedef typename traits::hash hash; ///< hash functor tuple wrapped for internal use
typedef typename base_class::hash_tuple_type hash_tuple_type; ///< Type of hash tuple
typedef typename base_class::mutex_policy mutex_policy; ///< Concurrent access policy, see cuckoo::traits::mutex_policy
typedef typename base_class::stat stat; ///< internal statistics type
static bool const c_isSorted = base_class::c_isSorted; ///< whether the probe set should be ordered
static size_t const c_nArity = base_class::c_nArity; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2.
typedef typename base_class::key_equal_to key_equal_to; ///< Key equality functor; used only for unordered probe-set
typedef typename base_class::key_comparator key_comparator; ///< key comparing functor based on \p Traits::compare and \p Traits::less option setter. Used only for ordered probe set
typedef typename base_class::allocator allocator; ///< allocator type used for internal bucket table allocations
/// Node allocator type
typedef typename std::conditional<
std::is_same< typename traits::node_allocator, opt::none >::value,
allocator,
typename traits::node_allocator
>::type node_allocator;
/// item counter type
typedef typename traits::item_counter item_counter;
protected:
//@cond
typedef typename base_class::value_type node_type;
typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator;
//@endcond
public:
static unsigned int const c_nDefaultProbesetSize = base_class::c_nDefaultProbesetSize; ///< default probeset size
static size_t const c_nDefaultInitialSize = base_class::c_nDefaultInitialSize; ///< default initial size
static unsigned int const c_nRelocateLimit = base_class::c_nRelocateLimit; ///< Count of attempts to relocate before giving up
protected:
//@cond
template <typename Q>
static node_type * alloc_node( Q const& v )
{
return cxx_node_allocator().New( v );
}
template <typename... Args>
static node_type * alloc_node( Args&&... args )
{
return cxx_node_allocator().MoveNew( std::forward<Args>(args)... );
}
static void free_node( node_type * pNode )
{
cxx_node_allocator().Delete( pNode );
}
//@endcond
protected:
//@cond
struct node_disposer {
void operator()( node_type * pNode )
{
free_node( pNode );
}
};
typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
//@endcond
public:
/// Default constructor
/**
Initial size = \ref c_nDefaultInitialSize
Probe set size:
- \ref c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list
- \p Capacity if \p probeset_type is <tt> cuckoo::vector<Capacity> </tt>
Probe set threshold = probe set size - 1
*/
CuckooSet()
{}
/// Constructs the set object with given probe set size and threshold
/**
If probe set type is <tt> cuckoo::vector<Capacity> </tt> vector
then \p nProbesetSize should be equal to vector's \p Capacity.
*/
CuckooSet(
size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize
, unsigned int nProbesetSize ///< probe set size
, unsigned int nProbesetThreshold = 0 ///< probe set threshold, <tt>nProbesetThreshold < nProbesetSize</tt>. If 0, nProbesetThreshold = nProbesetSize - 1
)
: base_class( nInitialSize, nProbesetSize, nProbesetThreshold )
{}
/// Constructs the set object with given hash functor tuple
/**
The probe set size and threshold are set as default, see CuckooSet()
*/
CuckooSet(
hash_tuple_type const& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
: base_class( h )
{}
/// Constructs the set object with given probe set properties and hash functor tuple
/**
If probe set type is <tt> cuckoo::vector<Capacity> </tt> vector
then \p nProbesetSize should be equal to vector's \p Capacity.
*/
CuckooSet(
size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize
, unsigned int nProbesetSize ///< probe set size
, unsigned int nProbesetThreshold ///< probe set threshold, <tt>nProbesetThreshold < nProbesetSize</tt>. If 0, nProbesetThreshold = nProbesetSize - 1
, hash_tuple_type const& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
: base_class( nInitialSize, nProbesetSize, nProbesetThreshold, h )
{}
/// Constructs the set object with given hash functor tuple (move semantics)
/**
The probe set size and threshold are set as default, see CuckooSet()
*/
CuckooSet(
hash_tuple_type&& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
: base_class( std::forward<hash_tuple_type>(h))
{}
/// Constructs the set object with given probe set properties and hash functor tuple (move semantics)
/**
If probe set type is <tt> cuckoo::vector<Capacity> </tt> vector
then \p nProbesetSize should be equal to vector's \p Capacity.
*/
CuckooSet(
size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize
, unsigned int nProbesetSize ///< probe set size
, unsigned int nProbesetThreshold ///< probe set threshold, <tt>nProbesetThreshold < nProbesetSize</tt>. If 0, nProbesetThreshold = nProbesetSize - 1
, hash_tuple_type&& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
: base_class( nInitialSize, nProbesetSize, nProbesetThreshold, std::forward<hash_tuple_type>(h))
{}
/// Destructor clears the set
~CuckooSet()
{
clear();
}
public:
/// Inserts new node
/**
The function creates a node with copy of \p val value
and then inserts the node created into the set.
The type \p Q should contain at minimum the complete key for the node.
The object of \ref value_type should be constructible from a value of type \p Q.
In the trivial case, \p Q is equal to \ref value_type.
Returns \p true if \p val is inserted into the set, \p false otherwise.
*/
template <typename Q>
bool insert( Q const& val )
{
return insert( val, []( value_type& ) {} );
}
/// Inserts new node
/**
The function allows splitting the creation of a new item into several steps:
- create the item with the key only
- insert the new item into the set
- if the insert succeeds, call the \p f functor to initialize the value field of the new item.
The functor signature is:
\code
void func( value_type& item );
\endcode
where \p item is the item inserted.
The type \p Q can differ from \ref value_type of items stored in the set.
Therefore, the \p value_type should be constructible from type \p Q.
The user-defined functor is called only if the insert succeeds.
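A minimal usage sketch (assuming the \p my_data and \p my_cuckoo_set types from
the class-level example above):
\code
my_cuckoo_set s;
my_data d;
d.strKey = "alice";
// non-key fields of the stored copy are completed only if the insert succeeded
s.insert( d, []( my_data& item ) { /* initialize non-key fields of item */ } );
\endcode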
*/
template <typename Q, typename Func>
bool insert( Q const& val, Func f )
{
scoped_node_ptr pNode( alloc_node( val ));
if ( base_class::insert( *pNode, [&f]( node_type& node ) { f( node.m_val ); } )) {
pNode.release();
return true;
}
return false;
}
/// Inserts data of type \ref value_type constructed with <tt>std::forward<Args>(args)...</tt>
/**
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename... Args>
bool emplace( Args&&... args )
{
scoped_node_ptr pNode( alloc_node( std::forward<Args>(args)... ));
if ( base_class::insert( *pNode )) {
pNode.release();
return true;
}
return false;
}
/// Updates the node
/**
The operation performs inserting or changing data in a lock-free manner.
If the item \p val is not found in the set, then \p val is inserted into the set
iff \p bAllowInsert is \p true.
Otherwise, the functor \p func is called with item found.
The functor \p func signature is:
\code
struct my_functor {
void operator()( bool bNew, value_type& item, const Q& val );
};
\endcode
with arguments:
- \p bNew - \p true if the item has been inserted, \p false otherwise
- \p item - item of the set
- \p val - argument \p val passed into the \p %update() function
If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
refer to the same thing.
Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
i.e. the node has been inserted or updated,
\p second is \p true if new item has been added or \p false if the item with \p key
already exists.
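A minimal usage sketch (assuming the same types as in the class-level example):
\code
my_cuckoo_set s;
my_data d;
d.strKey = "bob";
std::pair<bool, bool> res = s.update( d,
    []( bool bNew, my_data& item, my_data const& val ) {
        // bNew == true: item was just inserted; otherwise refresh item from val
    });
\endcode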
*/
template <typename Q, typename Func>
std::pair<bool, bool> update( Q const& val, Func func, bool bAllowInsert = true )
{
scoped_node_ptr pNode( alloc_node( val ));
std::pair<bool, bool> res = base_class::update( *pNode,
[&val,&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_val, val ); },
bAllowInsert
);
if ( res.first && res.second )
pNode.release();
return res;
}
//@cond
template <typename Q, typename Func>
CDS_DEPRECATED("ensure() is deprecated, use update()")
std::pair<bool, bool> ensure( Q const& val, Func func )
{
return update( val, func, true );
}
//@endcond
/// Delete \p key from the set
/** \anchor cds_nonintrusive_CuckooSet_erase
Since the key of the set's item type \ref value_type is not explicitly specified,
the template parameter \p Q defines the key type to search for in the set.
The set item comparator should be able to compare the type \p value_type
and the type \p Q.
Return \p true if key is found and deleted, \p false otherwise
*/
template <typename Q>
bool erase( Q const& key )
{
node_type * pNode = base_class::erase( key );
if ( pNode ) {
free_node( pNode );
return true;
}
return false;
}
/// Deletes the item from the set using \p pred predicate for searching
/**
The function is an analog of \ref cds_nonintrusive_CuckooSet_erase "erase(Q const&)"
but \p pred is used for key comparing.
If cuckoo set is ordered, then \p Predicate should have the interface and semantics like \p std::less.
If cuckoo set is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to.
\p Predicate must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Predicate>
bool erase_with( Q const& key, Predicate pred )
{
CDS_UNUSED( pred );
node_type * pNode = base_class::erase_with( key, typename maker::template predicate_wrapper<Predicate, bool>());
if ( pNode ) {
free_node( pNode );
return true;
}
return false;
}
/// Delete \p key from the set
/** \anchor cds_nonintrusive_CuckooSet_erase_func
The function searches an item with key \p key, calls \p f functor
and deletes the item. If \p key is not found, the functor is not called.
The functor \p Func interface is:
\code
struct functor {
void operator()(value_type const& val);
};
\endcode
Returns \p true if the key is found and deleted, \p false otherwise
*/
template <typename Q, typename Func>
bool erase( Q const& key, Func f )
{
node_type * pNode = base_class::erase( key );
if ( pNode ) {
f( pNode->m_val );
free_node( pNode );
return true;
}
return false;
}
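// A minimal usage sketch for erase( key, f ), same illustrative assumptions:
// the functor lets the caller capture fields of the item before its node is freed.
//
//   int nOld = 0;
//   bool bErased = s.erase( 5,
//       [&nOld]( my_item const& item ) { nOld = item.nPayload; });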
/// Deletes the item from the list using \p pred predicate for searching
/**
The function is an analog of \ref cds_nonintrusive_CuckooSet_erase_func "erase(Q const&, Func)"
but \p pred is used for key comparing.
If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less.
If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to.
\p Predicate must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Predicate, typename Func>
bool erase_with( Q const& key, Predicate pred, Func f )
{
CDS_UNUSED( pred );
node_type * pNode = base_class::erase_with( key, typename maker::template predicate_wrapper<Predicate, bool>());
if ( pNode ) {
f( pNode->m_val );
free_node( pNode );
return true;
}
return false;
}
/// Find the key \p val
/** \anchor cds_nonintrusive_CuckooSet_find_func
The function searches the item with key equal to \p val and calls the functor \p f for item found.
The interface of \p Func functor is:
\code
struct functor {
void operator()( value_type& item, Q& val );
};
\endcode
where \p item is the item found, \p val is the <tt>find</tt> function argument.
The functor may change non-key fields of \p item.
The \p val argument is non-const since it can be used as the \p f functor destination, i.e. the functor
can modify both arguments.
The type \p Q can differ from \ref value_type of the items stored in the container.
Therefore, the \p value_type should be comparable with the type \p Q.
The function returns \p true if \p val is found, \p false otherwise.
*/
template <typename Q, typename Func>
bool find( Q& val, Func f )
{
return base_class::find( val, [&f](node_type& item, Q& v) { f( item.m_val, v );});
}
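// A minimal usage sketch for find( val, f ), same illustrative assumptions:
// the functor may update non-key fields of the found item in place.
//
//   bool bFound = s.find( 5,
//       []( my_item& item, int const& /*val*/ ) { ++item.nPayload; });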
//@cond
template <typename Q, typename Func>
bool find( Q const& val, Func f )
{
return base_class::find( val, [&f](node_type& item, Q const& v) { f( item.m_val, v );});
}
//@endcond
/// Find the key \p val using \p pred predicate for comparing
/**
The function is an analog of \ref cds_nonintrusive_CuckooSet_find_func "find(Q&, Func)"
but \p pred is used for key comparison.
If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less.
If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to.
\p pred must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Predicate, typename Func>
bool find_with( Q& val, Predicate pred, Func f )
{
CDS_UNUSED( pred );
return base_class::find_with( val, typename maker::template predicate_wrapper<Predicate, bool>(),
[&f](node_type& item, Q& v) { f( item.m_val, v );});
}
//@cond
template <typename Q, typename Predicate, typename Func>
bool find_with( Q const& val, Predicate pred, Func f )
{
CDS_UNUSED( pred );
return base_class::find_with( val, typename maker::template predicate_wrapper<Predicate, bool>(),
[&f](node_type& item, Q const& v) { f( item.m_val, v );});
}
//@endcond
/// Checks whether the set contains \p key
/**
The function searches the item with key equal to \p key
and returns \p true if it is found, and \p false otherwise.
*/
template <typename Q>
bool contains( Q const& key )
{
return base_class::find( key, [](node_type&, Q const&) {});
}
//@cond
template <typename Q>
CDS_DEPRECATED("the function is deprecated, use contains()")
bool find( Q const& key )
{
return contains( key );
}
//@endcond
/// Checks whether the set contains \p key using \p pred predicate for searching
/**
The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
\p Predicate should have the interface and semantics like \p std::less.
\p Predicate must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Predicate>
bool contains( Q const& key, Predicate pred )
{
CDS_UNUSED( pred );
return base_class::find_with( key, typename maker::template predicate_wrapper<Predicate, bool>(), [](node_type&, Q const&) {});
}
//@cond
template <typename Q, typename Predicate>
CDS_DEPRECATED("the function is deprecated, use contains()")
bool find_with( Q const& key, Predicate pred )
{
return contains( key, pred );
}
//@endcond
/// Clears the set
/**
The function erases all items from the set.
*/
void clear()
{
return base_class::clear_and_dispose( node_disposer());
}
/// Checks if the set is empty
/**
Emptiness is checked by item counting: if item count is zero then the set is empty.
*/
bool empty() const
{
return base_class::empty();
}
/// Returns item count in the set
size_t size() const
{
return base_class::size();
}
/// Returns the size of hash table
/**
The hash table size is non-constant and can be increased via resizing.
*/
size_t bucket_count() const
{
return base_class::bucket_count();
}
/// Returns lock array size
size_t lock_count() const
{
return base_class::lock_count();
}
/// Returns const reference to internal statistics
stat const& statistics() const
{
return base_class::statistics();
}
/// Returns const reference to mutex policy internal statistics
typename mutex_policy::statistics_type const& mutex_policy_statistics() const
{
return base_class::mutex_policy_statistics();
}
};
}} // namespace cds::container
#endif //#ifndef CDSLIB_CONTAINER_CUCKOO_SET_H

View File

@ -0,0 +1,78 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_DETAILS_BASE_H
#define CDSLIB_CONTAINER_DETAILS_BASE_H
#include <cds/intrusive/details/base.h>
namespace cds {
/// Standard (non-intrusive) containers
/**
@ingroup cds_nonintrusive_containers
This namespace contains implementations of non-intrusive (std-like) lock-free containers.
*/
namespace container {
/// Common options for non-intrusive containers
/** @ingroup cds_nonintrusive_helper
This namespace contains options for non-intrusive containers that are, in general, the same as for the intrusive containers.
It imports all definitions from cds::opt and cds::intrusive::opt namespaces
*/
namespace opt {
using namespace cds::intrusive::opt;
} // namespace opt
/// @defgroup cds_nonintrusive_containers Non-intrusive containers
/** @defgroup cds_nonintrusive_helper Helper structs for non-intrusive containers
@ingroup cds_nonintrusive_containers
*/
/** @defgroup cds_nonintrusive_stack Stack
@ingroup cds_nonintrusive_containers
*/
/** @defgroup cds_nonintrusive_queue Queue
@ingroup cds_nonintrusive_containers
*/
/** @defgroup cds_nonintrusive_deque Deque
@ingroup cds_nonintrusive_containers
*/
/** @defgroup cds_nonintrusive_priority_queue Priority queue
@ingroup cds_nonintrusive_containers
*/
/** @defgroup cds_nonintrusive_map Map
@ingroup cds_nonintrusive_containers
*/
/** @defgroup cds_nonintrusive_set Set
@ingroup cds_nonintrusive_containers
*/
/** @defgroup cds_nonintrusive_list List
@ingroup cds_nonintrusive_containers
*/
/** @defgroup cds_nonintrusive_tree Tree
@ingroup cds_nonintrusive_containers
*/
/// Tag for selecting iterable list implementation
/** @ingroup cds_nonintrusive_helper
This struct is empty and it is used only as a tag for selecting \p IterableList
as ordered list implementation in declaration of some classes.
See \p split_list::traits::ordered_list as an example.
*/
typedef intrusive::iterable_list_tag iterable_list_tag;
//@cond
template <typename List>
struct is_iterable_list: public cds::intrusive::is_iterable_list< List >
{};
//@endcond
} // namespace container
} // namespace cds
#endif // #ifndef CDSLIB_CONTAINER_DETAILS_BASE_H

View File

@ -0,0 +1,503 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_DETAILS_BRONSON_AVLTREE_BASE_H
#define CDSLIB_CONTAINER_DETAILS_BRONSON_AVLTREE_BASE_H
#include <cds/container/details/base.h>
#include <cds/opt/compare.h>
#include <cds/urcu/options.h>
#include <cds/sync/spinlock.h>
#include <cds/sync/injecting_monitor.h>
namespace cds { namespace container {
/// BronsonAVLTree related declarations
namespace bronson_avltree {
template <typename Key, typename T, typename SyncMonitor >
struct node;
//@cond
template <typename Node, typename T, typename SyncMonitor>
struct link_node
{
typedef Node node_type;
typedef T mapped_type;
typedef uint32_t version_type; ///< version type (internal)
enum
{
shrinking = 1,
unlinked = 2,
version_flags = shrinking | unlinked
// the rest is version counter
};
atomics::atomic< int > m_nHeight; ///< Node height
atomics::atomic<version_type> m_nVersion; ///< Version bits
atomics::atomic<node_type *> m_pParent; ///< Parent node
atomics::atomic<node_type *> m_pLeft; ///< Left child
atomics::atomic<node_type *> m_pRight; ///< Right child
typename SyncMonitor::node_injection m_SyncMonitorInjection; ///< @ref cds_sync_monitor "synchronization monitor" injected data
atomics::atomic<mapped_type *> m_pValue; ///< Value
public:
link_node()
: m_nHeight( 0 )
, m_nVersion( 0 )
, m_pParent( nullptr )
, m_pLeft( nullptr )
, m_pRight( nullptr )
{
m_pValue.store( nullptr, atomics::memory_order_release );
}
link_node( int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
: m_nHeight( nHeight )
, m_nVersion( version )
, m_pParent( pParent )
, m_pLeft( pLeft )
, m_pRight( pRight )
{
m_pValue.store( nullptr, atomics::memory_order_release );
}
node_type * parent( atomics::memory_order order ) const
{
return m_pParent.load( order );
}
void parent( node_type * p, atomics::memory_order order )
{
m_pParent.store( p, order );
}
node_type * child( int nDirection, atomics::memory_order order ) const
{
assert( nDirection != 0 );
return nDirection < 0 ? m_pLeft.load( order ) : m_pRight.load( order );
}
void child( node_type * pChild, int nDirection, atomics::memory_order order )
{
assert( nDirection != 0 );
if ( nDirection < 0 )
m_pLeft.store( pChild, order );
else
m_pRight.store( pChild, order );
}
version_type version( atomics::memory_order order ) const
{
return m_nVersion.load( order );
}
void version( version_type ver, atomics::memory_order order )
{
m_nVersion.store( ver, order );
}
void exchange_version( version_type ver, atomics::memory_order order )
{
m_nVersion.exchange( ver, order );
}
int height( atomics::memory_order order ) const
{
return m_nHeight.load( order );
}
void height( int h, atomics::memory_order order )
{
m_nHeight.store( h, order );
}
template <typename BackOff>
void wait_until_shrink_completed( atomics::memory_order order ) const
{
BackOff bkoff;
while ( is_shrinking( order ))
bkoff();
}
bool is_unlinked( atomics::memory_order order ) const
{
return m_nVersion.load( order ) == unlinked;
}
bool is_shrinking( atomics::memory_order order ) const
{
return (m_nVersion.load( order ) & shrinking) != 0;
}
mapped_type * value( atomics::memory_order order ) const
{
return m_pValue.load( order );
}
bool is_valued( atomics::memory_order order ) const
{
return value( order ) != nullptr;
}
};
//@endcond
/// BronsonAVLTree internal node
template <typename Key, typename T, typename SyncMonitor >
struct node<Key, T*, SyncMonitor>: public link_node< node<Key, T*, SyncMonitor>, T, SyncMonitor >
{
//@cond
typedef link_node< node<Key, T*, SyncMonitor>, T, SyncMonitor > base_class;
//@endcond
typedef Key key_type; ///< key type
typedef T mapped_type; ///< value type
//@cond
typedef typename base_class::version_type version_type;
//@endcond
key_type const m_key; ///< Key
node * m_pNextRemoved; ///< thread-local list of removed nodes
public:
//@cond
template <typename Q>
node( Q&& key )
: base_class()
, m_key( std::forward<Q>( key ))
, m_pNextRemoved( nullptr )
{}
template <typename Q>
node( Q&& key, int nHeight, version_type version, node * pParent, node * pLeft, node * pRight )
: base_class( nHeight, version, pParent, pLeft, pRight )
, m_key( std::forward<Q>( key ))
, m_pNextRemoved( nullptr )
{}
//@endcond
};
/// BronsonAVLTreeMap internal statistics
template <typename Counter = cds::atomicity::event_counter>
struct stat {
typedef Counter event_counter; ///< Event counter type
event_counter m_nFindSuccess; ///< Count of successful \p find() calls
event_counter m_nFindFailed; ///< Count of failed \p find() calls
event_counter m_nFindRetry; ///< Count of retries during \p find()
event_counter m_nFindWaitShrinking; ///< Count of waiting until shrinking completed during \p find() call
event_counter m_nInsertSuccess; ///< Count of inserted data nodes
event_counter m_nInsertFailed; ///< Count of insert failures
event_counter m_nRelaxedInsertFailed; ///< Count of data nodes created in vain (only if @ref bronson_avltree::relaxed_insert "relaxed insertion" is enabled)
event_counter m_nInsertRetry; ///< Count of insert retries due to concurrent operations
event_counter m_nUpdateWaitShrinking; ///< Count of waiting until shrinking completed during \p update() call
event_counter m_nUpdateRetry; ///< Count of update retries due to concurrent operations
event_counter m_nUpdateRootWaitShrinking; ///< Count of waiting until root shrinking completed during \p update() call
event_counter m_nUpdateSuccess; ///< Count of updating data node
event_counter m_nUpdateUnlinked; ///< Count of attempts to update unlinked node
event_counter m_nDisposedNode; ///< Count of disposed node
event_counter m_nDisposedValue; ///< Count of disposed value
event_counter m_nExtractedValue; ///< Count of extracted value
event_counter m_nRemoveSuccess; ///< Count of successful \p erase() calls
event_counter m_nRemoveFailed; ///< Count of failed \p erase() calls
event_counter m_nRemoveRetry; ///< Count of erase/extract retries
event_counter m_nExtractSuccess; ///< Count of successful \p extract() calls
event_counter m_nExtractFailed; ///< Count of failed \p extract() calls
event_counter m_nRemoveWaitShrinking; ///< Count of waiting until shrinking completed during \p erase() or \p extract() call
event_counter m_nRemoveRootWaitShrinking; ///< Count of waiting until root shrinking completed during \p erase() or \p extract() call
event_counter m_nMakeRoutingNode; ///< How many nodes were converted to routing (valueless) nodes
event_counter m_nRightRotation; ///< Count of single right rotation
event_counter m_nLeftRotation; ///< Count of single left rotation
event_counter m_nLeftRightRotation; ///< Count of double left-over-right rotation
event_counter m_nRightLeftRotation; ///< Count of double right-over-left rotation
event_counter m_nRotateAfterRightRotation; ///< Count of rotation required after single right rotation
event_counter m_nRemoveAfterRightRotation; ///< Count of removal required after single right rotation
event_counter m_nDamageAfterRightRotation; ///< Count of damaged node after single right rotation
event_counter m_nRotateAfterLeftRotation; ///< Count of rotation required after single left rotation
event_counter m_nRemoveAfterLeftRotation; ///< Count of removal required after single left rotation
event_counter m_nDamageAfterLeftRotation; ///< Count of damaged node after single left rotation
event_counter m_nRotateAfterRLRotation; ///< Count of rotation required after right-over-left rotation
event_counter m_nRemoveAfterRLRotation; ///< Count of removal required after right-over-left rotation
event_counter m_nRotateAfterLRRotation; ///< Count of rotation required after left-over-right rotation
event_counter m_nRemoveAfterLRRotation; ///< Count of removal required after left-over-right rotation
event_counter m_nInsertRebalanceReq; ///< Count of rebalance required after inserting
event_counter m_nRemoveRebalanceReq; ///< Count of rebalance required after removing
//@cond
void onFindSuccess() { ++m_nFindSuccess ; }
void onFindFailed() { ++m_nFindFailed ; }
void onFindRetry() { ++m_nFindRetry ; }
void onFindWaitShrinking() { ++m_nFindWaitShrinking; }
void onInsertSuccess() { ++m_nInsertSuccess; }
void onInsertFailed() { ++m_nInsertFailed; }
void onRelaxedInsertFailed() { ++m_nRelaxedInsertFailed; }
void onInsertRetry() { ++m_nInsertRetry ; }
void onUpdateWaitShrinking() { ++m_nUpdateWaitShrinking; }
void onUpdateRetry() { ++m_nUpdateRetry; }
void onUpdateRootWaitShrinking() { ++m_nUpdateRootWaitShrinking; }
void onUpdateSuccess() { ++m_nUpdateSuccess; }
void onUpdateUnlinked() { ++m_nUpdateUnlinked; }
void onDisposeNode() { ++m_nDisposedNode; }
void onDisposeValue() { ++m_nDisposedValue; }
void onExtractValue() { ++m_nExtractedValue; }
void onRemove(bool bSuccess)
{
if ( bSuccess )
++m_nRemoveSuccess;
else
++m_nRemoveFailed;
}
void onExtract( bool bSuccess )
{
if ( bSuccess )
++m_nExtractSuccess;
else
++m_nExtractFailed;
}
void onRemoveRetry() { ++m_nRemoveRetry; }
void onRemoveWaitShrinking() { ++m_nRemoveWaitShrinking; }
void onRemoveRootWaitShrinking() { ++m_nRemoveRootWaitShrinking; }
void onMakeRoutingNode() { ++m_nMakeRoutingNode; }
void onRotateRight() { ++m_nRightRotation; }
void onRotateLeft() { ++m_nLeftRotation; }
void onRotateRightOverLeft() { ++m_nRightLeftRotation; }
void onRotateLeftOverRight() { ++m_nLeftRightRotation; }
void onRotateAfterRightRotation() { ++m_nRotateAfterRightRotation; }
void onRemoveAfterRightRotation() { ++m_nRemoveAfterRightRotation; }
void onDamageAfterRightRotation() { ++m_nDamageAfterRightRotation; }
void onRotateAfterLeftRotation() { ++m_nRotateAfterLeftRotation; }
void onRemoveAfterLeftRotation() { ++m_nRemoveAfterLeftRotation; }
void onDamageAfterLeftRotation() { ++m_nDamageAfterLeftRotation; }
void onRotateAfterRLRotation() { ++m_nRotateAfterRLRotation; }
void onRemoveAfterRLRotation() { ++m_nRemoveAfterRLRotation; }
void onRotateAfterLRRotation() { ++m_nRotateAfterLRRotation; }
void onRemoveAfterLRRotation() { ++m_nRemoveAfterLRRotation; }
void onInsertRebalanceRequired() { ++m_nInsertRebalanceReq; }
void onRemoveRebalanceRequired() { ++m_nRemoveRebalanceReq; }
//@endcond
};
/// BronsonAVLTreeMap empty statistics
struct empty_stat {
//@cond
void onFindSuccess() const {}
void onFindFailed() const {}
void onFindRetry() const {}
void onFindWaitShrinking() const {}
void onInsertSuccess() const {}
void onInsertFailed() const {}
void onRelaxedInsertFailed() const {}
void onInsertRetry() const {}
void onUpdateWaitShrinking() const {}
void onUpdateRetry() const {}
void onUpdateRootWaitShrinking() const {}
void onUpdateSuccess() const {}
void onUpdateUnlinked() const {}
void onDisposeNode() const {}
void onDisposeValue() const {}
void onExtractValue() const {}
void onRemove(bool /*bSuccess*/) const {}
void onExtract(bool /*bSuccess*/) const {}
void onRemoveRetry() const {}
void onRemoveWaitShrinking() const {}
void onRemoveRootWaitShrinking() const {}
void onMakeRoutingNode() const {}
void onRotateRight() const {}
void onRotateLeft() const {}
void onRotateRightOverLeft() const {}
void onRotateLeftOverRight() const {}
void onRotateAfterRightRotation() const {}
void onRemoveAfterRightRotation() const {}
void onDamageAfterRightRotation() const {}
void onRotateAfterLeftRotation() const {}
void onRemoveAfterLeftRotation() const {}
void onDamageAfterLeftRotation() const {}
void onRotateAfterRLRotation() const {}
void onRemoveAfterRLRotation() const {}
void onRotateAfterLRRotation() const {}
void onRemoveAfterLRRotation() const {}
void onInsertRebalanceRequired() const {}
void onRemoveRebalanceRequired() const {}
//@endcond
};
/// Option to allow relaxed insert into \ref cds_container_BronsonAVLTreeMap_rcu "Bronson et al AVL-tree"
/**
By default, this option is disabled and the new node is created under its parent lock.
In this case, it is guaranteed that the new node will be attached to its parent.
On the other hand, constructing the new node can be too expensive to do under the lock,
which can lead to lock contention.
When this option is enabled, the new node is created before locking the parent node.
After that, the parent is locked and it is checked whether the new node can be attached to it.
In this case, a node may occasionally be created in vain, but the locked section becomes significantly smaller.
*/
template <bool Enable>
struct relaxed_insert {
//@cond
template <typename Base> struct pack : public Base
{
enum { relaxed_insert = Enable };
};
//@endcond
};
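// A sketch of enabling relaxed insertion via the \p make_traits metafunction
// defined later in this header; rcu_type and my_value are application-defined
// placeholders:
//
//   typedef cds::container::BronsonAVLTreeMap< rcu_type, int, my_value,
//       cds::container::bronson_avltree::make_traits<
//           cds::opt::less< std::less<int> >,
//           cds::container::bronson_avltree::relaxed_insert< true >
//       >::type
//   > map_type;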
/// \p BronsonAVLTreeMap traits
/**
Note that there are two main specializations of the Bronson et al AVL-tree:
- \ref cds_container_BronsonAVLTreeMap_rcu_ptr "pointer-oriented" - the tree node stores a user-provided pointer to the value
- \ref cds_container_BronsonAVLTreeMap_rcu "data-oriented" - the tree node contains a copy of the value
Depending on the tree specialization, different traits members can be used.
*/
struct traits
{
/// Key comparison functor
/**
No default functor is provided. If the option is not specified, the \p less is used.
See \p cds::opt::compare option description for functor interface.
You should provide \p compare or \p less functor.
*/
typedef opt::none compare;
/// Specifies binary predicate used for key compare.
/**
See \p cds::opt::less option description for predicate interface.
You should provide \p compare or \p less functor.
*/
typedef opt::none less;
/// Allocator for internal node
typedef CDS_DEFAULT_ALLOCATOR node_allocator;
/// Allocator for node's value (not used in \p BronsonAVLTreeMap<RCU, Key, T*, Traits> specialisation)
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Disposer (only for pointer-oriented tree specialization)
/**
The functor used to dispose of removed values.
The user-provided disposer is used only for the pointer-oriented tree specialization
like \p BronsonAVLTreeMap<GC, Key, T*, Traits>. When a node becomes a routing node without a value,
the disposer will be called to signal that the memory for the value can be safely freed.
Default is \ref cds::intrusive::opt::delete_disposer "cds::container::opt::v::delete_disposer<>" which calls the \p delete operator.
*/
typedef opt::v::delete_disposer<> disposer;
/// @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
typedef cds::sync::injecting_monitor<cds::sync::spin> sync_monitor;
/// Enable relaxed insertion.
/**
About relaxed insertion see \p bronson_avltree::relaxed_insert option.
By default, this option is disabled.
*/
static bool const relaxed_insert = false;
/// Item counter
/**
The type for item counter, by default it is disabled (\p atomicity::empty_item_counter).
To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter.
*/
typedef atomicity::empty_item_counter item_counter;
/// C++ memory ordering model
/**
List of available memory ordering see \p opt::memory_model
*/
typedef opt::v::relaxed_ordering memory_model;
/// Internal statistics
/**
By default, internal statistics is disabled (\p bronson_avltree::empty_stat).
To enable it use \p bronson_avltree::stat.
*/
typedef empty_stat stat;
/// Back-off strategy
typedef cds::backoff::empty back_off;
/// RCU deadlock checking policy
/**
List of available options see \p opt::rcu_check_deadlock
*/
typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
};
/// Metafunction converting option list to BronsonAVLTreeMap traits
/**
Note that there are two main specializations of the Bronson et al AVL-tree:
- \ref cds_container_BronsonAVLTreeMap_rcu_ptr "pointer-oriented" - the tree node stores a user-provided pointer to the value
- \ref cds_container_BronsonAVLTreeMap_rcu "data-oriented" - the tree node contains a copy of the value
Depending on the tree specialization, different options can be specified.
\p Options are:
- \p opt::compare - key compare functor. No default functor is provided.
If the option is not specified, \p %opt::less is used.
- \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined.
- \p opt::node_allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR.
- \p opt::allocator - the allocator for node's value. Default is \ref CDS_DEFAULT_ALLOCATOR.
This option is not used in \p BronsonAVLTreeMap<RCU, Key, T*, Traits> specialisation
- \p cds::intrusive::opt::disposer - the functor used to dispose of removed values.
The user-provided disposer is used only for the pointer-oriented tree specialization
like \p BronsonAVLTreeMap<GC, Key, T*, Traits>. When a node becomes a routing node without a value,
the disposer will be called to signal that the memory for the value can be safely freed.
Default is \p cds::intrusive::opt::delete_disposer which calls the \p delete operator.
Due to the nature of the GC scheme, the disposer may be called asynchronously.
- \p opt::sync_monitor - @ref cds_sync_monitor "synchronization monitor" type for node-level locking,
default is \p cds::sync::injecting_monitor<cds::sync::spin>
- \p bronson_avltree::relaxed_insert - enable (\p true) or disable (\p false, the default)
@ref bronson_avltree::relaxed_insert "relaxed insertion"
- \p opt::item_counter - the type of item counting feature, by default it is disabled (\p atomicity::empty_item_counter)
To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
- \p opt::stat - internal statistics, by default it is disabled (\p bronson_avltree::empty_stat)
To enable statistics use \p bronson_avltree::stat
- \p opt::backoff - back-off strategy, by default no strategy is used (\p cds::backoff::empty)
- \p opt::rcu_check_deadlock - a deadlock checking policy for RCU-based tree, default is \p opt::v::rcu_throw_deadlock
*/
template <typename... Options>
struct make_traits {
# ifdef CDS_DOXYGEN_INVOKED
typedef implementation_defined type ; ///< Metafunction result
# else
typedef typename cds::opt::make_options<
typename cds::opt::find_type_traits< traits, Options... >::type
,Options...
>::type type;
# endif
};
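// The same traits can be expressed as an explicit struct derived from
// \p bronson_avltree::traits; rcu_type and my_value are application-defined
// placeholders:
//
//   struct my_map_traits: public cds::container::bronson_avltree::traits
//   {
//       typedef std::less<int>               less;
//       typedef cds::atomicity::item_counter item_counter;
//       static bool const relaxed_insert = true;
//   };
//   typedef cds::container::BronsonAVLTreeMap< rcu_type, int, my_value, my_map_traits > map_type;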
} // namespace bronson_avltree
// Forwards
template < class GC, typename Key, typename T, class Traits = bronson_avltree::traits >
class BronsonAVLTreeMap;
}} // namespace cds::container
#endif // #ifndef CDSLIB_CONTAINER_DETAILS_BRONSON_AVLTREE_BASE_H

View File

@ -0,0 +1,244 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_DETAILS_CUCKOO_BASE_H
#define CDSLIB_CONTAINER_DETAILS_CUCKOO_BASE_H
#include <cds/intrusive/cuckoo_set.h>
namespace cds { namespace container {
/// CuckooSet and CuckooMap related definitions
/** @ingroup cds_nonintrusive_helper
*/
namespace cuckoo {
#ifdef CDS_DOXYGEN_INVOKED
/// Lock striping concurrent access policy. This is typedef for intrusive::cuckoo::striping template
class striping
{};
#else
using intrusive::cuckoo::striping;
#endif
#ifdef CDS_DOXYGEN_INVOKED
/// Refinable concurrent access policy. This is typedef for intrusive::cuckoo::refinable template
class refinable
{};
#else
using intrusive::cuckoo::refinable;
#endif
#ifdef CDS_DOXYGEN_INVOKED
/// Striping internal statistics. This is typedef for intrusive::cuckoo::striping_stat
class striping_stat
{};
#else
using intrusive::cuckoo::striping_stat;
#endif
#ifdef CDS_DOXYGEN_INVOKED
/// Empty striping internal statistics. This is typedef for intrusive::cuckoo::empty_striping_stat
class empty_striping_stat
{};
#else
using intrusive::cuckoo::empty_striping_stat;
#endif
#ifdef CDS_DOXYGEN_INVOKED
/// Refinable internal statistics. This is typedef for intrusive::cuckoo::refinable_stat
class refinable_stat
{};
#else
using intrusive::cuckoo::refinable_stat;
#endif
#ifdef CDS_DOXYGEN_INVOKED
/// Empty refinable internal statistics. This is typedef for intrusive::cuckoo::empty_refinable_stat
class empty_refinable_stat
{};
#else
using intrusive::cuckoo::empty_refinable_stat;
#endif
#ifdef CDS_DOXYGEN_INVOKED
/// Cuckoo statistics. This is typedef for intrusive::cuckoo::stat
class stat
{};
#else
using intrusive::cuckoo::stat;
#endif
#ifdef CDS_DOXYGEN_INVOKED
/// Cuckoo empty statistics.This is typedef for intrusive::cuckoo::empty_stat
class empty_stat
{};
#else
using intrusive::cuckoo::empty_stat;
#endif
/// Option specifying whether to store hash values in the node
/**
This option reserves additional space in the node to store the hash value of the object once it's introduced in the container.
When this option is used, the unordered container will store the calculated hash value in the node and rehashing operations won't need
to recalculate the hash of the value. This option will improve the performance of unordered containers
when rehashing is frequent or hashing the value is a slow operation.
The \p Enable template parameter toggles the feature:
- the value \p true enables storing the hash values
- the value \p false disables storing the hash values
*/
template <bool Enable>
struct store_hash
{
//@cond
template <typename Base>
struct pack: public Base {
static bool const store_hash = Enable;
};
//@endcond
};
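// A sketch of a traits struct enabling stored hash values; hash1 and hash2
// are illustrative, application-defined hash functors:
//
//   struct my_traits: public cds::container::cuckoo::traits
//   {
//       typedef cds::opt::hash_tuple< hash1, hash2 > hash;
//       static bool const store_hash = true;
//   };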
#ifdef CDS_DOXYGEN_INVOKED
/// Probe set type option
/**
@copydetails cds::intrusive::cuckoo::probeset_type
*/
template <typename Type>
struct probeset_type
{};
#else
using intrusive::cuckoo::probeset_type;
#endif
using intrusive::cuckoo::list;
using intrusive::cuckoo::vector;
/// Type traits for CuckooSet and CuckooMap classes
struct traits
{
/// Hash functors tuple
/**
This is a mandatory type and has no predefined default.
At least two hash functors should be provided. All hash functors
should be orthogonal (different): for each <tt> i,j: i != j => h[i](x) != h[j](x) </tt>.
The hash functors are defined as <tt> std::tuple< H1, H2, ... Hn > </tt>:
\@code cds::opt::hash< std::tuple< h1, h2 > > \@endcode
The number of hash functors specifies the number \p k - the count of hash tables in cuckoo hashing.
To specify hash tuple in traits you should use \p cds::opt::hash_tuple:
\code
struct my_traits: public cds::container::cuckoo::traits {
typedef cds::opt::hash_tuple< hash1, hash2 > hash;
};
\endcode
*/
typedef cds::opt::none hash;
/// Concurrent access policy
/**
Available opt::mutex_policy types:
- cuckoo::striping - simple, but the lock array is not resizable
- cuckoo::refinable - resizable lock array, but more complex access to set data.
Default is cuckoo::striping.
*/
typedef cuckoo::striping<> mutex_policy;
/// Key equality functor
/**
Default is <tt>std::equal_to<T></tt>
*/
typedef opt::none equal_to;
/// Key comparison functor
/**
No default functor is provided. If the option is not specified, the \p less is used.
*/
typedef opt::none compare;
/// specifies binary predicate used for key comparison.
/**
Default is \p std::less<T>.
*/
typedef opt::none less;
/// Item counter
/**
The type for the item counting feature.
Default is \p cds::atomicity::item_counter.
Only an atomic item counter type is allowed.
*/
typedef cds::intrusive::cuckoo::traits::item_counter item_counter;
/// Allocator type
/**
The allocator type for allocating bucket tables.
Default is \p CDS_DEFAULT_ALLOCATOR that is \p std::allocator
*/
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Node allocator type
/**
If this type is not set explicitly, the \ref allocator type is used.
*/
typedef opt::none node_allocator;
/// Store hash value into items. See cuckoo::store_hash for explanation
static bool const store_hash = false;
/// Probe-set type. See \ref probeset_type option for explanation
typedef cuckoo::list probeset_type;
/// Internal statistics
typedef empty_stat stat;
};
/// Metafunction converting option list to CuckooSet/CuckooMap traits
/**
Template argument list \p Options... are:
- \p opt::hash - hash functor tuple, mandatory option. At least two hash functors should be provided. All hash functors
should be orthogonal (different): for each <tt> i,j: i != j => h[i](x) != h[j](x) </tt>.
The hash functors are passed as <tt> std::tuple< H1, H2, ... Hn > </tt>. The number of hash functors specifies
the number \p k - the count of hash tables in cuckoo hashing.
- \p opt::mutex_policy - concurrent access policy.
Available policies: \p cuckoo::striping, \p cuckoo::refinable.
Default is \p %cuckoo::striping.
- \p opt::equal_to - key equality functor like \p std::equal_to.
If this functor is defined then the probe-set will be unordered.
If \p %opt::compare or \p %opt::less option is specified too, then the probe-set will be ordered
and \p %opt::equal_to will be ignored.
- \p opt::compare - key comparison functor. No default functor is provided.
If the option is not specified, the \p %opt::less is used.
If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered.
- \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less<T>.
If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered.
- \p opt::item_counter - the type of item counting feature. Default is \p opt::v::sequential_item_counter.
- \p opt::allocator - the allocator type used for allocating bucket tables.
Default is \ref CDS_DEFAULT_ALLOCATOR
- \p opt::node_allocator - the allocator type used for allocating the set's items. If this option
is not specified then the type defined in the \p %opt::allocator option is used.
- \p cuckoo::store_hash - this option reserves additional space in the node to store the hash value
of the object once it's introduced in the container. When this option is used,
the unordered container will store the calculated hash value in the node and rehashing operations won't need
to recalculate the hash of the value. This option will improve the performance of unordered containers
when rehashing is frequent or hashing the value is a slow operation. Default value is \p false.
- \ref intrusive::cuckoo::probeset_type "cuckoo::probeset_type" - type of probe set, may be \p cuckoo::list or <tt>cuckoo::vector<Capacity></tt>,
Default is \p cuckoo::list.
- \p opt::stat - internal statistics. Possible types: \p cuckoo::stat, \p cuckoo::empty_stat.
Default is \p %cuckoo::empty_stat
*/
template <typename... Options>
struct make_traits {
typedef typename cds::opt::make_options<
typename cds::opt::find_type_traits< cuckoo::traits, Options... >::type
,Options...
>::type type ; ///< Result of metafunction
};
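// A sketch of make_traits usage for a set of std::string keys; hash1 and
// hash2 are illustrative, application-defined string hash functors:
//
//   typedef cds::container::cuckoo::make_traits<
//       cds::opt::hash< std::tuple< hash1, hash2 > >,
//       cds::opt::less< std::less<std::string> >,
//       cds::container::cuckoo::store_hash< true >
//   >::type my_traits;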
} // namespace cuckoo
}} // namespace cds::container
#endif // #ifndef CDSLIB_CONTAINER_DETAILS_CUCKOO_BASE_H

View File

@ -0,0 +1,435 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_DETAILS_ELLEN_BINTREE_BASE_H
#define CDSLIB_CONTAINER_DETAILS_ELLEN_BINTREE_BASE_H
#include <cds/intrusive/details/ellen_bintree_base.h>
#include <cds/container/details/base.h>
#include <cds/opt/compare.h>
#include <cds/details/binary_functor_wrapper.h>
namespace cds { namespace container {
/// EllenBinTree related definitions
/** @ingroup cds_nonintrusive_helper
*/
namespace ellen_bintree {
#ifdef CDS_DOXYGEN_INVOKED
/// Typedef for \p cds::intrusive::ellen_bintree::update_desc
typedef cds::intrusive::ellen_bintree::update_desc update_desc;
/// Typedef for \p cds::intrusive::ellen_bintree::internal_node
typedef cds::intrusive::ellen_bintree::internal_node internal_node;
/// Typedef for \p cds::intrusive::ellen_bintree::key_extractor
typedef cds::intrusive::ellen_bintree::key_extractor key_extractor;
/// Typedef for \p cds::intrusive::ellen_bintree::update_desc_allocator
typedef cds::intrusive::ellen_bintree::update_desc_allocator update_desc_allocator;
#else
using cds::intrusive::ellen_bintree::update_desc;
using cds::intrusive::ellen_bintree::internal_node;
using cds::intrusive::ellen_bintree::key_extractor;
using cds::intrusive::ellen_bintree::update_desc_allocator;
using cds::intrusive::ellen_bintree::node_types;
#endif
/// EllenBinTree internal statistics
template <typename Counter = cds::intrusive::ellen_bintree::stat<>::event_counter >
using stat = cds::intrusive::ellen_bintree::stat< Counter >;
/// EllenBinTree empty internal statistics
typedef cds::intrusive::ellen_bintree::empty_stat empty_stat;
/// EllenBinTree leaf node
template <typename GC, typename T>
struct node: public cds::intrusive::ellen_bintree::node<GC>
{
typedef T value_type ; ///< Value type
T m_Value ; ///< Value
/// Default ctor
node()
{}
/// Initializing ctor
template <typename Q>
node(Q const& v)
: m_Value(v)
{}
/// Initializing ctor (arguments passed by const reference)
template <typename... Args>
node( Args const&... args )
: m_Value( args... )
{}
/// Initializing ctor (perfect forwarding)
template <typename... Args>
node( Args&&... args )
: m_Value( std::forward<Args>( args )... )
{}
};
/// EllenBinTreeMap leaf node
template <typename GC, typename Key, typename T>
struct map_node: public cds::intrusive::ellen_bintree::node< GC >
{
typedef Key key_type ; ///< key type
typedef T mapped_type ; ///< value type
typedef std::pair<key_type const, mapped_type> value_type ; ///< key-value pair stored in the map
value_type m_Value ; ///< Key-value pair stored in map leaf node
/// Initializes the key field; the value is default-constructed
template <typename K>
map_node( K const& key )
: m_Value( std::make_pair( key_type(key), mapped_type()))
{}
/// Initializes key and value fields
template <typename K, typename Q>
map_node( K const& key, Q const& v )
: m_Value( std::make_pair(key_type(key), mapped_type(v)))
{}
};
/// Type traits for \p EllenBinTreeSet and \p EllenBinTreeMap
struct traits
{
/// Key extracting functor (only for \p EllenBinTreeSet)
/**
This is mandatory functor for \p %EllenBinTreeSet.
It has the following prototype:
\code
struct key_extractor {
void operator ()( Key& dest, T const& src );
};
\endcode
It should initialize \p dest key from \p src data.
The functor is used to initialize internal nodes of \p %EllenBinTreeSet
*/
typedef opt::none key_extractor;
/// Key comparison functor
/**
No default functor is provided. If the option is not specified, the \p less is used.
See \p cds::opt::compare option description for functor interface.
You should provide \p compare or \p less functor.
See \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements".
*/
typedef opt::none compare;
/// Specifies binary predicate used for key compare.
/**
See \p cds::opt::less option description.
You should provide \p compare or \p less functor.
See \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements".
*/
typedef opt::none less;
/// Item counter
/**
The type for item counter, by default it is disabled (\p atomicity::empty_item_counter).
To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter
*/
typedef atomicity::empty_item_counter item_counter;
/// C++ memory ordering model
/**
List of available memory ordering see \p opt::memory_model
*/
typedef opt::v::relaxed_ordering memory_model;
/// Allocator for update descriptors
/**
The allocator type is used for \p ellen_bintree::update_desc.
The update descriptor is a helper data structure with a short lifetime and is a good candidate
for pooling. The number of simultaneously existing descriptors is small,
limited by the number of threads working with the tree.
Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue
is a good choice for the free-list of update descriptors,
see the \p cds::memory::vyukov_queue_pool free-list implementation.
Also note that the size of an update descriptor does not depend on the type of data
stored in the tree, so a single free-list object can be used for several \p EllenBinTree objects.
*/
typedef CDS_DEFAULT_ALLOCATOR update_desc_allocator;
/// Allocator for internal nodes
/**
The allocator type is used for \p ellen_bintree::internal_node.
*/
typedef CDS_DEFAULT_ALLOCATOR node_allocator;
/// Allocator for leaf nodes
/**
Each leaf node contains data stored in the container.
*/
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Internal statistics
/**
By default, internal statistics is disabled (\p ellen_bintree::empty_stat).
To enable it use \p ellen_bintree::stat.
*/
typedef empty_stat stat;
/// Back-off strategy
typedef cds::backoff::empty back_off;
/// RCU deadlock checking policy (only for RCU-based EllenBinTree<i>XXX</i> classes)
/**
List of available options see \p opt::rcu_check_deadlock
*/
typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
/// Key copy policy (for \p EllenBinTreeMap)
/**
The key copy policy defines a functor to copy leaf node's key to internal node.
This policy is used only in \p EllenBinTreeMap.
By default, assignment operator is used.
The copy functor interface is:
\code
struct copy_functor {
void operator()( Key& dest, Key const& src );
};
\endcode
*/
typedef opt::none copy_policy;
};
/// Metafunction converting option list to \p EllenBinTreeSet traits
/**
\p Options are:
- \p ellen_bintree::key_extractor - key extracting functor, mandatory option. The functor has the following prototype:
\code
struct key_extractor {
void operator ()( Key& dest, T const& src );
};
\endcode
It should initialize \p dest key from \p src data. The functor is used to initialize internal nodes.
- \p opt::compare - key compare functor. No default functor is provided.
If the option is not specified, \p %opt::less is used.
- \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined.
- \p opt::item_counter - the type of item counter, default is disabled (\p atomicity::empty_item_counter).
To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
- \p opt::allocator - the allocator for \ref ellen_bintree::node "leaf nodes" which contains data.
Default is \ref CDS_DEFAULT_ALLOCATOR.
- \p opt::node_allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR.
- \p ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors",
default is \ref CDS_DEFAULT_ALLOCATOR.
Note that the update descriptor is a helper data structure with a short lifetime, which makes it a good candidate for pooling.
The number of simultaneously existing descriptors is relatively small, limited by the number of threads
working with the tree and the RCU buffer size.
Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is a good choice for the free-list
of update descriptors, see the \p cds::memory::vyukov_queue_pool free-list implementation.
Also note that the size of an update descriptor does not depend on the type of data
stored in the tree, so a single free-list object can be used for several EllenBinTree-based objects.
- \p opt::stat - internal statistics, by default disabled (\p ellen_bintree::empty_stat). To enable
it use \p ellen_bintree::stat.
- \p opt::backoff - back-off strategy, by default no strategy is used (\p cds::backoff::empty)
- \p opt::rcu_check_deadlock - a deadlock checking policy, only for RCU-based tree.
Default is \p opt::v::rcu_throw_deadlock.
*/
template <typename... Options>
struct make_set_traits {
# ifdef CDS_DOXYGEN_INVOKED
typedef implementation_defined type ; ///< Metafunction result
# else
typedef typename cds::opt::make_options<
typename cds::opt::find_type_traits< traits, Options... >::type
,Options...
>::type type;
# endif
};
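// A sketch of make_set_traits usage for an EllenBinTreeSet of my_data keyed
// by int; my_data, my_key_extractor and gc_type are illustrative placeholders:
//
//   struct my_data {
//       int         nKey;
//       std::string strValue;
//   };
//   struct my_key_extractor {
//       void operator()( int& dest, my_data const& src ) const { dest = src.nKey; }
//   };
//   typedef cds::container::EllenBinTreeSet< gc_type, int, my_data,
//       cds::container::ellen_bintree::make_set_traits<
//           cds::container::ellen_bintree::key_extractor< my_key_extractor >,
//           cds::opt::less< std::less<int> >
//       >::type
//   > set_type;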
/// Metafunction converting option list to \p EllenBinTreeMap traits
/**
\p Options are:
- \p opt::compare - key compare functor. No default functor is provided.
If the option is not specified, \p %opt::less is used.
- \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined.
- \p opt::item_counter - the type of item counter, default is disabled (\p atomicity::empty_item_counter).
To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
- \p opt::allocator - the allocator used for \ref ellen_bintree::map_node "leaf nodes" which contains data.
Default is \ref CDS_DEFAULT_ALLOCATOR.
- \p opt::node_allocator - the allocator used for \ref ellen_bintree::internal_node "internal nodes".
Default is \ref CDS_DEFAULT_ALLOCATOR.
- \p ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors",
default is \ref CDS_DEFAULT_ALLOCATOR.
Note that the update descriptor is a helper data structure with a short lifetime, which makes it a good candidate for pooling.
The number of simultaneously existing descriptors is relatively small, limited by the number of threads
working with the tree and the RCU buffer size.
Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is a good choice for the free-list
of update descriptors, see the \p cds::memory::vyukov_queue_pool free-list implementation.
Also note that the size of an update descriptor does not depend on the type of data
stored in the tree, so a single free-list object can be used for several EllenBinTree-based objects.
- \p opt::stat - internal statistics, by default disabled (\p ellen_bintree::empty_stat). To enable
it use \p ellen_bintree::stat.
- \p opt::backoff - back-off strategy, by default no strategy is used (\p cds::backoff::empty)
- \p opt::rcu_check_deadlock - a deadlock checking policy, only for RCU-based tree. Default is \p opt::v::rcu_throw_deadlock
- \p opt::copy_policy - the key copying policy defines a functor to copy a leaf node's key to an internal node.
By default, the assignment operator is used.
The copy functor interface is:
\code
struct copy_functor {
void operator()( Key& dest, Key const& src );
};
\endcode
*/
template <typename... Options>
struct make_map_traits {
# ifdef CDS_DOXYGEN_INVOKED
typedef implementation_defined type ; ///< Metafunction result
# else
typedef typename cds::opt::make_options<
typename cds::opt::find_type_traits< traits, Options... >::type
,Options...
>::type type;
# endif
};
//@cond
namespace details {
template < class GC, typename Key, typename T, class Traits>
struct make_ellen_bintree_set
{
typedef GC gc;
typedef Key key_type;
typedef T value_type;
typedef Traits original_traits;
typedef node< gc, value_type > leaf_node;
struct intrusive_key_extractor
{
void operator()( key_type& dest, leaf_node const& src ) const
{
typename original_traits::key_extractor()( dest, src.m_Value );
}
};
struct value_accessor
{
value_type const& operator()( leaf_node const& node ) const
{
return node.m_Value;
}
};
typedef typename cds::opt::details::make_comparator< value_type, original_traits, false >::type key_comparator;
typedef cds::details::Allocator< leaf_node, typename original_traits::allocator> cxx_leaf_node_allocator;
struct leaf_deallocator
{
void operator()( leaf_node * p ) const
{
cxx_leaf_node_allocator().Delete( p );
}
};
struct intrusive_traits: public original_traits
{
typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc< gc >> hook;
typedef intrusive_key_extractor key_extractor;
typedef leaf_deallocator disposer;
typedef cds::details::compare_wrapper< leaf_node, key_comparator, value_accessor > compare;
};
// Metafunction result
typedef cds::intrusive::EllenBinTree< gc, key_type, leaf_node, intrusive_traits > type;
};
template < class GC, typename Key, typename T, class Traits>
struct make_ellen_bintree_map
{
typedef GC gc;
typedef Key key_type;
typedef T mapped_type;
typedef map_node< gc, key_type, mapped_type > leaf_node;
typedef typename leaf_node::value_type value_type;
typedef Traits original_traits;
struct assignment_copy_policy {
void operator()( key_type& dest, key_type const& src )
{
dest = src;
}
};
typedef typename std::conditional<
std::is_same< typename original_traits::copy_policy, opt::none >::value,
assignment_copy_policy,
typename original_traits::copy_policy
>::type copy_policy;
struct intrusive_key_extractor
{
void operator()( key_type& dest, leaf_node const& src ) const
{
copy_policy()( dest, src.m_Value.first );
}
};
struct key_accessor
{
key_type const& operator()( leaf_node const& node ) const
{
return node.m_Value.first;
}
};
typedef typename cds::opt::details::make_comparator< key_type, original_traits, false >::type key_comparator;
typedef cds::details::Allocator< leaf_node, typename original_traits::allocator> cxx_leaf_node_allocator;
struct leaf_deallocator
{
void operator()( leaf_node * p ) const
{
cxx_leaf_node_allocator().Delete( p );
}
};
struct intrusive_traits: public original_traits
{
typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc< gc > > hook;
typedef intrusive_key_extractor key_extractor;
typedef leaf_deallocator disposer;
typedef cds::details::compare_wrapper< leaf_node, key_comparator, key_accessor > compare;
};
// Metafunction result
typedef cds::intrusive::EllenBinTree< gc, key_type, leaf_node, intrusive_traits > type;
};
} // namespace details
//@endcond
} // namespace ellen_bintree
// Forward declarations
//@cond
template < class GC, typename Key, typename T, class Traits = ellen_bintree::traits >
class EllenBinTreeSet;
template < class GC, typename Key, typename T, class Traits = ellen_bintree::traits >
class EllenBinTreeMap;
//@endcond
}} // namespace cds::container
#endif // #ifndef CDSLIB_CONTAINER_DETAILS_ELLEN_BINTREE_BASE_H

View File

@ -0,0 +1,366 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHMAP_BASE_H
#define CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHMAP_BASE_H
#include <cds/intrusive/details/feldman_hashset_base.h>
#include <cds/container/details/base.h>
#include <cds/opt/hash.h>
namespace cds { namespace container {
/// \p FeldmanHashMap related definitions
/** @ingroup cds_nonintrusive_helper
*/
namespace feldman_hashmap {
/// \p FeldmanHashMap internal statistics, see cds::intrusive::feldman_hashset::stat
template <typename EventCounter = cds::atomicity::event_counter>
using stat = cds::intrusive::feldman_hashset::stat< EventCounter >;
/// \p FeldmanHashMap empty internal statistics
typedef cds::intrusive::feldman_hashset::empty_stat empty_stat;
/// Bit-wise memcmp-based comparator for hash value \p T
template <typename T>
using bitwise_compare = cds::intrusive::feldman_hashset::bitwise_compare< T >;
/// \p FeldmanHashMap level statistics
typedef cds::intrusive::feldman_hashset::level_statistics level_statistics;
/// Key size option
/**
@copydetails cds::container::feldman_hashmap::traits::hash_size
*/
template <size_t Size>
using hash_size = cds::intrusive::feldman_hashset::hash_size< Size >;
/// Hash splitter option
/**
@copydetails cds::container::feldman_hashmap::traits::hash_splitter
*/
template <typename Splitter>
using hash_splitter = cds::intrusive::feldman_hashset::hash_splitter< Splitter >;
/// \p FeldmanHashMap traits
struct traits
{
/// Hash functor, default is \p opt::none
/**
\p FeldmanHashMap may use any hash functor converting a key to
fixed-sized bit-string, for example, <a href="https://en.wikipedia.org/wiki/Secure_Hash_Algorithm">SHA1, SHA2</a>,
<a href="https://en.wikipedia.org/wiki/MurmurHash">MurmurHash</a>,
<a href="https://en.wikipedia.org/wiki/CityHash">CityHash</a>
or its successor <a href="https://code.google.com/p/farmhash/">FarmHash</a>.
If you use a fixed-sized key, you can use it directly instead of a hash.
In such a case, \p %traits::hash should be specified as \p opt::none.
However, if you want to use hash values, or if your key type is not fixed-sized,
you must specify a proper hash functor in your traits.
For example:
fixed-sized key - IP4 address map
@code
// Key - IP address
struct ip4_address {
uint8_t ip[4];
};
// IP compare
struct ip4_cmp {
int operator()( ip4_address const& lhs, ip4_address const& rhs ) const
{
return memcmp( &lhs, &rhs, sizeof(lhs));
}
};
// Value - statistics for the IP address
struct statistics {
// ...
};
// Traits
// Key type (ip4_address) is fixed-sized so we may use the map without any hash functor
struct ip4_map_traits: public cds::container::feldman_hashmap::traits
{
typedef ip4_cmp compare;
};
// IP4 address - statistics map
typedef cds::container::FeldmanHashMap< cds::gc::HP, ip4_address, statistics, ip4_map_traits > ip4_map;
@endcode
variable-size key requires a hash functor: URL map
@code
// Value - statistics for the URL
struct statistics {
// ...
};
// Traits
// Key type (std::string) is variable-sized so we must provide a hash functor in our traits
// We do not specify any comparing predicate (less or compare) so <tt> std::less<std::string> </tt> will be used by default
struct url_map_traits: public cds::container::feldman_hashmap::traits
{
typedef std::hash<std::string> hash;
};
// URL statistics map
typedef cds::container::FeldmanHashMap< cds::gc::HP, std::string, statistics, url_map_traits > url_map;
@endcode
*/
typedef opt::none hash;
/// The size of hash value in bytes
/**
By default, the size of hash value is <tt>sizeof( hash_type )</tt>
where \p hash_type is type of \p hash() result or <tt>sizeof( key )</tt> if you use fixed-sized key.
Sometimes that size is wrong, for example, for this 6-byte key:
\code
struct key_type {
uint32_t key;
uint16_t subkey;
};
static_assert( sizeof( key_type ) == 6, "Key type size mismatch" );
\endcode
Here <tt>sizeof( key_type ) == 8</tt> because of alignment padding, so the \p static_assert will fail.
For such a case you can specify \p hash_size explicitly.
The value \p 0 means auto-calculated <tt>sizeof( key_type )</tt>.
*/
static constexpr size_t const hash_size = 0;
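// A sketch: for the 6-byte key_type above, the hash size can be fixed either
// in a derived traits struct or via the \p feldman_hashmap::hash_size option
// (illustrative):
//
//   typedef cds::container::feldman_hashmap::make_traits<
//       cds::container::feldman_hashmap::hash_size< 6 >
//   >::type my_traits;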
/// Hash splitter
/**
@copydetails cds::intrusive::feldman_hashset::traits::hash_splitter
*/
typedef cds::opt::none hash_splitter;
/// Hash comparing functor
/**
@copydetails cds::intrusive::feldman_hashset::traits::compare
*/
typedef cds::opt::none compare;
/// Specifies binary predicate used for hash compare.
/**
@copydetails cds::intrusive::feldman_hashset::traits::less
*/
typedef cds::opt::none less;
/// Item counter
/**
@copydetails cds::intrusive::feldman_hashset::traits::item_counter
*/
typedef cds::atomicity::item_counter item_counter;
/// Item allocator
/**
Default is \ref CDS_DEFAULT_ALLOCATOR
*/
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Array node allocator
/**
@copydetails cds::intrusive::feldman_hashset::traits::node_allocator
*/
typedef CDS_DEFAULT_ALLOCATOR node_allocator;
/// C++ memory ordering model
/**
@copydetails cds::intrusive::feldman_hashset::traits::memory_model
*/
typedef cds::opt::v::relaxed_ordering memory_model;
/// Back-off strategy
typedef cds::backoff::Default back_off;
/// Internal statistics
/**
@copydetails cds::intrusive::feldman_hashset::traits::stat
*/
typedef empty_stat stat;
/// RCU deadlock checking policy (only for \ref cds_container_FeldmanHashMap_rcu "RCU-based FeldmanHashMap")
/**
@copydetails cds::intrusive::feldman_hashset::traits::rcu_check_deadlock
*/
typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
};
/// Metafunction converting option list to \p feldman_hashmap::traits
/**
Supported \p Options are:
- \p opt::hash - a hash functor, default is \p std::hash
@copydetails traits::hash
- \p feldman_hashmap::hash_size - the size of hash value in bytes.
@copydetails traits::hash_size
- \p opt::allocator - item allocator
@copydetails traits::allocator
- \p opt::node_allocator - array node allocator.
@copydetails traits::node_allocator
- \p opt::compare - hash comparison functor. No default functor is provided.
If the option is not specified, the \p opt::less is used.
- \p opt::less - specifies binary predicate used for hash comparison.
@copydetails cds::container::feldman_hashmap::traits::less
- \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
- \p opt::item_counter - the type of item counting feature.
@copydetails cds::container::feldman_hashmap::traits::item_counter
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
- \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashmap::empty_stat).
To enable it use \p feldman_hashmap::stat
- \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet"
Default is \p opt::v::rcu_throw_deadlock
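For example, a sketch of option-based construction (assuming \p std::string keys; the alias name is illustrative):
\code
typedef cds::container::feldman_hashmap::make_traits<
    cds::opt::hash< std::hash<std::string> >,
    cds::opt::item_counter< cds::atomicity::item_counter >
>::type url_map_traits;
\endcode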
*/
template <typename... Options>
struct make_traits
{
# ifdef CDS_DOXYGEN_INVOKED
typedef implementation_defined type ; ///< Metafunction result
# else
typedef typename cds::opt::make_options<
typename cds::opt::find_type_traits< traits, Options... >::type
,Options...
>::type type;
# endif
};
} // namespace feldman_hashmap
//@cond
// Forward declaration
template < class GC, typename Key, typename T, class Traits = feldman_hashmap::traits >
class FeldmanHashMap;
//@endcond
//@cond
namespace details {
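// hash_selector chooses the map node layout: in the general case, when a hash
// functor is supplied, each node stores its key/value pair plus the precomputed hash.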
template <typename Key, typename Value, typename Hash>
struct hash_selector
{
typedef Key key_type;
typedef Value mapped_type;
typedef Hash hasher;
typedef typename std::decay<
typename std::remove_reference<
decltype(hasher()(std::declval<key_type>()))
>::type
>::type hash_type;
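// Map node: immutable key/value pair plus the hash, computed once at construction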
struct node_type
{
std::pair< key_type const, mapped_type> m_Value;
hash_type const m_hash;
node_type() = delete;
node_type(node_type const&) = delete;
template <typename Q>
node_type(hasher& h, Q const& key)
: m_Value( std::move( std::make_pair( key_type( key ), mapped_type())))
, m_hash( h( m_Value.first ))
{}
template <typename Q, typename U >
node_type(hasher& h, Q const& key, U const& val)
: m_Value( std::move( std::make_pair( key_type( key ), mapped_type(val))))
, m_hash( h( m_Value.first ))
{}
template <typename Q, typename... Args>
node_type(hasher& h, Q&& key, Args&&... args)
: m_Value( std::move(std::make_pair( key_type( std::forward<Q>(key)), std::move( mapped_type(std::forward<Args>(args)...)))))
, m_hash( h( m_Value.first ))
{}
};
struct hash_accessor
{
hash_type const& operator()(node_type const& node) const
{
return node.m_hash;
}
};
};
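// Specialization for opt::none: the fixed-sized key itself serves as the hash,
// so nodes carry no separate hash field and the "hasher" returns the key unchanged.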
template <typename Key, typename Value>
struct hash_selector<Key, Value, opt::none>
{
typedef Key key_type;
typedef Value mapped_type;
struct hasher {
key_type const& operator()(key_type const& k) const
{
return k;
}
};
typedef key_type hash_type;
struct node_type
{
std::pair< key_type const, mapped_type> m_Value;
node_type() = delete;
node_type(node_type const&) = delete;
template <typename Q, typename... Args>
node_type( hasher /*h*/, Q&& key, Args&&... args )
: m_Value( std::make_pair( key_type( std::forward<Q>( key )), mapped_type( std::forward<Args>(args)...)))
{}
};
struct hash_accessor
{
hash_type const& operator()(node_type const& node) const
{
return node.m_Value.first;
}
};
};
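// Metafunction assembling the intrusive FeldmanHashSet type that backs the non-intrusive map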
template <typename GC, typename Key, typename T, typename Traits>
struct make_feldman_hashmap
{
typedef GC gc;
typedef Key key_type;
typedef T mapped_type;
typedef Traits original_traits;
typedef hash_selector< key_type, mapped_type, typename original_traits::hash > select;
typedef typename select::hasher hasher;
typedef typename select::hash_type hash_type;
typedef typename select::node_type node_type;
typedef cds::details::Allocator< node_type, typename original_traits::allocator > cxx_node_allocator;
struct node_disposer
{
void operator()( node_type * p ) const
{
cxx_node_allocator().Delete( p );
}
};
struct intrusive_traits: public original_traits
{
typedef typename select::hash_accessor hash_accessor;
typedef node_disposer disposer;
};
// Metafunction result
typedef cds::intrusive::FeldmanHashSet< GC, node_type, intrusive_traits > type;
};
} // namespace details
//@endcond
}} // namespace cds::container
#endif // #ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHMAP_BASE_H

View File

@ -0,0 +1,205 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHSET_BASE_H
#define CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHSET_BASE_H
#include <cds/intrusive/details/feldman_hashset_base.h>
#include <cds/container/details/base.h>
namespace cds { namespace container {
/// \p FeldmanHashSet related definitions
/** @ingroup cds_nonintrusive_helper
*/
namespace feldman_hashset {
/// Hash accessor option
/**
@copydetails cds::intrusive::feldman_hashset::traits::hash_accessor
*/
template <typename Accessor>
using hash_accessor = cds::intrusive::feldman_hashset::hash_accessor< Accessor >;
/// Hash size option
/**
@copydetails cds::intrusive::feldman_hashset::traits::hash_size
*/
template <size_t Size>
using hash_size = cds::intrusive::feldman_hashset::hash_size< Size >;
/// Hash splitter
/**
@copydetails cds::intrusive::feldman_hashset::traits::hash_splitter
*/
template <typename Splitter>
using hash_splitter = cds::intrusive::feldman_hashset::hash_splitter< Splitter >;
/// \p FeldmanHashSet internal statistics, see cds::intrusive::feldman_hashset::stat
template <typename EventCounter = cds::atomicity::event_counter>
using stat = cds::intrusive::feldman_hashset::stat< EventCounter >;
/// \p FeldmanHashSet empty internal statistics
typedef cds::intrusive::feldman_hashset::empty_stat empty_stat;
/// Bit-wise memcmp-based comparator for hash value \p T
template <typename T>
using bitwise_compare = cds::intrusive::feldman_hashset::bitwise_compare< T >;
/// \p FeldmanHashSet level statistics
typedef cds::intrusive::feldman_hashset::level_statistics level_statistics;
/// \p FeldmanHashSet traits
struct traits
{
/// Mandatory functor to get hash value from data node
/**
@copydetails cds::intrusive::feldman_hashset::traits::hash_accessor
*/
typedef cds::opt::none hash_accessor;
/// The size of hash value in bytes
/**
@copydetails cds::intrusive::feldman_hashset::traits::hash_size
*/
static constexpr size_t const hash_size = 0;
/// Hash splitter
/**
@copydetails cds::intrusive::feldman_hashset::traits::hash_splitter
*/
typedef cds::opt::none hash_splitter;
/// Hash comparing functor
/**
@copydetails cds::intrusive::feldman_hashset::traits::compare
*/
typedef cds::opt::none compare;
/// Specifies binary predicate used for hash compare.
/**
@copydetails cds::intrusive::feldman_hashset::traits::less
*/
typedef cds::opt::none less;
/// Item counter
/**
@copydetails cds::intrusive::feldman_hashset::traits::item_counter
*/
typedef cds::atomicity::item_counter item_counter;
/// Item allocator
/**
Default is \ref CDS_DEFAULT_ALLOCATOR
*/
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Array node allocator
/**
@copydetails cds::intrusive::feldman_hashset::traits::node_allocator
*/
typedef CDS_DEFAULT_ALLOCATOR node_allocator;
/// C++ memory ordering model
/**
@copydetails cds::intrusive::feldman_hashset::traits::memory_model
*/
typedef cds::opt::v::relaxed_ordering memory_model;
/// Back-off strategy
typedef cds::backoff::Default back_off;
/// Internal statistics
/**
@copydetails cds::intrusive::feldman_hashset::traits::stat
*/
typedef empty_stat stat;
/// RCU deadlock checking policy (only for \ref cds_container_FeldmanHashSet_rcu "RCU-based FeldmanHashSet")
/**
@copydetails cds::intrusive::feldman_hashset::traits::rcu_check_deadlock
*/
typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
};
/// Metafunction converting option list to \p feldman_hashset::traits
/**
Supported \p Options are:
- \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor.
@copydetails traits::hash_accessor
- \p feldman_hashset::hash_size - the size of hash value in bytes.
@copydetails traits::hash_size
- \p feldman_hashset::hash_splitter - a hash splitter algorithm
@copydetails traits::hash_splitter
- \p opt::allocator - item allocator
@copydetails traits::allocator
- \p opt::node_allocator - array node allocator.
@copydetails traits::node_allocator
- \p opt::compare - hash comparison functor. No default functor is provided.
If the option is not specified, \p opt::less is used instead.
- \p opt::less - specifies binary predicate used for hash comparison.
@copydetails cds::container::feldman_hashset::traits::less
- \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
- \p opt::item_counter - the type of item counting feature.
@copydetails cds::intrusive::feldman_hashset::traits::item_counter
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
- \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashset::empty_stat).
To enable it use \p feldman_hashset::stat
- \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet"
Default is \p opt::v::rcu_throw_deadlock
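For example, a sketch where a fixed-sized key field is used directly as the hash
(the item and functor names are illustrative):
\code
struct my_item {
    uint64_t m_key;     // fixed-sized key, used directly as the hash value
    // ... payload ...
};
struct my_key_accessor {
    uint64_t const& operator()( my_item const& i ) const { return i.m_key; }
};
typedef cds::container::feldman_hashset::make_traits<
    cds::container::feldman_hashset::hash_accessor< my_key_accessor >
>::type my_set_traits;
\endcode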
*/
template <typename... Options>
struct make_traits
{
# ifdef CDS_DOXYGEN_INVOKED
typedef implementation_defined type ; ///< Metafunction result
# else
typedef typename cds::opt::make_options<
typename cds::opt::find_type_traits< traits, Options... >::type
,Options...
>::type type;
# endif
};
} // namespace feldman_hashset
//@cond
// Forward declaration
template < class GC, typename T, class Traits = cds::container::feldman_hashset::traits >
class FeldmanHashSet;
//@endcond
//@cond
namespace details {
template <typename GC, typename T, typename Traits>
struct make_feldman_hashset
{
typedef GC gc;
typedef T value_type;
typedef Traits original_traits;
typedef cds::details::Allocator< value_type, typename original_traits::allocator > cxx_node_allocator;
struct node_disposer
{
void operator()( value_type * p ) const
{
cxx_node_allocator().Delete( p );
}
};
struct intrusive_traits: public original_traits
{
typedef node_disposer disposer;
};
// Metafunction result
typedef cds::intrusive::FeldmanHashSet< GC, T, intrusive_traits > type;
};
} // namespace details
//@endcond
}} // namespace cds::container
#endif // #ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHSET_BASE_H

View File

@ -0,0 +1,33 @@
// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef CDSLIB_CONTAINER_DETAILS_GUARDED_PTR_CAST_H
#define CDSLIB_CONTAINER_DETAILS_GUARDED_PTR_CAST_H
//@cond
#include <cds/details/defs.h>
namespace cds { namespace container { namespace details {
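// Converts a guarded set-node pointer to a pointer to its stored value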
template <typename Node, typename T>
struct guarded_ptr_cast_set {
T * operator()(Node* pNode ) const noexcept
{
return &(pNode->m_Value);
}
};
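// Converts a guarded map-node pointer to a pointer to its key/value pair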
template <typename Node, typename T>
struct guarded_ptr_cast_map {
T * operator()(Node* pNode ) const noexcept
{
return &(pNode->m_Data);
}
};
}}} // namespace cds::container::details
//@endcond
#endif // #ifndef CDSLIB_CONTAINER_DETAILS_GUARDED_PTR_CAST_H

Some files were not shown because too many files have changed in this diff.