diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index c1f39f6fd8..0cf1e472bc 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -3,6 +3,7 @@ add_subdirectory(./benchmark_client) add_subdirectory(./binlog_sender) add_subdirectory(./manifest_generator) add_subdirectory(./rdb_to_pika) +add_subdirectory(./pika_migrate) #add_subdirectory(./pika_to_txt) #add_subdirectory(./txt_to_pika) #add_subdirectory(./pika-port/pika_port_3) diff --git a/tools/pika_migrate/.gitattributes b/tools/pika_migrate/.gitattributes deleted file mode 100644 index 3ff2dd9c7b..0000000000 --- a/tools/pika_migrate/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -tests/* linguist-vendored diff --git a/tools/pika_migrate/.gitignore b/tools/pika_migrate/.gitignore deleted file mode 100644 index 5d21ed9c5a..0000000000 --- a/tools/pika_migrate/.gitignore +++ /dev/null @@ -1,49 +0,0 @@ -# Compiled Object files -*.slo -*.lo -*.o -*.obj -*pb.cc -*pb.h - -# Precompiled Headers -*.gch -*.pch - -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app - -# Log path -make_config.mk -log/ -lib/ -tools/ -output/ - -# DB -db/ -dump/ - -# third party -gdb.txt -tags - -make_config.mk -src/*.d -src/build_version.cc diff --git a/tools/pika_migrate/.travis.yml b/tools/pika_migrate/.travis.yml deleted file mode 100644 index cdc94a458c..0000000000 --- a/tools/pika_migrate/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -sudo: required -dist: trusty -language: cpp - -os: - - linux - -env: - global: - - PROTOBUF_VERSION=2.5.0 - -install: - - wget https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOBUF_VERSION/protobuf-$PROTOBUF_VERSION.tar.bz2 - - tar xvf protobuf-$PROTOBUF_VERSION.tar.bz2 - - ( cd protobuf-$PROTOBUF_VERSION && ./configure --prefix=/usr && make && sudo make install ) - -addons: - apt: - packages: ['libsnappy-dev', 'libprotobuf-dev', 'libgoogle-glog-dev'] - -compiler: - - gcc - -language: cpp - -script: make diff --git a/tools/pika_migrate/CMakeLists.txt b/tools/pika_migrate/CMakeLists.txt new file mode 100644 index 0000000000..f835b969e5 --- /dev/null +++ b/tools/pika_migrate/CMakeLists.txt @@ -0,0 +1,69 @@ +project(pika-migrate) + +aux_source_directory(src DIR_SRCS) + +set(PIKA_BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/pika_build_version.cc + src/pika_cache_load_thread.cc + ) +message("PIKA_BUILD_VERSION_CC : " ${PIKA_BUILD_VERSION_CC}) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/build_version.cc.in ${PIKA_BUILD_VERSION_CC} @ONLY) + +set(PROTO_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/pika_inner_message.proto ${CMAKE_CURRENT_SOURCE_DIR}/src/rsync_service.proto) +custom_protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS ${PROTO_FILES}) +message("pika PROTO_SRCS = ${PROTO_SRCS}") +message("pika PROTO_HDRS = ${PROTO_HDRS}") + +add_executable(${PROJECT_NAME} + ${DIR_SRCS} + ${PROTO_SRCS} + ${PROTO_HDRS} + ${PIKA_BUILD_VERSION_CC}) + + target_link_directories(${PROJECT_NAME} + PUBLIC ${INSTALL_LIBDIR_64} + PUBLIC ${INSTALL_LIBDIR}) + +add_dependencies(${PROJECT_NAME} + gflags + gtest + ${LIBUNWIND_NAME} + glog + fmt + snappy + zstd + lz4 + zlib + ${LIBGPERF_NAME} + ${LIBJEMALLOC_NAME} + rocksdb + protobuf + pstd + net + rediscache + storage + cache +) + +target_include_directories(${PROJECT_NAME} + PUBLIC ${CMAKE_CURRENT_BINARY_DIR} + PUBLIC ${PROJECT_SOURCE_DIR} + ${INSTALL_INCLUDEDIR} +) + +target_link_libraries(${PROJECT_NAME} + cache + storage + net + pstd + ${GLOG_LIBRARY} + librocksdb.a + 
${LIB_PROTOBUF} + ${LIB_GFLAGS} + ${LIB_FMT} + libsnappy.a + libzstd.a + liblz4.a + libz.a + librediscache.a + ${LIBUNWIND_LIBRARY} + ${JEMALLOC_LIBRARY}) \ No newline at end of file diff --git a/tools/pika_migrate/CODE_OF_CONDUCT.md b/tools/pika_migrate/CODE_OF_CONDUCT.md deleted file mode 100644 index f50b192489..0000000000 --- a/tools/pika_migrate/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at g-infra-bada@360.cn. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/tools/pika_migrate/CONTRIBUTING.md b/tools/pika_migrate/CONTRIBUTING.md deleted file mode 100644 index 4cf487071f..0000000000 --- a/tools/pika_migrate/CONTRIBUTING.md +++ /dev/null @@ -1 +0,0 @@ -### Contributing to pika diff --git a/tools/pika_migrate/Dockerfile b/tools/pika_migrate/Dockerfile deleted file mode 100644 index 3fc690c3e7..0000000000 --- a/tools/pika_migrate/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM centos:latest -MAINTAINER left2right - -RUN rpm -ivh https://mirrors.ustc.edu.cn/epel/epel-release-latest-7.noarch.rpm && \ - yum -y update && \ - yum -y install snappy-devel && \ - yum -y install protobuf-devel && \ - yum -y install gflags-devel && \ - yum -y install glog-devel && \ - yum -y install gcc-c++ && \ - yum -y install make && \ - yum -y install which && \ - yum -y install git - -ENV PIKA /pika -COPY . ${PIKA} -WORKDIR ${PIKA} -RUN make -ENV PATH ${PIKA}/output/bin:${PATH} - -WORKDIR ${PIKA}/output diff --git a/tools/pika_migrate/LICENSE b/tools/pika_migrate/LICENSE deleted file mode 100644 index 93ce6ffc0b..0000000000 --- a/tools/pika_migrate/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ - The MIT License (MIT) - -Copyright © 2018 - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/tools/pika_migrate/Makefile b/tools/pika_migrate/Makefile deleted file mode 100644 index be7e8191f5..0000000000 --- a/tools/pika_migrate/Makefile +++ /dev/null @@ -1,245 +0,0 @@ -CLEAN_FILES = # deliberately empty, so we can append below. -CXX=g++ -PLATFORM_LDFLAGS= -lpthread -lrt -PLATFORM_CXXFLAGS= -std=c++11 -fno-builtin-memcmp -msse -msse4.2 -PROFILING_FLAGS=-pg -OPT= -LDFLAGS += -Wl,-rpath=$(RPATH) - -# DEBUG_LEVEL can have two values: -# * DEBUG_LEVEL=2; this is the ultimate debug mode. It will compile pika -# without any optimizations. To compile with level 2, issue `make dbg` -# * DEBUG_LEVEL=0; this is the debug level we use for release. If you're -# running pika in production you most definitely want to compile pika -# with debug level 0. 
To compile with level 0, run `make`, - -# Set the default DEBUG_LEVEL to 0 -DEBUG_LEVEL?=0 - -ifeq ($(MAKECMDGOALS),dbg) - DEBUG_LEVEL=2 -endif - -ifneq ($(DISABLE_UPDATE_SB), 1) -$(info updating submodule) -dummy := $(shell (git submodule init && git submodule update)) -endif - -# compile with -O2 if debug level is not 2 -ifneq ($(DEBUG_LEVEL), 2) -OPT += -O2 -fno-omit-frame-pointer -# if we're compiling for release, compile without debug code (-DNDEBUG) and -# don't treat warnings as errors -OPT += -DNDEBUG -DISABLE_WARNING_AS_ERROR=1 -# Skip for archs that don't support -momit-leaf-frame-pointer -ifeq (,$(shell $(CXX) -fsyntax-only -momit-leaf-frame-pointer -xc /dev/null 2>&1)) -OPT += -momit-leaf-frame-pointer -endif -else -$(warning Warning: Compiling in debug mode. Don't use the resulting binary in production) -OPT += $(PROFILING_FLAGS) -DEBUG_SUFFIX = "_debug" -endif - -# Link tcmalloc if exist -dummy := $(shell ("$(CURDIR)/detect_environment" "$(CURDIR)/make_config.mk")) -include make_config.mk -CLEAN_FILES += $(CURDIR)/make_config.mk -PLATFORM_LDFLAGS += $(TCMALLOC_LDFLAGS) -PLATFORM_LDFLAGS += $(ROCKSDB_LDFLAGS) -PLATFORM_CXXFLAGS += $(TCMALLOC_EXTENSION_FLAGS) - -# ---------------------------------------------- -OUTPUT = $(CURDIR)/output -THIRD_PATH = $(CURDIR)/third -SRC_PATH = $(CURDIR)/src - -# ----------------Dependences------------------- - -ifndef SLASH_PATH -SLASH_PATH = $(THIRD_PATH)/slash -endif -SLASH = $(SLASH_PATH)/slash/lib/libslash$(DEBUG_SUFFIX).a - -ifndef PINK_PATH -PINK_PATH = $(THIRD_PATH)/pink -endif -PINK = $(PINK_PATH)/pink/lib/libpink$(DEBUG_SUFFIX).a - -ifndef ROCKSDB_PATH -ROCKSDB_PATH = $(THIRD_PATH)/rocksdb -endif -ROCKSDB = $(ROCKSDB_PATH)/librocksdb$(DEBUG_SUFFIX).a - -ifndef GLOG_PATH -GLOG_PATH = $(THIRD_PATH)/glog -endif - -ifndef BLACKWIDOW_PATH -BLACKWIDOW_PATH = $(THIRD_PATH)/blackwidow -endif -BLACKWIDOW = $(BLACKWIDOW_PATH)/lib/libblackwidow$(DEBUG_SUFFIX).a - - -ifeq ($(360), 1) -GLOG := $(GLOG_PATH)/.libs/libglog.a -endif - -INCLUDE_PATH = -I. 
\ - -I$(SLASH_PATH) \ - -I$(PINK_PATH) \ - -I$(BLACKWIDOW_PATH)/include \ - -I$(BLACKWIDOW_PATH)\ - -I$(ROCKSDB_PATH) \ - -I$(ROCKSDB_PATH)/include \ - -I$(GLOG_PATH)/src \ - -LIB_PATH = -L./ \ - -L$(SLASH_PATH)/slash/lib \ - -L$(PINK_PATH)/pink/lib \ - -L$(BLACKWIDOW_PATH)/lib \ - -L$(ROCKSDB_PATH) \ - -L$(GLOG_PATH)/.libs \ - -LDFLAGS += $(LIB_PATH) \ - -lpink$(DEBUG_SUFFIX) \ - -lslash$(DEBUG_SUFFIX) \ - -lblackwidow$(DEBUG_SUFFIX) \ - -lrocksdb$(DEBUG_SUFFIX) \ - -lglog \ - -lprotobuf \ - -static-libstdc++ \ - -# ---------------End Dependences---------------- - -VERSION_CC=$(SRC_PATH)/build_version.cc -LIB_SOURCES := $(VERSION_CC) \ - $(filter-out $(VERSION_CC), $(wildcard $(SRC_PATH)/*.cc)) - -PIKA_PROTO := $(wildcard $(SRC_PATH)/*.proto) -PIKA_PROTO_GENS:= $(PIKA_PROTO:%.proto=%.pb.h) $(PIKA_PROTO:%.proto=%.pb.cc) - - -#----------------------------------------------- - -AM_DEFAULT_VERBOSITY = 0 - -AM_V_GEN = $(am__v_GEN_$(V)) -am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) -am__v_GEN_0 = @echo " GEN " $(notdir $@); -am__v_GEN_1 = -AM_V_at = $(am__v_at_$(V)) -am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) -am__v_at_0 = @ -am__v_at_1 = - -AM_V_CC = $(am__v_CC_$(V)) -am__v_CC_ = $(am__v_CC_$(AM_DEFAULT_VERBOSITY)) -am__v_CC_0 = @echo " CC " $(notdir $@); -am__v_CC_1 = -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -AM_V_CCLD = $(am__v_CCLD_$(V)) -am__v_CCLD_ = $(am__v_CCLD_$(AM_DEFAULT_VERBOSITY)) -am__v_CCLD_0 = @echo " CCLD " $(notdir $@); -am__v_CCLD_1 = - -AM_LINK = $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) - -CXXFLAGS += -g - -# This (the first rule) must depend on "all". -default: all - -WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare \ - -Wno-unused-parameter -Woverloaded-virtual \ - -Wnon-virtual-dtor -Wno-missing-field-initializers - -ifndef DISABLE_WARNING_AS_ERROR - WARNING_FLAGS += -Werror -endif - -CXXFLAGS += $(WARNING_FLAGS) $(INCLUDE_PATH) $(PLATFORM_CXXFLAGS) $(OPT) - -LDFLAGS += $(PLATFORM_LDFLAGS) - -date := $(shell date +%F) -git_sha := $(shell git rev-parse HEAD 2>/dev/null) -gen_build_version = sed -e s/@@GIT_SHA@@/$(git_sha)/ -e s/@@GIT_DATE_TIME@@/$(date)/ src/build_version.cc.in -# Record the version of the source that we are compiling. -# We keep a record of the git revision in this file. It is then built -# as a regular source file as part of the compilation process. -# One can run "strings executable_filename | grep _build_" to find -# the version of the source that we used to build the executable file. 
-CLEAN_FILES += $(SRC_PATH)/build_version.cc
-
-$(SRC_PATH)/build_version.cc: FORCE
-	$(AM_V_GEN)rm -f $@-t
-	$(AM_V_at)$(gen_build_version) > $@-t
-	$(AM_V_at)if test -f $@; then \
-	  cmp -s $@-t $@ && rm -f $@-t || mv -f $@-t $@; \
-	  else mv -f $@-t $@; fi
-FORCE:
-
-LIBOBJECTS = $(LIB_SOURCES:.cc=.o)
-PROTOOBJECTS = $(PIKA_PROTO:.proto=.pb.o)
-
-# if user didn't config LIBNAME, set the default
-ifeq ($(BINNAME),)
-# we should only run pika in production with DEBUG_LEVEL 0
-BINNAME=pika$(DEBUG_SUFFIX)
-endif
-BINARY = ${BINNAME}
-
-.PHONY: distclean clean dbg all
-
-%.pb.h %.pb.cc: %.proto
-	$(AM_V_GEN)protoc --proto_path=$(SRC_PATH) --cpp_out=$(SRC_PATH) $<
-
-%.o: %.cc
-	$(AM_V_CC)$(CXX) $(CXXFLAGS) -c $< -o $@
-
-proto: $(PIKA_PROTO_GENS)
-
-all: $(BINARY)
-
-dbg: $(BINARY)
-
-$(BINARY): $(SLASH) $(PINK) $(ROCKSDB) $(BLACKWIDOW) $(GLOG) $(PROTOOBJECTS) $(LIBOBJECTS)
-	$(AM_V_at)rm -f $@
-	$(AM_V_at)$(AM_LINK)
-	$(AM_V_at)rm -rf $(OUTPUT)
-	$(AM_V_at)mkdir -p $(OUTPUT)/bin
-	$(AM_V_at)mv $@ $(OUTPUT)/bin
-	$(AM_V_at)cp -r $(CURDIR)/conf $(OUTPUT)
-
-
-$(SLASH):
-	$(AM_V_at)make -C $(SLASH_PATH)/slash/ DEBUG_LEVEL=$(DEBUG_LEVEL)
-
-$(PINK):
-	$(AM_V_at)make -C $(PINK_PATH)/pink/ DEBUG_LEVEL=$(DEBUG_LEVEL) NO_PB=0 SLASH_PATH=$(SLASH_PATH)
-
-$(ROCKSDB):
-	$(AM_V_at)make -j $(PROCESSOR_NUMS) -C $(ROCKSDB_PATH)/ static_lib DISABLE_JEMALLOC=1 DEBUG_LEVEL=$(DEBUG_LEVEL)
-
-$(BLACKWIDOW):
-	$(AM_V_at)make -C $(BLACKWIDOW_PATH) ROCKSDB_PATH=$(ROCKSDB_PATH) SLASH_PATH=$(SLASH_PATH) DEBUG_LEVEL=$(DEBUG_LEVEL)
-
-$(GLOG):
-	cd $(THIRD_PATH)/glog; if [ ! -f ./Makefile ]; then ./configure --disable-shared; fi; make; echo '*' > $(CURDIR)/third/glog/.gitignore;
-
-clean:
-	rm -rf $(OUTPUT)
-	rm -rf $(CLEAN_FILES)
-	rm -rf $(PIKA_PROTO_GENS)
-	find $(SRC_PATH) -name "*.[oda]*" -exec rm -f {} \;
-	find $(SRC_PATH) -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
-
-distclean: clean
-	make -C $(PINK_PATH)/pink/ SLASH_PATH=$(SLASH_PATH) clean
-	make -C $(SLASH_PATH)/slash/ clean
-	make -C $(BLACKWIDOW_PATH)/ clean
-	make -C $(ROCKSDB_PATH)/ clean
-# make -C $(GLOG_PATH)/ clean
diff --git a/tools/pika_migrate/README.md b/tools/pika_migrate/README.md
deleted file mode 100644
index 2387bb1bf8..0000000000
--- a/tools/pika_migrate/README.md
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-## Supported Versions
-
-Works with PIKA 3.2.0 and above (3.5.x and 4.0.x are not supported), in standalone mode with only a single DB in use. If your PIKA version is below 3.2.0, upgrade the core to 3.2.0 first. For details, see "Upgrading the PIKA core version to 3.2.0".
-### Background
-The pika_to_redis tool previously provided by the Pika project only supported migrating the data in Pika's DB to Pika or Redis offline, with no incremental sync. This tool is effectively a special Pika: after becoming a slave, it internally forwards the data obtained from the master to Redis, and it also supports incremental sync, which enables hot migration.
-
-## How Migration Works
-
-Data is migrated from PIKA to Redis online, with both full and incremental synchronization. The pika-migrate tool poses as a slave of the PIKA master, fetches data from the master and forwards it to Redis, and keeps syncing incrementally, achieving online hot migration.
-1. pika-migrate obtains the master's full DB data through a dbsync request, together with the binlog offset corresponding to that DB snapshot.
-2. After receiving the master's full DB data, it scans the DB and forwards the packed data to Redis.
-3. Using the binlog offset obtained earlier, it requests incremental sync from the master; during incremental sync, the binlog entries received from the master are reassembled into Redis commands and forwarded to Redis.
-
-
-## Caveats
-
-PIKA allows different data structures to use the same key name, but Redis does not. When data exists under the same key, the first data structure migrated to Redis wins, and the other data structures under that key name are lost.
-The tool only supports hot migration of PIKA in standalone mode with a single DB. In cluster mode, or in multi-DB scenarios, the tool reports an error and exits.
-To avoid writing dirty data to Redis through repeated full syncs after the master's binlog has been purged, the tool protects itself: it reports an error and exits the second time a full sync would be triggered.
-
-## Build Steps
-```shell
-# If the sub-repositories in the third directory are empty, update the submodules from the tool's root directory
-git submodule update --init --recursive
-# Build
-make
-```
-
-### Build Notes
-
-1. If rocksdb fails to build, first prepare the environment following the steps [here](https://github.com/facebook/rocksdb/blob/004237e62790320d8e630456cbeb6f4a1f3579c2/INSTALL.md).
-2. For an error like:
-```shell
-error: implicitly-declared 'constexpr rocksdb::FileDescriptor::FileDescriptor(const rocksdb::FileDescriptor&)' is deprecated [-Werror=deprecated-copy]
-```
-you can edit the makefile in the tools/pika_migrate/third/rocksdb directory:
-WARNING_FLAGS = -Wno-missing-field-initializers
--Wno-unused-parameter
-
-## Migration Steps
-
-1. Run the following command on the PIKA master so that it keeps 10000 binlog files.
-
-```shell
-config set expire-logs-nums 10000
-```
-
-```text
-Note:
-Writing the full dataset to Redis may take quite a long time, during which the master's original binlog position could be cleaned up. Keeping 10000 binlog files on the PIKA master ensures that the corresponding binlog files still exist when the tool later requests incremental sync.
-binlog files take up disk space; choose the number of retained binlogs according to your actual situation.
-```
-
-2. Modify the following parameters in the migration tool's configuration file pika.conf.
-   ![img.png](img.png)
-   target-redis-host: specifies the IP address of the target Redis.
-   target-redis-port: specifies the port of the target Redis.
-   target-redis-pwd: specifies the password of the default Redis account.
-   sync-batch-num: pika-migrate packs every sync-batch-num items received from the master into one batch before sending them to Redis, improving forwarding efficiency.
-   redis-sender-num: specifies the number of threads used to forward packets. Commands are dispatched to threads by key hash, so there is no need to worry about multi-threaded sending corrupting the data.
-3. Run the following command in the tool's directory to start pika-migrate and check its output.
-```shell
-pika -c pika.conf
-```
-
-4. Run the following command to make the migration tool pose as a slave and request sync from the master, and watch for error messages.
-```shell
-slaveof ip port force
-```
-
-5. After confirming that the master-slave relationship is established, pika-migrate forwards data to the target Redis as it replicates. Run the following command to check the replication lag. You can also write a special key on the master and then check whether it is immediately readable on the Redis side, to judge whether the data sync has caught up.
-```shell
-info Replication
-```
diff --git a/tools/pika_migrate/conf/pika.conf b/tools/pika_migrate/conf/pika.conf
index d1dd3f8831..ffcd0c1403 100644
--- a/tools/pika_migrate/conf/pika.conf
+++ b/tools/pika_migrate/conf/pika.conf
@@ -1,93 +1,298 @@
-# Pika port
-port : 9222
-# Thread Number
+###########################
+# Pika configuration file #
+###########################
+
+# Pika port, the default value is 9221.
+# [NOTICE] Magic port offsets of port+1000 / port+2000 are used by Pika at present.
+# Port 10221 is used for Rsync and port 11221 is used for Replication, while the listening port is 9221.
+port : 9221
+
+db-instance-num : 3
+rocksdb-ttl-second : 86400 * 7;
+rocksdb-periodic-second : 86400 * 3;
+
+# Random value identifying the Pika server, its string length must be 40.
+# If not set, Pika will generate a random string of 40 characters.
+# run-id :
+
+# Master's run-id
+# master-run-id :
+
+# The number of Net-worker threads in Pika.
+# It's not recommended to set this value to exceed
+# the number of CPU cores on the deployment server.
thread-num : 1
-# Thread Pool Size
+
+# Use a Net worker thread to read the redis Cache for the [Get, HGet] commands,
+# which can significantly improve QPS and reduce latency when the cache hit rate is high.
+# The default value is "yes"; set it to "no" if you want to disable it.
+rtc-cache-read : yes
+
+# Size of the thread pool; the threads within this pool
+# are dedicated to handling user requests.
thread-pool-size : 12
-# Sync Thread Number
+
+# This parameter controls whether fast and slow commands are separated.
+# When slow-cmd-pool is set to yes, fast and slow commands are separated.
+# When set to no, they are not separated.
+slow-cmd-pool : no
+
+# Size of the low level thread pool; the threads within this pool
+# are dedicated to handling slow user requests.
+slow-cmd-thread-pool-size : 1
+
+# Size of the admin thread pool; the threads within this pool
+# are dedicated to handling administrative requests.
+admin-thread-pool-size : 2
+
+# Slow cmd list e.g. hgetall, mset
+slow-cmd-list :
+
+# List of commands considered as administrative. These commands will be handled by the admin thread pool. Modify this list as needed.
+# Default commands: info, ping, monitor
+# This parameter is only supported by the CONFIG GET command and not by CONFIG SET.
+admin-cmd-list : info, ping, monitor
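+
+# Illustrative example (not part of the original file; values are assumptions):
+# to actually route a couple of heavy commands onto the dedicated slow pool
+# described above, a plausible setup would be:
+#   slow-cmd-pool : yes
+#   slow-cmd-thread-pool-size : 2
+#   slow-cmd-list : hgetall, mset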
+
+# The number of threads that write to the DB on the slave node when replicating.
+# It's preferable to set the slave's sync-thread-num close to the master's thread-pool-size.
sync-thread-num : 6
-# Pika log path
+
+# The number of threads that write binlog on the slave node when replicating;
+# each DB could only bind to one sync-binlog-thread at most.
+# [NOTICE] It's highly recommended to set sync-binlog-thread-num equal to the conf item 'databases' (then each DB could have an exclusive thread to write binlog),
+# e.g. if you use 8 DBs (databases is 8), sync-binlog-thread-num is preferably 8.
+# The valid range of sync-binlog-thread-num is [1, databases]; the final value is Min(sync-binlog-thread-num, databases).
+sync-binlog-thread-num : 1
+
+# Directory to store log files of Pika, which contains multiple types of logs,
+# including INFO, WARNING and ERROR logs, as well as the binlog (write2file) files which
+# are used for replication.
log-path : ./log/
+
+# Retention time of server logs (pika.{hostname}.{username}.log.{loglevel}.YYYYMMDD-HHMMSS) stored within log-path.
+# Any server log files that exceed this time will be cleaned up.
+# The unit of log-retention-time is [days] and the default value is 7 (days).
+log-retention-time : 7
+
+# Directory to store the data of Pika.
db-path : ./db/
-# Pika write-buffer-size
-write-buffer-size : 268435456
-# Pika timeout
+
+# The size of a single RocksDB memtable at Pika's bottom layer (Pika uses RocksDB to store persistent data).
+# [Tip] A big write-buffer-size can improve writing performance,
+# but it will generate heavier IO load when flushing from buffer to disk;
+# you should configure it based on your usage scenario.
+# Supported Units [K|M|G], write-buffer-size default unit is in [bytes].
+write-buffer-size : 256M
+
+# The size of one block in arena memory allocation.
+# If <= 0, a proper value is automatically calculated.
+# (usually 1/8 of write-buffer-size, rounded up to a multiple of 4KB)
+# Supported Units [K|M|G], arena-block-size default unit is in [bytes].
+arena-block-size :
+
+# Timeout of Pika's connections: the countdown starts when there are no requests
+# on a connection (it enters sleep state); when the countdown reaches 0, the connection
+# will be closed by Pika.
+# [Tip] Running out of Pika's connections can be avoided if this value
+# is configured properly.
+# The unit of timeout is [seconds] and its default value is 60 (s).
timeout : 60
-# Requirepass
+
+# The [password of administrator], which is empty by default.
+# [NOTICE] If this admin password is the same as the user password (including both being empty),
+# the value of userpass will be ignored and all users are considered administrators;
+# in this scenario, users are not subject to the restrictions imposed by the userblacklist.
+# PS: "user password" refers to the value of the parameter below: userpass.
requirepass :
+
+# Password for replication verification, used for authentication when a slave
+# connects to a master to request replication.
+# [NOTICE] The value of this parameter must match the "requirepass" setting on the master.
masterauth :
-# Userpass
-userpass :
-# User Blacklist
-userblacklist :
-# if this option is set to 'classic', that means pika support multiple DB, in
-# this mode, option databases enable
-# if this option is set to 'sharding', that means pika support multiple Table, you
-# can specify slot num for each table, in this mode, option default-slot-num enable
-# Pika instance mode [classic | sharding]
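+
+# Illustrative example (hypothetical values, not defaults): for a master whose
+# pika.conf sets
+#   requirepass : mypass
+# the slave replicating from it must set
+#   masterauth : mypass
+# so that replication authentication succeeds.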
+
+# The [password of user], which is empty by default.
+# [NOTICE] If this user password is the same as the admin password (including both being empty),
+# the value of this parameter will be ignored and all users are considered administrators;
+# in this scenario, users are not subject to the restrictions imposed by the userblacklist.
+# PS: "admin password" refers to the value of the parameter above: requirepass.
+# userpass :
+
+# The blacklist of commands for users logged in with userpass;
+# commands added to this list will not be available to users except for the administrator.
+# [Advice] It's recommended to add high-risk commands to this list.
+# [Format] Commands should be separated by ",". For example: FLUSHALL, SHUTDOWN, KEYS, CONFIG
+# By default, this list is empty.
+# userblacklist :
+
+# Running mode of Pika; the current version only supports running in "classic mode".
+# If set to 'classic', Pika will create multiple DBs whose number is the value of the configure item "databases".
instance-mode : classic
+
+# The number of databases when Pika runs in classic mode.
+# The default database id is DB 0. You can select a different one on
+# a per-connection basis by using SELECT. The db id range is [0, 'databases' value - 1].
+# The value range of this parameter is [1, 8].
+# [NOTICE] It's RECOMMENDED to set sync-binlog-thread-num equal to the DB num (databases);
+# if you've changed the value of databases, remember to check whether the value of sync-binlog-thread-num is proper.
databases : 1
-# default slot number each table in sharding mode
-default-slot-num : 1024
-# Dump Prefix
+
+# The number of followers of a master. Only [0, 1, 2, 3, 4] is valid at present.
+# By default, this num is set to 0, which means this feature is [not enabled]
+# and Pika runs in standalone mode.
+replication-num : 0
+
+# The consensus level defines the number of confirms (ACKs) the leader node needs to receive from
+# follower nodes before returning the result to the client that sent the request.
+# The [value range] of this parameter is [0, ...replication-num].
+# The default value of consensus-level is 0, which means this feature is not enabled.
+consensus-level : 0
+
+# The prefix of dump files' names.
+# All files generated by the command "bgsave" will be named with this prefix.
dump-prefix :
-# daemonize [yes | no]
+
+# daemonize [yes | no].
#daemonize : yes
-# Dump Path
+
+# The directory to store dump files generated by the command "bgsave".
dump-path : ./dump/
-# Expire-dump-days
+
+# TTL of dump files generated by the command "bgsave".
+# Any dump files which exceed this TTL will be deleted.
+# The unit of dump-expire is [days] and the default value is 0 (days),
+# which means dump files never expire.
dump-expire : 0
-# pidfile Path
+
+# Pid file path of Pika.
pidfile : ./pika.pid
-# Max Connection
+
+# The maximum number of Pika's connections.
maxclients : 20000
-# the per file size of sst to compact, defalut is 2M
-target-file-size-base : 20971520
-# Expire-logs-days
+
+# The size of an sst file in RocksDB (Pika is based on RocksDB).
+# sst files are hierarchical; the smaller the sst file size, the higher the performance and the lower the merge cost,
+# the price being that the number of sst files could be huge. On the contrary, the bigger the sst file size, the lower
+# the performance and the higher the merge cost, while the number of files is fewer.
+# Supported Units [K|M|G], target-file-size-base default unit is in [bytes] and the default value is 20M.
+target-file-size-base : 20M
+
+# Expire time of binlog (write2file) files stored within log-path.
+# Any binlog (write2file) files that exceed this expire time will be cleaned up.
+# The unit of expire-logs-days is [days] and the default value is 7 (days).
+# The [minimum value] of this parameter is 1 (day).
expire-logs-days : 7
-# Expire-logs-nums
+
+# The maximum number of binlog (write2file) files.
+# Once the total number of binlog files exceeds this value,
+# automatic cleaning will start to ensure the maximum number
+# of binlog files is equal to expire-logs-nums.
+# The [minimum value] of this parameter is 10.
expire-logs-nums : 10
-# Root-connection-num
+
+# The number of guaranteed connections for the root user.
+# This parameter guarantees that there are 2 (by default) connections available
+# for the root user to log in to Pika from 127.0.0.1, even if the maximum connection limit is reached.
+# PS: The maximum connection refers to the parameter above: maxclients.
+# The default value of root-connection-num is 2.
root-connection-num : 2
+
# Slowlog-write-errorlog
slowlog-write-errorlog : no
-# Slowlog-log-slower-than
+
+# The time threshold for slow log recording.
+# Any command whose execution time exceeds this threshold will be recorded in pika-ERROR.log,
+# which is stored in log-path.
+# The unit of slowlog-log-slower-than is [microseconds(μs)] and the default value is 10000 μs / 10 ms.
slowlog-log-slower-than : 10000
+
# Slowlog-max-len
slowlog-max-len : 128
+
# Pika db sync path
db-sync-path : ./dbsync/
-# db sync speed(MB) max is set to 1024MB, min is set to 0, and if below 0 or above 1024, the value will be adjust to 1024
+
+# The maximum transmission speed during full synchronization.
+# Exhaustion of the network can be prevented by setting this parameter properly.
+# The value range of this parameter is [1,1024] with the unit in [MB/s].
+# [NOTICE] If this parameter is set to an invalid value (smaller than 0 or bigger than 1024),
+# it will be automatically reset to 1024.
+# The default value of db-sync-speed is -1 (1024MB/s).
db-sync-speed : -1
-# The slave priority
+
+# The priority of the slave node when electing a new master node.
+# The slave node with a [lower] value of slave-priority will have a [higher priority] to be elected as the new master node.
+# This parameter is only used in conjunction with sentinel and serves no other purpose.
+# The default value of slave-priority is 100.
slave-priority : 100
-# network interface
+
+# Specify the network interface that works with Pika.
#network-interface : eth1
-# replication
+
+# The IP and port of the master node are specified by this parameter for
+# replication between master and slaves.
+# The [Format] is "ip:port"; for example, "192.168.1.2:6666" indicates that
+# the slave instances configured with this value will automatically send
+# the SLAVEOF command to port 6666 of 192.168.1.2 after startup.
+# This parameter should be configured on slave nodes.
#slaveof : master-ip:master-port
-# CronTask, format 1: start-end/ratio, like 02-04/60, pika will check to schedule compaction between 2 to 4 o'clock everyday
-# if the freesize/disksize > 60%.
-# format 2: week/start-end/ratio, like 3/02-04/60, pika will check to schedule compaction between 2 to 4 o'clock
-# every wednesday, if the freesize/disksize > 60%.
-# NOTICE: if compact-interval is set, compact-cron will be mask and disable.
+
+# A daily/weekly automatic full compaction task is configured by compact-cron.
+#
+# [Format-daily]: start time(hour)-end time(hour)/disk-free-space-ratio,
+# example: with a value of "02-04/60", Pika will perform the full compaction task between 2:00-4:00 AM every day if
+# disk-free-size / disk-size > 60%.
+#
+# [Format-weekly]: week/start time(hour)-end time(hour)/disk-free-space-ratio,
+# example: with a value of "3/02-04/60", Pika will perform the full compaction task between 2:00-4:00 AM every Wednesday if
+# disk-free-size / disk-size > 60%.
+#
+# [Tip] Automatic full compaction is suitable for scenarios with multiple data structures
+# where lots of items are expired or deleted, or key names are frequently reused.
+#
+# [NOTICE]: If compact-interval is set, compact-cron will be masked and disabled.
#
#compact-cron : 3/02-04/60
-# Compact-interval, format: interval/ratio, like 6/60, pika will check to schedule compaction every 6 hours,
-# if the freesize/disksize > 60%. NOTICE:compact-interval is prior than compact-cron;
+
+# An automatic full compaction task over a time interval is configured by compact-interval.
+# [Format]: time interval(hour)/disk-free-space-ratio, example: "6/60", Pika will perform full compaction every 6 hours
+# if disk-free-size / disk-size > 60%.
+# [NOTICE]: compact-interval takes priority over compact-cron.
#compact-interval :
-# server-id for hub
-server-id : 1
-# the size of flow control window while sync binlog between master and slave.Default is 9000 and the maximum is 90000.
+# The disable_auto_compactions option is [true | false]
+disable_auto_compactions : false
+
+# RocksDB max_subcompactions; increasing this value can accelerate the execution of a single compaction task.
+# It's recommended to increase its value if large compactions are found in your instance.
+max-subcompactions : 1
+# The minimum disk usage ratio for checking resume.
+# If the disk usage ratio is lower than min-check-resume-ratio, resume will not be checked; it is only checked when the ratio is higher.
+# Its default value is 0.7.
+#min-check-resume-ratio : 0.7
+
+# The minimum free disk space to trigger db resume.
+# If the db has a background error, resume can only be triggered manually when the free disk size is larger than this configuration.
+# Its default value is 256MB.
+# [NOTICE]: least-free-disk-resume-size should not be smaller than write-buffer-size!
+#least-free-disk-resume-size : 256M
+
+# The interval for manually trying to resume the db is configured by manually-resume-interval.
+# If the db has a background error, it will try to call resume() to resume the db once the least free disk space to resume is satisfied.
+# Its default value is 60 seconds.
+#manually-resume-interval : 60
+
+# This window-size determines the amount of data that can be transmitted in a single synchronization process.
+# [Tip] In scenarios of high network latency, increasing this size can improve synchronization efficiency.
+# Its default value is 9000 and the [maximum] value is 90000.
sync-window-size : 9000
+# Maximum buffer size of a client connection.
+# [NOTICE] Master and slaves must have exactly the same value for max-conn-rbuf-size.
+# Supported Units [K|M|G]. Its default unit is in [bytes] and its default value is 268435456 (256MB). The value range is [64MB, 1GB].
+max-conn-rbuf-size : 268435456
+
###################
## Migrate Settings
###################
@@ -99,46 +304,401 @@ target-redis-pwd :
sync-batch-num : 100
redis-sender-num : 10
-###################
-## Critical Settings
-###################
+################################################################################
+#! Critical Settings !#
+################################################################################
+
# write_binlog [yes | no]
write-binlog : yes
-# binlog file size: default is 100M, limited in [1K, 2G]
+
+# The size of the binlog file, which cannot be modified once the Pika instance has started.
+# [NOTICE] Master and slaves must have exactly the same value for binlog-file-size.
+# The [value range] of binlog-file-size is [1K, 2G].
+# Supported Units [K|M|G], binlog-file-size default unit is in [bytes] and the default value is 100M.
binlog-file-size : 104857600
+
+# Automatically triggers a small compaction according to statistics.
# Use the cache to store up to 'max-cache-statistic-keys' keys
-# if 'max-cache-statistic-keys' set to '0', that means turn off the statistics function
-# it also doesn't automatically trigger a small compact feature
+# If 'max-cache-statistic-keys' is set to '0', the statistics function is turned off
+# and this automatic small compaction feature is disabled.
max-cache-statistic-keys : 0
+
# When 'delete' or 'overwrite' a specific multi-data structure key 'small-compaction-threshold' times,
-# a small compact is triggered automatically, default is 5000, limited in [1, 100000]
+# a small compaction is triggered automatically if the small compaction feature is enabled.
+# The small-compaction-threshold default value is 5000 and the value range is [1, 100000].
small-compaction-threshold : 5000
-# If the total size of all live memtables of all the DBs exceeds
-# the limit, a flush will be triggered in the next DB to which the next write
-# is issued.
+small-compaction-duration-threshold : 10000
+
+# The maximum total size of all live memtables of the RocksDB instance owned by Pika.
+# Flushing from memtable to disk will be triggered if the actual memory usage of RocksDB
+# exceeds max-write-buffer-size when the next write operation is issued.
+# [RocksDB-Basic-Tuning](https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning)
+# Supported Units [K|M|G], max-write-buffer-size default unit is in [bytes].
max-write-buffer-size : 10737418240
-# Limit some command response size, like Scan, Keys*
+
+# The maximum number of write buffers (memtables) that are built up in memory for one ColumnFamily in the DB.
+# The default and the minimum number is 2. It means that Pika (RocksDB) will write to a write buffer
+# when it flushes the data of another write buffer to storage.
+# If max-write-buffer-num > 3, writing will be slowed down.
+max-write-buffer-num : 2
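+
+# Rough sizing sketch (illustrative, based on the values in this file): with
+# write-buffer-size = 256M, max-write-buffer-num = 2 and db-instance-num = 3,
+# one DB can hold up to about 256M * 2 * 3 = 1.5G of live memtables in the
+# worst case, which stays well below the 10G max-write-buffer-size cap above.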
+
+# `min_write_buffer_number_to_merge` is the minimum number of memtables
+# that need to be merged before being flushed to storage. For example, if the
+# option is set to 2, immutable memtables will only be flushed if there
+# are two of them - a single immutable memtable will never be flushed.
+# If multiple memtables are merged together, less data will be written
+# to storage because duplicate updates to the same key are merged. However,
+# each Get() must linearly traverse all immutable memtables to check
+# whether the key exists. Setting this value too high may hurt performance.
+min-write-buffer-number-to-merge : 1
+
+# The total size of WAL files. When it reaches this limit, rocksdb will force the flush of column families
+# whose memtables are backed by the oldest live WAL file. It is also used to control the rocksdb open time when
+# the process restarts.
+max-total-wal-size : 1073741824
+
+# rocksdb level0_stop_writes_trigger
+level0-stop-writes-trigger : 36
+
+# rocksdb level0_slowdown_writes_trigger
+level0-slowdown-writes-trigger : 20
+
+# rocksdb level0_file_num_compaction_trigger
+level0-file-num-compaction-trigger : 4
+
+# enable db statistics [yes | no] default no
+enable-db-statistics : no
+# See rocksdb/include/rocksdb/statistics.h enum StatsLevel for more details.
+# Set db-statistics-level to 2 if you only use ticker counters.
+db-statistics-level : 2
+
+# The maximum size of the response package to the client, to prevent memory
+# exhaustion caused by commands like 'keys *' and 'Scan' which can generate huge responses.
+# Supported Units [K|M|G]. The default unit is in [bytes].
max-client-response-size : 1073741824
-# Compression
+
+# The compression algorithm. It cannot be changed once Pika has started.
+# Supported types: [snappy, zlib, lz4, zstd]. If you do not want to compress the SST files, please set its value to none.
+# [NOTICE] The Pika official binary release only links the snappy library statically, which means that
+# you should compile Pika from the source code and then statically link it with other compression algorithm libraries by yourself.
compression : snappy
-# max-background-flushes: default is 1, limited in [1, 4]
-max-background-flushes : 1
-# max-background-compactions: default is 2, limited in [1, 8]
-max-background-compactions : 2
-# maximum value of Rocksdb cached open file descriptors
+
+# If the vector size is smaller than the level number, the undefined lower levels use the
+# last option in the configurable array; for example, for a 3-level
+# LSM tree the following settings are the same:
+# configurable array: [none:snappy]
+# LSM settings: [none:snappy:snappy]
+# When this configurable is enabled, compression is ignored;
+# by default l0 and l1 use no compression, l2 and above use the `compression` option.
+# https://github.com/facebook/rocksdb/wiki/Compression
+#compression_per_level : [none:none:snappy:lz4:lz4]
+
+# The number of rocksdb background threads (the sum of max-background-compactions and max-background-flushes).
+# If max-background-jobs has a valid value AND both 'max-background-flushes' and 'max-background-compactions' are set to -1,
+# then 'max-background-flushes' and 'max-background-compactions' will be auto-configured by rocksdb; specifically,
+# 1/4 of max-background-jobs will be given to 'max-background-flushes' and the rest (3/4) will be given to 'max-background-compactions'.
+# The 'max-background-jobs' default value is 3 and the value range is [2, 12].
+max-background-jobs : 3
+
+# The number of background flushing threads.
+# The max-background-flushes default value is -1 and the value range is [1, 4] or -1.
+# If 'max-background-flushes' is set to -1, 'max-background-compactions' should also be set to -1,
+# which means letting rocksdb auto-configure them based on the value of 'max-background-jobs'.
+max-background-flushes : -1
+
+# [NOTICE] You MUST NOT set one of max-background-flushes or max-background-compactions to -1 while setting the other one to another value (not -1).
+# They SHOULD both be -1, or both not -1 (if you want to configure them manually).
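+
+# Worked example (illustrative) of the auto-config rule described above: with
+#   max-background-jobs : 8
+#   max-background-flushes : -1
+#   max-background-compactions : -1
+# rocksdb derives 8 * 1/4 = 2 flushing threads and 8 * 3/4 = 6 compacting threads.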
+
+# The number of background compacting threads.
+# The max-background-compactions default value is -1 and the value range is [1, 8] or -1.
+# If 'max-background-compactions' is set to -1, 'max-background-flushes' should also be set to -1,
+# which means letting rocksdb auto-configure them based on the value of 'max-background-jobs'.
+max-background-compactions : -1
+
+# RocksDB delayed-write-rate, default is 0 (inferred from rate-limiter by RocksDB).
+# Ref from rocksdb: Whenever stall conditions are triggered, RocksDB will reduce the write rate to delayed_write_rate,
+# and could possibly reduce the write rate to even lower than delayed_write_rate if estimated pending compaction bytes accumulate.
+# If the value is 0, RocksDB will infer a value from the `rate_limiter` value if it is not empty, or 16MB if `rate_limiter` is empty.
+# Note that if users change the rate in `rate_limiter` after the DB is opened, delayed_write_rate won't be adjusted.
+# [Support Dynamically changeable] Sending 'config set delayed-write-rate' to a running pika changes its value dynamically.
+delayed-write-rate : 0
+
+
+# RocksDB will try to limit the number of bytes in one compaction to be lower than this max-compaction-bytes.
+# But it's NOT guaranteed.
+# The default value is -1, which means 25 * target-file-size-base (RocksDB's default value).
+max-compaction-bytes : -1
+
+
+# maximum value of RocksDB cached open file descriptors
max-cache-files : 5000
+
+# The ratio between the total size of RocksDB level-(L+1) files and the total size of RocksDB level-L files for all L.
+# Its default value is 10(x). You can also change it to 5(x).
max-bytes-for-level-multiplier : 10
+
+# slotmigrate is mainly used to migrate slots; usually we set it to no.
+# When you migrate slots, you need to set it to yes and reload slotskeys beforehand.
+# slotmigrate [yes | no]
+slotmigrate : no
+
+# slotmigrate thread num
+slotmigrate-thread-num : 1
+
+# thread-migrate-keys-num 1/8 of the write_buffer_size_
+thread-migrate-keys-num : 64
+
# BlockBasedTable block_size, default 4k
# block-size: 4096
+
# block LRU cache, default 8M, 0 to disable
+# Supported Units [K|M|G], default unit [bytes]
# block-cache: 8388608
+
+# num-shard-bits default -1, the number of bits from cache keys to be used as the shard id.
+# The cache will be sharded into 2^num_shard_bits shards.
+# https://github.com/EighteenZi/rocksdb_wiki/blob/master/Block-Cache.md#lru-cache
+# num-shard-bits: -1
+
# whether the block cache is shared among the RocksDB instances, default is per CF
# share-block-cache: no
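+
+# Illustrative example (values are assumptions, not defaults): to use one
+# shared 512M block cache instead of a per-CF 8M cache, one could set:
+# block-cache: 536870912
+# share-block-cache: yes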
+
+# The slot number of pika when used with codis.
+default-slot-num : 1024
+
+# enable-partitioned-index-filters [yes | no]
+# When `cache-index-and-filter-blocks` is enabled, `pin_l0_filter_and_index_blocks_in_cache`
+# and `cache-index-and-filter-blocks` are suggested to be enabled.
+# https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
+# enable-partitioned-index-filters: default no
+
# whether or not index and filter blocks is stored in block cache
# cache-index-and-filter-blocks: no
+
+# pin_l0_filter_and_index_blocks_in_cache [yes | no]
+# When `cache-index-and-filter-blocks` is enabled, `pin_l0_filter_and_index_blocks_in_cache` is suggested to be enabled.
+# pin_l0_filter_and_index_blocks_in_cache : no
+
# when set to yes, bloomfilter of the last level will not be built
# optimize-filters-for-hits: no
# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
# level-compaction-dynamic-level-bytes: no
+
+################################## RocksDB Rate Limiter #######################
+# rocksdb rate limiter
+# https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html
+# https://github.com/EighteenZi/rocksdb_wiki/blob/master/Rate-Limiter.md
+################################################################################
+
+# rate limiter mode
+# 0: Read 1: Write 2: ReadAndWrite
+# rate-limiter-mode : default 1
+
+# rate limiter bandwidth, units in bytes, default 1024GB/s (no limit)
+# [Support Dynamically changeable] Sending 'config set rate-limiter-bandwidth' to a running pika changes its value dynamically.
+#rate-limiter-bandwidth : 1099511627776
+
+#rate-limiter-refill-period-us : 100000
+#
+#rate-limiter-fairness: 10
+
+# If auto_tuned is true: enables dynamic adjustment of the rate limit within the range
+# `[rate-limiter-bandwidth / 20, rate-limiter-bandwidth]`, according to the recent demand for background I/O.
+# rate limiter auto tune https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html. The default value is true.
+#rate-limiter-auto-tuned : true
+
+################################## RocksDB Blob Configure #####################
+# rocksdb blob configure
+# https://rocksdb.org/blog/2021/05/26/integrated-blob-db.html
+# wiki https://github.com/facebook/rocksdb/wiki/BlobDB
+################################################################################
+
+# enable rocksdb blob, default no
+# enable-blob-files : yes
+
+# Values at or above this threshold will be written to blob files during flush or compaction.
+# Supported Units [K|M|G], default unit is in [bytes].
+# min-blob-size : 4K
+
+# the size limit for blob files
+# Supported Units [K|M|G], default unit is in [bytes].
+# blob-file-size : 256M
+
+# The compression type to use for blob files. All blobs in the same file are compressed using the same algorithm.
+# Supported types: [snappy, zlib, lz4, zstd]. If you do not want to compress the blob files, please set its value to none.
+# [NOTICE] The Pika official binary release only links the snappy library statically, which means that
+# you should compile Pika from the source code and then statically link it with other compression algorithm libraries by yourself.
+# blob-compression-type : lz4
+
+# Set this to yes to make BlobDB actively relocate valid blobs from the oldest blob files as they are encountered during compaction.
+# The value option is [yes | no]
+# enable-blob-garbage-collection : no
+
+# The cutoff that the GC logic uses to determine which blob files should be considered "old".
+# This parameter can be tuned to adjust the trade-off between write amplification and space amplification.
+# blob-garbage-collection-age-cutoff : 0.25
+
+# If the ratio of garbage in the oldest blob files exceeds this threshold,
+# targeted compactions are scheduled in order to force garbage-collecting the blob files in question.
+# blob_garbage_collection_force_threshold : 1.0
+
+# the Cache object to use for blobs, default not open
+# blob-cache : 0
+
+# blob-num-shard-bits default -1, the number of bits from cache keys to be used as the shard id.
+# The cache will be sharded into 2^blob-num-shard-bits shards.
+# blob-num-shard-bits : -1
+
+# Rsync rate limiting configuration [default value is 200MB/s]
+# [USED BY SLAVE] The transmission speed (rsync rate) in full replication is controlled BY THE SLAVE NODE; you should modify throttle-bytes-per-second in the slave's pika.conf if you want to change the rsync rate limit.
+# [Dynamic Change Supported] Sending the command 'config set throttle-bytes-per-second new_value' to the SLAVE NODE dynamically adjusts the rsync rate during full sync (use config rewrite to persist the change).
+throttle-bytes-per-second : 207200000
+# Rsync timeout in the full sync stage [default value is 1000 ms]; unnecessary retries will happen if this value is too small.
+# [Dynamic Change Supported] Similar to throttle-bytes-per-second, rsync-timeout-ms can be changed dynamically by the 'config set' command.
+# [USED BY SLAVE] Similar to throttle-bytes-per-second, you should change the value of rsync-timeout-ms in the slave's conf file if it needs adjusting.
+rsync-timeout-ms : 1000
+# The valid range for max-rsync-parallel-num is [1, 4].
+# If an invalid value is provided, max-rsync-parallel-num will automatically be reset to 4.
+max-rsync-parallel-num : 4
+
+# The synchronization mode of Pika primary/secondary replication is determined by the ReplicationID. The ReplicationIDs within one replication cluster are the same.
+# replication-id :
+
+###################
+## Cache Settings
+###################
+# the number of caches for every db
+cache-num : 16
+
+# cache-model 0:cache_none 1:cache_read
+cache-model : 1
+# cache-type: string, set, zset, list, hash, bit
+cache-type: string, set, zset, list, hash, bit
+
+# Maximum number of fields in the zset redis cache.
+# On the disk DB, a zset key may have many fields. In the memory cache, we limit the maximum
+# number of fields that can exist in a zset, which is zset-cache-field-num-per-key, with a
+# default value of 512.
+zset-cache-field-num-per-key : 512
+
+# If the number of elements in a zset in the DB exceeds zset-cache-field-num-per-key,
+# we determine whether to cache the first 512 [zset-cache-field-num-per-key] elements
+# or the last 512 [zset-cache-field-num-per-key] elements in the zset based on zset-cache-start-direction.
+#
+# If zset-cache-start-direction is 0, cache the first 512 [zset-cache-field-num-per-key] elements from the head.
+# If zset-cache-start-direction is -1, cache the last 512 [zset-cache-field-num-per-key] elements.
+zset-cache-start-direction : 0
+
+
+# the cache maxmemory of every db, default is 10G
+cache-maxmemory : 10737418240
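+
+# Rough sizing note (illustrative): cache-maxmemory applies per db, so with
+# databases : 1 the redis cache of this instance is capped at about 10G
+# (10737418240 bytes); with 8 DBs the same setting could consume about 80G,
+# so scale the value accordingly.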
+
+# cache-maxmemory-policy
+# 0: volatile-lru -> Evict using approximated LRU among the keys with an expire set.
+# 1: allkeys-lru -> Evict any key using approximated LRU.
+# 2: volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
+# 3: allkeys-lfu -> Evict any key using approximated LFU.
+# 4: volatile-random -> Remove a random key among the ones with an expire set.
+# 5: allkeys-random -> Remove a random key, any key.
+# 6: volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# 7: noeviction -> Don't evict anything, just return an error on write operations.
+cache-maxmemory-policy : 1
+
+# cache-maxmemory-samples
+cache-maxmemory-samples: 5
+
+# cache-lfu-decay-time
+cache-lfu-decay-time: 1
+
+
+# It is possible to manage access to Pub/Sub channels with ACL rules as well. The
+# default Pub/Sub channels permission for new users is controlled by the
+# acl-pubsub-default configuration directive, which accepts one of these values:
+#
+# allchannels: grants access to all Pub/Sub channels
+# resetchannels: revokes access to all Pub/Sub channels
+#
+# acl-pubsub-default defaults to the 'resetchannels' permission.
+# acl-pubsub-default : resetchannels
+
+# ACL users are defined in the following format:
+# user : ... acl rules ...
+#
+# For example:
+#
+# user : worker on >password ~key* +@all
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside pika.conf to describe users.
+#
+# aclfile : ../conf/users.acl
+
+# (experimental)
+# It is possible to change the name of dangerous commands in a shared environment.
+# For instance, the CONFIG command may be renamed into something hard to guess. Warning: To prevent
+# data inconsistency caused by different configuration files, do not use the rename
+# command to modify write commands on the primary and secondary servers. If necessary,
+# ensure that the configuration files of the primary and secondary servers are consistent.
+# In addition, when using the command rename, you must not use "" to modify the command;
+# for example, rename-command: FLUSHDB "360flushdb" is incorrect, while
+# rename-command: FLUSHDB 360flushdb is correct. After the rename command is executed,
+# it is most appropriate to use a numeric string with uppercase or lowercase letters,
+# for example: rename-command : FLUSHDB joYAPNXRPmcarcR4ZDgC81TbdkSmLAzRPmcarcR
+# Warning: Currently this only applies to the flushdb, slaveof, bgsave, shutdown and config commands.
+# Warning: Ensure that the settings of rename-command on the master and slave servers are consistent.
+#
+# Example:
+# rename-command : FLUSHDB 360flushdb
+
+# [You can ignore this item]
+# This is NOT a regular conf item; it is an internal metric that relies on pika.conf for persistent storage.
+# 'internal-used-unfinished-full-sync' is used to generate the metric 'is_eligible_for_master_election',
+# which serves the scenario of codis-pika cluster reelection.
+# You'd better [DO NOT MODIFY IT UNLESS YOU KNOW WHAT YOU ARE DOING]
+internal-used-unfinished-full-sync :
+
+# For washing data when upgrading from 4.0.0 to 4.0.1.
+# https://github.com/OpenAtomFoundation/pika/issues/2886
+# default value: true
+wash-data: true
+
+# Pika automatic compaction strategy, a complement to rocksdb compaction.
+# Triggers the compaction background task periodically according to `compact-interval`.
+# Can choose `full-compact` or `obd-compact`.
+# obd-compact https://github.com/OpenAtomFoundation/pika/issues/2255
+compaction-strategy : obd-compact
+
+# For OBD_Compact
+# According to the number of sst files in rocksdb,
+# compact every `compact-every-num-of-files` files.
+compact-every-num-of-files : 10
+
+# For OBD_Compact
+# In another scan, if a file's creation time is
+# older than `force-compact-file-age-seconds`, a compaction of the upper
+# and lower boundaries of that file will be performed at the same time,
+# together with `compact-every-num-of-files` - 1 neighboring files.
+force-compact-file-age-seconds : 300
+
+# For OBD_Compact
+# Force a compaction when the ratio of deleted keys in an sst file
+# reaches `force-compact-min-delete-ratio` (in percent).
+force-compact-min-delete-ratio : 10
+
+# For OBD_Compact
+# Skip sst files that were created within the last
+# `dont-compact-sst-created-in-seconds` seconds.
+dont-compact-sst-created-in-seconds : 20
+
+# For OBD_Compact
+# Only treat sst files whose deleted-key ratio is at least
+# `best-delete-min-ratio` (in percent) as the best compaction candidates.
+best-delete-min-ratio : 10
\ No newline at end of file
diff --git a/tools/pika_migrate/detect_environment b/tools/pika_migrate/detect_environment
deleted file mode 100755
index a316ec02da..0000000000
--- a/tools/pika_migrate/detect_environment
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/sh
-
-OUTPUT=$1
-if test -z "$OUTPUT"; then
-  echo "usage: $0 <output-filename>" >&2
-  exit 1
-fi
-
-# Delete existing output, if it exists
-rm -f "$OUTPUT"
-touch "$OUTPUT"
-
-if test -z "$CXX"; then
-  CXX=g++
-fi
-
-# Test whether tcmalloc is available
-if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null \
-  -ltcmalloc 2>/dev/null; then
-  TCMALLOC_LDFLAGS=" -ltcmalloc"
-fi
-
-# Test whether malloc_extension is available
-$CXX $CFLAGS -x c++ - -o /dev/null -ltcmalloc 2>/dev/null <<EOF
-  #include <gperftools/malloc_extension.h>
-  int main() {
-    MallocExtension::instance()->Initialize();;
-    return 0;
-  }
-EOF
-if [ "$?" = 0 ]; then
-  TCMALLOC_EXTENSION_FLAGS=" -DTCMALLOC_EXTENSION"
-fi
-
-# Test whether Snappy library is installed
-# http://code.google.com/p/snappy/
-$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <snappy.h>
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-  ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lsnappy"
-fi
-
-# Test whether gflags library is installed
-# http://gflags.github.io/gflags/
-# check if the namespace is gflags
-$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
-  #include <gflags/gflags.h>
-  using namespace gflags;
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-  ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lgflags"
-else
-  # check if namespace is google
-  $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
-    #include <gflags/gflags.h>
-    using namespace google;
-    int main() {}
-EOF
-if [ "$?" = 0 ]; then
-  ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lgflags"
-fi
-fi
-
-# Test whether zlib library is installed
-$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <zlib.h>
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-  ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lz"
-fi
-
-# Test whether bzip library is installed
-$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <bzlib.h>
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-  ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lbz2"
-fi
-
-# Test whether lz4 library is installed
-$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <lz4.h>
-  #include <lz4hc.h>
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-  ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -llz4"
-fi
-
-# Test whether zstd library is installed
-$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <zstd.h>
-  int main() {}
-EOF
-if [ "$?"
= 0 ]; then - ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lzstd" -fi - - - -# Test processor nums -PROCESSOR_NUMS=$(cat /proc/cpuinfo | grep processor | wc -l) - -echo "ROCKSDB_LDFLAGS=$ROCKSDB_LDFLAGS" >> "$OUTPUT" -echo "TCMALLOC_EXTENSION_FLAGS=$TCMALLOC_EXTENSION_FLAGS" >> "$OUTPUT" -echo "TCMALLOC_LDFLAGS=$TCMALLOC_LDFLAGS" >> "$OUTPUT" -echo "PROCESSOR_NUMS=$PROCESSOR_NUMS" >> "$OUTPUT" diff --git a/tools/pika_migrate/img.png b/tools/pika_migrate/img.png deleted file mode 100644 index 756bfa2948..0000000000 Binary files a/tools/pika_migrate/img.png and /dev/null differ diff --git a/tools/pika_migrate/include/acl.h b/tools/pika_migrate/include/acl.h new file mode 100644 index 0000000000..77bd5ba8a3 --- /dev/null +++ b/tools/pika_migrate/include/acl.h @@ -0,0 +1,435 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_ACL_H +#define PIKA_ACL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pika_command.h" +#include "pstd_status.h" + +static const int USER_COMMAND_BITS_COUNT = 1024; + +enum class AclSelectorFlag { + ROOT = (1 << 0), // This is the root user permission selector + ALL_KEYS = (1 << 1), // The user can mention any key + ALL_COMMANDS = (1 << 2), // The user can run all commands + ALL_CHANNELS = (1 << 3), // The user can mention any Pub/Sub channel +}; + +enum class AclCategory { + KEYSPACE = (1ULL << 0), + READ = (1ULL << 1), + WRITE = (1ULL << 2), + SET = (1ULL << 3), + SORTEDSET = (1ULL << 4), + LIST = (1ULL << 5), + HASH = (1ULL << 6), + STRING = (1ULL << 7), + BITMAP = (1ULL << 8), + HYPERLOGLOG = (1ULL << 9), + GEO = (1ULL << 10), + STREAM = (1ULL << 11), + PUBSUB = (1ULL << 12), + ADMIN = (1ULL << 13), + FAST = (1ULL << 14), + SLOW = (1ULL << 15), + BLOCKING = (1ULL << 16), + DANGEROUS = (1ULL << 17), + CONNECTION = (1ULL << 18), + TRANSACTION = (1ULL << 19), + SCRIPTING = (1ULL << 20), +}; + +enum class AclUserFlag { + ENABLED = (1 << 0), // The user is active + DISABLED = (1 << 1), // The user is disabled + NO_PASS = (1 << 2), /* The user requires no password, any provided password will work. For the + default user, this also means that no AUTH is needed, and every + connection is immediately authenticated. 
*/ +}; + +enum class AclDeniedCmd { OK, CMD, KEY, CHANNEL, NUMBER, NO_SUB_CMD, NO_AUTH }; + +enum class AclLogCtx { + TOPLEVEL, + MULTI, + LUA, +}; + +// ACL key permission types +enum class AclPermission { + READ = (1 << 0), + WRITE = (1 << 1), + ALL = (READ | WRITE), +}; + +struct AclKeyPattern { + void ToString(std::string* str) { + if (flags & static_cast(AclPermission::ALL)) { + str->append("~"); + } else if (flags & static_cast(AclPermission::WRITE)) { + str->append("%W~"); + } else if (flags & static_cast(AclPermission::READ)) { + str->append("%R~"); + } + str->append(pattern); + } + + uint32_t flags; /* The CMD_KEYS_* flags for this key pattern */ + std::string pattern; /* The pattern to match keys against */ +}; + +class ACLLogEntry { + public: + ACLLogEntry() = delete; + ACLLogEntry(int32_t reason, int32_t context, const std::string& object, const std::string& username, int64_t ctime, + const std::string& cinfo) + : count_(1), + reason_(reason), + context_(context), + object_(object), + username_(username), + ctime_(ctime), + cinfo_(cinfo) {} + + bool Match(int32_t reason, int32_t context, int64_t ctime, const std::string& object, const std::string& username); + + void AddEntry(const std::string& cinfo, u_int64_t ctime); + + void GetReplyInfo(std::vector* vector); + + private: + uint64_t count_; + int32_t reason_; + int32_t context_; + std::string object_; + std::string username_; + int64_t ctime_; + std::string cinfo_; +}; + +class User; +class Acl; + +class AclSelector { + friend User; + + public: + explicit AclSelector() : AclSelector(0) {}; + explicit AclSelector(uint32_t flag); + explicit AclSelector(const AclSelector& selector); + ~AclSelector() = default; + + inline uint32_t Flags() const { return flags_; }; + inline bool HasFlags(uint32_t flag) const { return flags_ & flag; }; + inline void AddFlags(uint32_t flag) { flags_ |= flag; }; + inline void DecFlags(uint32_t flag) { flags_ &= ~flag; }; + bool EqualChannel(const std::vector& allChannel); + + private: + pstd::Status SetSelector(const std::string& op); + + pstd::Status SetSelectorFromOpSet(const std::string& opSet); + + void ACLDescribeSelector(std::string* str); + + void ACLDescribeSelector(std::vector& vector); + + AclDeniedCmd CheckCanExecCmd(std::shared_ptr& cmd, int8_t subCmdIndex, const std::vector& keys, + std::string* errKey); + + bool SetSelectorCommandBitsForCategory(const std::string& categoryName, bool allow); + void SetAllCommandSelector(); + void RestAllCommandSelector(); + + void InsertKeyPattern(const std::string& str, uint32_t flags); + + void InsertChannel(const std::string& str); + + void ChangeSelector(const Cmd* cmd, bool allow); + void ChangeSelector(const std::shared_ptr& cmd, bool allow); + pstd::Status ChangeSelector(const std::shared_ptr& cmd, const std::string& subCmd, bool allow); + + void SetSubCommand(uint32_t cmdId); + void SetSubCommand(uint32_t cmdId, uint32_t subCmdIndex); + void ResetSubCommand(); + void ResetSubCommand(uint32_t cmdId); + void ResetSubCommand(uint32_t cmdId, uint32_t subCmdIndex); + + bool CheckSubCommand(uint32_t cmdId, uint32_t subCmdIndex); + + void DescribeSelectorCommandRules(std::string* str); + + // process acl command op, and sub command + pstd::Status SetCommandOp(const std::string& op, bool allow); + + // when modify command, do update Selector commandRule string + void UpdateCommonRule(const std::string& rule, bool allow); + + // remove rule string from Selector commandRule + void RemoveCommonRule(const std::string& rule); + + // clean commandRule + void 
CleanCommandRule();
+
+  bool CheckKey(const std::string& key, const uint32_t cmdFlag);
+
+  bool CheckChannel(const std::string& key, bool isPattern);
+
+  uint32_t flags_;  // See SELECTOR_FLAG_*
+
+  /* The bit in allowedCommands_ is set if this user has the right to
+   * execute the corresponding command. */
+  std::bitset allowedCommands_;
+
+  // records subcommands: the key is the command id, the value is the subcommand bit index
+  std::map subCommand_;
+
+  /* A list of allowed key patterns. If this field is empty the user cannot mention any key in a command,
+   * unless the flag ALLKEYS is set in the user. */
+  std::list> patterns_;
+
+  /* A list of allowed Pub/Sub channel patterns. If this field is empty the user cannot mention any
+   * channel in a `PUBLISH` or [P][UNSUBSCRIBE] command, unless the flag ALLCHANNELS is set in the user. */
+  std::list channels_;
+
+  /* A string representation of the ordered categories and commands; it
+   * is used to regenerate the original ACL string for display. */
+  std::string commandRules_;
+};
+
+// ACL user
+class User {
+  friend Acl;
+
+ public:
+  User() = delete;
+  explicit User(std::string name);
+  explicit User(const User& user);
+  ~User() = default;
+
+  std::string Name() const;
+  // inline uint32_t Flags() const { return flags_; };
+  inline bool HasFlags(uint32_t flag) const { return flags_ & flag; };
+  inline void AddFlags(uint32_t flag) { flags_ |= flag; };
+  inline void DecFlags(uint32_t flag) { flags_ &= ~flag; };
+
+  void CleanAclString();
+
+  /**
+   * store a password
+   * A lock is required before the call
+   * @param password
+   */
+  void AddPassword(const std::string& password);
+
+  /**
+   * delete a stored password
+   * A lock is required before the call
+   * @param password
+   */
+  void RemovePassword(const std::string& password);
+
+  // clear the user's stored passwords
+  // A lock is required before the call
+  void CleanPassword();
+
+  // Add a selector to the user
+  // A lock is required before the call
+  void AddSelector(const std::shared_ptr& selector);
+
+  // Set rules for the user based on the given parameters.
+  // Use this overload because it locks the specified user.
+  pstd::Status SetUser(const std::vector& rules);
+
+  // Set the user rule with the given string
+  // A lock is required before the call
+  pstd::Status SetUser(const std::string& op);
+
+  pstd::Status CreateSelectorFromOpSet(const std::string& opSet);
+
+  // Get the user's default (root) selector
+  // A lock is required before the call
+  std::shared_ptr GetRootSelector();
+
+  void DescribeUser(std::string* str);
+
+  // Match the user's password during AUTH;
+  // returns true on match, false otherwise
+  bool MatchPassword(const std::string& password);
+
+  // handles the ACL GETUSER reply
+  void GetUserDescribe(CmdRes* res);
+
+  // Get the user's channel keys
+  // A lock is required before the call
+  std::vector AllChannelKey();
+
+  // check whether the user may execute the command
+  AclDeniedCmd CheckUserPermission(std::shared_ptr& cmd, const PikaCmdArgsType& argv, int8_t& subCmdIndex,
+                                   std::string* errKey);
+
+ private:
+  mutable std::shared_mutex mutex_;
+
+  const std::string name_;  // The username
+
+  std::atomic flags_ = static_cast(AclUserFlag::DISABLED);  // See USER_FLAG_*
+
+  std::set passwords_;  // passwords for this user
+
+  std::list> selectors_; /* A set of selectors this user validates commands
+                                       against. This list will always contain at least
+                                       one selector for backwards compatibility.
*/
+
+  std::string aclString_; /* cached string representation of the ACLs */
+};
+
+class Acl {
+  friend User;
+  friend AclSelector;
+
+ public:
+  explicit Acl() = default;
+  ~Acl() = default;
+
+  /**
+   * Initialize all ACL state
+   * @return
+   */
+  pstd::Status Initialization();
+
+  /**
+   * create the default ACL user
+   * @return
+   */
+  std::shared_ptr CreateDefaultUser();
+
+  std::shared_ptr CreatedUser(const std::string& name);
+
+  /**
+   * Set user properties according to the string "op".
+   * @param op acl rule string
+   */
+  pstd::Status SetUser(const std::string& userName, std::vector& op);
+
+  /**
+   * get a user from the users_ map
+   * @param userName
+   * @return
+   */
+  std::shared_ptr GetUser(const std::string& userName);
+
+  std::shared_ptr GetUserLock(const std::string& userName);
+
+  /**
+   * store a user into the users_ map
+   * @param user
+   */
+  void AddUser(const std::shared_ptr& user);
+
+  void AddUserLock(const std::shared_ptr& user);
+
+  // do user auth; the supplied password is plaintext, not a sha256 digest
+  std::shared_ptr Auth(const std::string& userName, const std::string& password);
+
+  // get all users
+  std::vector Users();
+
+  void DescribeAllUser(std::vector* content);
+
+  // save the ACL rules to file
+  pstd::Status SaveToFile();
+
+  // delete the given users from users_
+  std::set DeleteUser(const std::vector& userNames);
+
+  // reload users from the ACL file when the ACL LOAD command is executed
+  pstd::Status LoadUserFromFile(std::set* toUnAuthUsers);
+
+  void UpdateDefaultUserPassword(const std::string& pass);
+
+  void InitLimitUser(const std::string& bl, bool limit_exist);
+
+  // After a user's channels are modified, determine whether currently subscribed clients need to be disconnected
+  void KillPubsubClientsIfNeeded(const std::shared_ptr& origin, const std::shared_ptr& newUser);
+
+  // check whether the user can execute the command, after the command has been executed
+  // bool CheckUserCanExec(const std::shared_ptr& cmd, const PikaCmdArgsType& argv);
+
+  // Gets the category flag value from the category name
+  static uint32_t GetCommandCategoryFlagByName(const std::string& name);
+
+  // Obtains the category name from the category flag
+  static std::string GetCommandCategoryFlagByName(const uint32_t category);
+
+  static std::vector GetAllCategoryName();
+
+  static const std::string DefaultUser;
+  static const std::string DefaultLimitUser;
+  static const int64_t LogGroupingMaxTimeDelta;
+
+  // Adds a new entry to the ACL log, making sure to delete the oldest entry
+  // if we reach the maximum length allowed for the log.
+  void AddLogEntry(int32_t reason, int32_t context, const std::string& username, const std::string& object,
+                   const std::string& cInfo);
+
+  void GetLog(long count, CmdRes* res);
+  void ResetLog();
+
+ private:
+  /**
+   * This function is called once the server is already running and we are
+   * ready to start, in order to load the ACLs either from the pending list of
+   * users defined in pika.conf, or from the ACL file. The function exits with
+   * an error if the user tries to mix both loading methods.
+   */
+  pstd::Status LoadUsersAtStartup();
+
+  /**
+   * Loads the ACL from the specified filename: every line
+   * is validated and should be either empty or in the format used to specify
+   * users in the pika.conf configuration or in the ACL file, that is:
+   *
+   * user ... rules ...
+ * + * @param users pika.conf users rule + */ + pstd::Status LoadUserConfigured(std::vector& users); + + /** + * Load ACL from acl rule file + * @param fileName file full name + */ + pstd::Status LoadUserFromFile(const std::string& fileName); + + void ACLMergeSelectorArguments(std::vector& argv, std::vector* merged); + mutable std::shared_mutex mutex_; + + static std::array, 21> CommandCategories; + + static std::array, 3> UserFlags; + + static std::array, 3> SelectorFlags; + + std::map> users_; + + std::list> logEntries_; +}; + +#endif // PIKA_ACL_H diff --git a/tools/pika_migrate/include/migrator_thread.h b/tools/pika_migrate/include/migrator_thread.h index ed4141f90b..6be816a4a4 100644 --- a/tools/pika_migrate/include/migrator_thread.h +++ b/tools/pika_migrate/include/migrator_thread.h @@ -2,15 +2,17 @@ #define MIGRATOR_THREAD_H_ #include +#include -#include "pink/include/redis_cli.h" +#include "storage/storage.h" +#include "net/include/redis_cli.h" #include "include/pika_sender.h" -class MigratorThread : public pink::Thread { +class MigratorThread : public net::Thread { public: - MigratorThread(void *db, std::vector *senders, int type, int thread_num) : - db_(db), + MigratorThread(std::shared_ptr storage_, std::vector> *senders, int type, int thread_num) : + storage_(storage_), should_exit_(false), senders_(senders), type_(type), @@ -22,17 +24,17 @@ class MigratorThread : public pink::Thread { virtual ~ MigratorThread(); int64_t num() { - slash::MutexLock l(&num_mutex_); + std::lock_guard l(num_mutex_); return num_; } void Stop() { - should_exit_ = true; + should_exit_ = true; } private: void PlusNum() { - slash::MutexLock l(&num_mutex_); + std::lock_guard l(num_mutex_); ++num_; } @@ -44,20 +46,21 @@ class MigratorThread : public pink::Thread { void MigrateHashesDB(); void MigrateSetsDB(); void MigrateZsetsDB(); + void MigrateStreamsDB(); virtual void *ThreadMain(); private: - void* db_; + std::shared_ptr storage_; bool should_exit_; - std::vector *senders_; + std::vector> *senders_; int type_; int thread_num_; int thread_index_; int64_t num_; - slash::Mutex num_mutex_; + std::mutex num_mutex_; }; #endif diff --git a/tools/pika_migrate/include/pika_acl.h b/tools/pika_migrate/include/pika_acl.h new file mode 100644 index 0000000000..8d830581f8 --- /dev/null +++ b/tools/pika_migrate/include/pika_acl.h @@ -0,0 +1,48 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
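+
+// Illustrative usage sketch (a hypothetical client session, assuming the
+// subcommand set registered in subCmdName_ below):
+//
+//   ACL SETUSER worker on >password ~key* +@all
+//   ACL GETUSER worker
+//   ACL WHOAMI
+//
+// Per AclKeyPattern::ToString() in include/acl.h, a read-write key pattern is
+// rendered as "~key*", read-only as "%R~key*", and write-only as "%W~key*".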
+ +// pika ACL command +#ifndef PIKA_ACL_CMD_H +#define PIKA_ACL_CMD_H + +#include "include/pika_command.h" +#include "include/pika_server.h" + +extern PikaServer* g_pika_server; + +class PikaAclCmd : public Cmd { + public: + PikaAclCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) { + subCmdName_ = {"cat", "deluser", "dryrun", "genpass", "getuser", "list", "load", + "log", "save", "setuser", "users", "whoami", "help"}; + } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PikaAclCmd(*this); } + + private: + void DoInitial() override; + void Clear() override {} + + void Cat(); + void DelUser(); + void DryRun(); + void GenPass(); + void GetUser(); + void List(); + void Load(); + void Log(); + void Save(); + void SetUser(); + void Users(); + void WhoAmI(); + void Help(); + + std::string subCmd_; +}; + +#endif // PIKA_ACL_CMD_H diff --git a/tools/pika_migrate/include/pika_admin.h b/tools/pika_migrate/include/pika_admin.h index c8484f7e6e..1b1aa1bad3 100644 --- a/tools/pika_migrate/include/pika_admin.h +++ b/tools/pika_migrate/include/pika_admin.h @@ -6,34 +6,39 @@ #ifndef PIKA_ADMIN_H_ #define PIKA_ADMIN_H_ -#include -#include #include +#include #include +#include +#include +#include +#include +#include -#include "blackwidow/blackwidow.h" - +#include "include/acl.h" #include "include/pika_command.h" +#include "storage/storage.h" +#include "pika_db.h" /* * Admin */ class SlaveofCmd : public Cmd { public: - SlaveofCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), is_noone_(false) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlaveofCmd(*this); - } + SlaveofCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlaveofCmd(*this); } private: std::string master_ip_; - int64_t master_port_; - bool is_noone_; - virtual void DoInitial() override; - virtual void Clear() { - is_noone_ = false; + int64_t master_port_ = -1; + bool is_none_ = false; + void DoInitial() override; + void Clear() override { + is_none_ = false; master_ip_.clear(); master_port_ = 0; } @@ -41,174 +46,203 @@ class SlaveofCmd : public Cmd { class DbSlaveofCmd : public Cmd { public: - DbSlaveofCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new DbSlaveofCmd(*this); - } + DbSlaveofCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DbSlaveofCmd(*this); } private: std::string db_name_; - bool force_sync_; - bool is_noone_; - bool have_offset_; - int64_t filenum_; - int64_t offset_; - virtual void DoInitial() override; - virtual void Clear() { + bool force_sync_ = false; + bool is_none_ = false; + bool have_offset_ = false; + int64_t filenum_ = 0; + int64_t offset_ = 0; + void DoInitial() override; + void Clear() override { db_name_.clear(); force_sync_ = false; - is_noone_ = false; + is_none_ = false; have_offset_ = false; } }; class AuthCmd : public Cmd { 
public: - AuthCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new AuthCmd(*this); - } + AuthCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new AuthCmd(*this); } private: - std::string pwd_; - virtual void DoInitial() override; + void DoInitial() override; }; class BgsaveCmd : public Cmd { public: - BgsaveCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new BgsaveCmd(*this); - } + BgsaveCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new BgsaveCmd(*this); } private: - virtual void DoInitial() override; - virtual void Clear() { - bgsave_tables_.clear(); - } - std::set bgsave_tables_; + void DoInitial() override; + void Clear() override { bgsave_dbs_.clear(); } + std::set bgsave_dbs_; }; class CompactCmd : public Cmd { public: - CompactCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new CompactCmd(*this); - } + CompactCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new CompactCmd(*this); } private: - virtual void DoInitial() override; - virtual void Clear() { - struct_type_.clear(); - compact_tables_.clear(); + void DoInitial() override; + void Clear() override { + compact_dbs_.clear(); } - std::string struct_type_; - std::set compact_tables_; + std::set compact_dbs_; +}; + +// we can use pika/tests/helpers/test_queue.py to test this command +class CompactRangeCmd : public Cmd { + public: + CompactRangeCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new CompactRangeCmd(*this); } + + private: + void DoInitial() override; + void Clear() override { + compact_dbs_.clear(); + start_key_.clear(); + end_key_.clear(); + } + std::set compact_dbs_; + std::string start_key_; + std::string end_key_; }; class PurgelogstoCmd : public Cmd { public: - PurgelogstoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), num_(0) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PurgelogstoCmd(*this); - } + PurgelogstoCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PurgelogstoCmd(*this); } private: - uint32_t num_; - std::string table_; - virtual void DoInitial() override; + uint32_t num_ = 0; + std::string db_; + void DoInitial() override; }; class PingCmd : public Cmd { public: - 
PingCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PingCmd(*this); - } + PingCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PingCmd(*this); } private: - virtual void DoInitial() override; + void DoInitial() override; }; class SelectCmd : public Cmd { public: - SelectCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SelectCmd(*this); - } + SelectCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SelectCmd(*this); } private: - virtual void DoInitial() override; - virtual void Clear() { - table_name_.clear(); - } - std::string table_name_; + void DoInitial() override; + void Clear() override { db_name_.clear(); } + std::string db_name_; }; class FlushallCmd : public Cmd { public: - FlushallCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new FlushallCmd(*this); - } + FlushallCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + void Do() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new FlushallCmd(*this); } + bool FlushAllWithoutLock(); + void DoBinlog() override; + void DoBinlogByDB(const std::shared_ptr& sync_db); private: - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; + void DoInitial() override; + bool DoWithoutLock(std::shared_ptr db); + void DoFlushCache(std::shared_ptr db); + void Clear() override { flushall_succeed_ = false; } + std::string ToRedisProtocol() override; + + bool flushall_succeed_{false}; }; class FlushdbCmd : public Cmd { public: - FlushdbCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new FlushdbCmd(*this); - } + FlushdbCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + // The flush command belongs to the write categories, so the key cannot be empty + std::vector current_key() const override { return {""}; } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new FlushdbCmd(*this); } + std::string GetFlushDBname() { return db_name_; } + void DoBinlog() override; + bool DoWithoutLock(); private: - std::string db_name_; - virtual void DoInitial() override; - virtual void Clear() { + void DoInitial() override; + void Clear() override { db_name_.clear(); + flush_succeed_ = false; } + + bool flush_succeed_{false}; + std::string db_name_; }; class ClientCmd : 
public Cmd { public: - ClientCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); + ClientCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) { + subCmdName_ = {"getname", "setname", "list", "addr", "kill"}; + } + void Do() override; const static std::string CLIENT_LIST_S; const static std::string CLIENT_KILL_S; - virtual Cmd* Clone() override { - return new ClientCmd(*this); - } + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ClientCmd(*this); } private: - std::string operation_, info_; - virtual void DoInitial() override; + const static std::string KILLTYPE_NORMAL; + const static std::string KILLTYPE_PUBSUB; + + std::string operation_, info_, kill_type_; + void DoInitial() override; }; class InfoCmd : public Cmd { @@ -224,24 +258,25 @@ class InfoCmd : public Cmd { kInfoKeyspace, kInfoLog, kInfoData, + kInfoRocksDB, kInfo, kInfoAll, - kInfoDebug + kInfoDebug, + kInfoCommandStats, + kInfoCache }; - - InfoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), rescan_(false), off_(false) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new InfoCmd(*this); - } + InfoCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new InfoCmd(*this); } + void Execute() override; private: InfoSection info_section_; - bool rescan_; //whether to rescan the keyspace - bool off_; - std::set keyspace_scan_tables_; - + bool rescan_ = false; // whether to rescan the keyspace + bool off_ = false; + std::set keyspace_scan_dbs_; const static std::string kInfoSection; const static std::string kAllSection; const static std::string kServerSection; @@ -252,13 +287,16 @@ class InfoCmd : public Cmd { const static std::string kReplicationSection; const static std::string kKeyspaceSection; const static std::string kDataSection; + const static std::string kRocksDBSection; const static std::string kDebugSection; + const static std::string kCommandStatsSection; + const static std::string kCacheSection; - virtual void DoInitial() override; - virtual void Clear() { + void DoInitial() override; + void Clear() override { rescan_ = false; off_ = false; - keyspace_scan_tables_.clear(); + keyspace_scan_dbs_.clear(); } void InfoServer(std::string& info); @@ -266,141 +304,157 @@ class InfoCmd : public Cmd { void InfoStats(std::string& info); void InfoExecCount(std::string& info); void InfoCPU(std::string& info); - void InfoShardingReplication(std::string& info); void InfoReplication(std::string& info); void InfoKeyspace(std::string& info); void InfoData(std::string& info); + void InfoRocksDB(std::string& info); void InfoDebug(std::string& info); + void InfoCommandStats(std::string& info); + void InfoCache(std::string& info, std::shared_ptr db); + + std::string CacheStatusToString(int status); }; class ShutdownCmd : public Cmd { public: - ShutdownCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ShutdownCmd(*this); - } + ShutdownCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void 
Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ShutdownCmd(*this); } private: - virtual void DoInitial() override; + void DoInitial() override; }; class ConfigCmd : public Cmd { public: - ConfigCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ConfigCmd(*this); + ConfigCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) { + subCmdName_ = {"get", "set", "rewrite", "resetstat"}; } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ConfigCmd(*this); } + void Execute() override; private: std::vector config_args_v_; - virtual void DoInitial() override; - void ConfigGet(std::string &ret); - void ConfigSet(std::string &ret); - void ConfigRewrite(std::string &ret); - void ConfigResetstat(std::string &ret); + void DoInitial() override; + void ConfigGet(std::string& ret); + void ConfigSet(std::shared_ptr db); + void ConfigRewrite(std::string& ret); + void ConfigResetstat(std::string& ret); + void ConfigRewriteReplicationID(std::string& ret); }; class MonitorCmd : public Cmd { public: - MonitorCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new MonitorCmd(*this); - } + MonitorCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new MonitorCmd(*this); } private: - virtual void DoInitial() override; + void DoInitial() override; }; class DbsizeCmd : public Cmd { public: - DbsizeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new DbsizeCmd(*this); - } + DbsizeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DbsizeCmd(*this); } private: - virtual void DoInitial() override; + void DoInitial() override; }; class TimeCmd : public Cmd { public: - TimeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new TimeCmd(*this); - } + TimeCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new TimeCmd(*this); } + + private: + void DoInitial() override; +}; + +class LastsaveCmd : public Cmd { + public: + LastsaveCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new LastsaveCmd(*this); } private: - virtual void DoInitial() override; + void DoInitial() override; }; class DelbackupCmd : public Cmd { public: - DelbackupCmd(const std::string& name, 
int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new DelbackupCmd(*this); - } + DelbackupCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DelbackupCmd(*this); } private: - virtual void DoInitial() override; + void DoInitial() override; }; class EchoCmd : public Cmd { public: - EchoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new EchoCmd(*this); - } + EchoCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Merge() override{}; + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + Cmd* Clone() override { return new EchoCmd(*this); } private: std::string body_; - virtual void DoInitial() override; + void DoInitial() override; }; class ScandbCmd : public Cmd { public: - ScandbCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), type_(blackwidow::kAll) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ScandbCmd(*this); - } + ScandbCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ScandbCmd(*this); } private: - blackwidow::DataType type_; - virtual void DoInitial() override; - virtual void Clear() { - type_ = blackwidow::kAll; - } + storage::DataType type_ = storage::DataType::kAll; + void DoInitial() override; + void Clear() override { type_ = storage::DataType::kAll; } }; class SlowlogCmd : public Cmd { public: - enum SlowlogCondition{kGET, kLEN, kRESET}; - SlowlogCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), condition_(kGET) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlowlogCmd(*this); - } + enum SlowlogCondition { kGET, kLEN, kRESET }; + SlowlogCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlowlogCmd(*this); } + private: - int64_t number_; - SlowlogCmd::SlowlogCondition condition_; - virtual void DoInitial() override; - virtual void Clear() { + int64_t number_ = 10; + SlowlogCmd::SlowlogCondition condition_ = kGET; + void DoInitial() override; + void Clear() override { number_ = 10; condition_ = kGET; } @@ -408,52 +462,289 @@ class SlowlogCmd : public Cmd { class PaddingCmd : public Cmd { public: - PaddingCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PaddingCmd(*this); - } + PaddingCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PaddingCmd(*this); } + + private: + void DoInitial() 
override; + std::string ToRedisProtocol() override; +}; + +class PKPatternMatchDelCmd : public Cmd { + public: + PKPatternMatchDelCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKPatternMatchDelCmd(*this); } + void DoBinlog() override; + + private: + storage::DataType type_; + std::vector remove_keys_; + std::string pattern_; + int64_t max_count_; + void DoInitial() override; +}; + +class DummyCmd : public Cmd { + public: + DummyCmd() : Cmd("", 0, 0) {} + DummyCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DummyCmd(*this); } + + private: + void DoInitial() override; +}; + +class QuitCmd : public Cmd { + public: + QuitCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new QuitCmd(*this); } + + private: + void DoInitial() override; +}; + +class HelloCmd : public Cmd { + public: + HelloCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HelloCmd(*this); } + + private: + void DoInitial() override; +}; + +class DiskRecoveryCmd : public Cmd { + public: + DiskRecoveryCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DiskRecoveryCmd(*this); } + + private: + void DoInitial() override; + std::map background_errors_; +}; + +class ClearReplicationIDCmd : public Cmd { + public: + ClearReplicationIDCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ClearReplicationIDCmd(*this); } + + private: + void DoInitial() override; +}; + +class DisableWalCmd : public Cmd { + public: + DisableWalCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DisableWalCmd(*this); } private: - virtual void DoInitial(); - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; + void DoInitial() override; }; -#ifdef TCMALLOC_EXTENSION -class TcmallocCmd : public Cmd { +class CacheCmd : public Cmd { public: - TcmallocCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new TcmallocCmd(*this); + enum CacheCondition {kCLEAR_DB, kCLEAR_HITRATIO, kDEL_KEYS, kRANDOM_KEY}; + CacheCmd(const std::string& name, int arity, uint32_t 
flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new CacheCmd(*this); } + + private: + CacheCondition condition_; + std::vector keys_; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { + keys_.clear(); } +}; + +class ClearCacheCmd : public Cmd { + public: + ClearCacheCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ClearCacheCmd(*this); } private: - int64_t type_; - int64_t rate_; - virtual void DoInitial() override; + void DoInitial() override; }; -#endif -class PKPatternMatchDelCmd : public Cmd { - public: - PKPatternMatchDelCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PKPatternMatchDelCmd(*this); - } +#ifdef WITH_COMMAND_DOCS +class CommandCmd : public Cmd { + public: + CommandCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new CommandCmd(*this); } + + class CommandFieldCompare { + public: + CommandFieldCompare() = default; + bool operator()(const std::string&, const std::string&) const; + + private: + const static std::unordered_map kFieldNameOrder; + }; + + class Encodable; + using EncodablePtr = std::shared_ptr; + + class Encodable { + public: + friend CmdRes& operator<<(CmdRes& res, const Encodable& e) { return e.EncodeTo(res); } + EncodablePtr operator+(const EncodablePtr& other) { return MergeFrom(other); } + + protected: + virtual CmdRes& EncodeTo(CmdRes&) const = 0; + virtual EncodablePtr MergeFrom(const EncodablePtr& other) const = 0; + }; + + class EncodableInt : public Encodable { + public: + EncodableInt(int value) : value_(value) {} + EncodableInt(unsigned long long value) : value_(value) {} + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + int value_; + }; + + class EncodableString : public Encodable { + public: + EncodableString(std::string value) : value_(std::move(value)) {} + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + std::string value_; + }; + + class EncodableMap : public Encodable { + public: + using RedisMap = std::map; + EncodableMap(RedisMap values) : values_(std::move(values)) {} + template + static CmdRes& EncodeTo(CmdRes& res, const Map& map, const Map& specialization = Map()); + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + RedisMap values_; + + const static std::string kPrefix; + }; + + class EncodableSet : public Encodable { + public: + EncodableSet(std::vector values) : values_(std::move(values)) {} + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + std::vector values_; + + const static std::string kPrefix; + }; + + class EncodableArray : public Encodable { + public: + EncodableArray(std::vector values) : values_(std::move(values)) {} + + protected: + 
CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + std::vector values_; + }; + + class EncodableStatus : public Encodable { + public: + EncodableStatus(std::string value) : value_(std::move(value)) {} + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + std::string value_; + + const static std::string kPrefix; + }; private: - blackwidow::DataType type_; - std::string pattern_; - virtual void DoInitial() override; + void DoInitial() override; + + std::string command_; + std::vector::const_iterator cmds_begin_, cmds_end_; + + const static std::string kPikaField; + const static EncodablePtr kNotSupportedLiteral; + const static EncodablePtr kCompatibleLiteral; + const static EncodablePtr kBitSpecLiteral; + const static EncodablePtr kHyperLogLiteral; + const static EncodablePtr kPubSubLiteral; + + const static EncodablePtr kNotSupportedSpecialization; + const static EncodablePtr kCompatibleSpecialization; + const static EncodablePtr kBitSpecialization; + const static EncodablePtr kHyperLogSpecialization; + const static EncodablePtr kPubSubSpecialization; + + const static std::unordered_map kPikaSpecialization; + const static std::unordered_map kCommandDocs; }; + +static CommandCmd::EncodablePtr operator""_RedisInt(unsigned long long value); +static CommandCmd::EncodablePtr operator""_RedisString(const char* value); +static CommandCmd::EncodablePtr operator""_RedisStatus(const char* value); +static CommandCmd::EncodablePtr RedisMap(CommandCmd::EncodableMap::RedisMap values); +static CommandCmd::EncodablePtr RedisSet(std::vector values); +static CommandCmd::EncodablePtr RedisArray(std::vector values); + +#endif // WITH_COMMAND_DOCS + #endif // PIKA_ADMIN_H_ diff --git a/tools/pika_migrate/include/pika_auxiliary_thread.h b/tools/pika_migrate/include/pika_auxiliary_thread.h index 3e192bf280..ab0fa6aea2 100644 --- a/tools/pika_migrate/include/pika_auxiliary_thread.h +++ b/tools/pika_migrate/include/pika_auxiliary_thread.h @@ -6,20 +6,19 @@ #ifndef PIKA_AUXILIARY_THREAD_H_ #define PIKA_AUXILIARY_THREAD_H_ -#include "pink/include/pink_thread.h" +#include "net/include/net_thread.h" -#include "slash/include/slash_mutex.h" +#include "pstd/include/pstd_mutex.h" -class PikaAuxiliaryThread : public pink::Thread { +class PikaAuxiliaryThread : public net::Thread { public: - PikaAuxiliaryThread() : - mu_(), - cv_(&mu_) {} - virtual ~PikaAuxiliaryThread(); - slash::Mutex mu_; - slash::CondVar cv_; + PikaAuxiliaryThread() { set_thread_name("AuxiliaryThread"); } + ~PikaAuxiliaryThread() override; + pstd::Mutex mu_; + pstd::CondVar cv_; + private: - virtual void* ThreadMain(); + void* ThreadMain() override; }; #endif diff --git a/tools/pika_migrate/include/pika_binlog.h b/tools/pika_migrate/include/pika_binlog.h index 48d6b61f40..851de88746 100644 --- a/tools/pika_migrate/include/pika_binlog.h +++ b/tools/pika_migrate/include/pika_binlog.h @@ -6,110 +6,108 @@ #ifndef PIKA_BINLOG_H_ #define PIKA_BINLOG_H_ -#include "slash/include/env.h" -#include "slash/include/slash_mutex.h" -#include "slash/include/slash_status.h" +#include +#include "pstd/include/env.h" +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/pstd_status.h" +#include "pstd/include/noncopyable.h" #include "include/pika_define.h" -using slash::Status; -using slash::Slice; +std::string NewFileName(const std::string& name, uint32_t current); -std::string NewFileName(const 
std::string name, const uint32_t current);
-
-class Version {
+class Version final : public pstd::noncopyable {
 public:
-  Version(slash::RWFile *save);
+  Version(const std::shared_ptr& save);
  ~Version();
-  Status Init();
+  pstd::Status Init();
 
   // RWLock should be held when accessing members.
-  Status StableSave();
+  pstd::Status StableSave();
 
-  uint32_t pro_num_;
-  uint64_t pro_offset_;
-  uint64_t logic_id_;
+  uint32_t pro_num_ = 0;
+  uint64_t pro_offset_ = 0;
+  uint64_t logic_id_ = 0;
+  uint32_t term_ = 0;
 
-  pthread_rwlock_t rwlock_;
+  std::shared_mutex rwlock_;
 
  void debug() {
-    slash::RWLock(&rwlock_, false);
-    printf ("Current pro_num %u pro_offset %lu\n", pro_num_, pro_offset_);
+    std::shared_lock l(rwlock_);
+    printf("Current pro_num %u pro_offset %llu\n", pro_num_, pro_offset_);
  }
 
 private:
-
-  slash::RWFile *save_;
-
-  // No copying allowed;
-  Version(const Version&);
-  void operator=(const Version&);
+  // shared with versionfile_
+  std::shared_ptr save_;
 };
 
-class Binlog {
+class Binlog : public pstd::noncopyable {
 public:
-  Binlog(const std::string& Binlog_path, const int file_size = 100 * 1024 * 1024);
+  Binlog(std::string Binlog_path, int file_size = 100 * 1024 * 1024);
  ~Binlog();
 
-  void Lock() { mutex_.Lock(); }
-  void Unlock() { mutex_.Unlock(); }
-
-  Status Put(const std::string &item);
-  Status Put(const char* item, int len);
+  void Lock() { mutex_.lock(); }
+  void Unlock() { mutex_.unlock(); }
 
-  Status GetProducerStatus(uint32_t* filenum, uint64_t* pro_offset, uint64_t* logic_id = NULL);
+  pstd::Status Put(const std::string& item);
+  pstd::Status IsOpened();
+  pstd::Status GetProducerStatus(uint32_t* filenum, uint64_t* pro_offset, uint32_t* term = nullptr, uint64_t* logic_id = nullptr);
  /*
   * Set Producer pro_num and pro_offset with lock
   */
-  Status SetProducerStatus(uint32_t filenum, uint64_t pro_offset);
+  pstd::Status SetProducerStatus(uint32_t pro_num, uint64_t pro_offset, uint32_t term = 0, uint64_t index = 0);
+  // Need to hold Lock();
+  pstd::Status Truncate(uint32_t pro_num, uint64_t pro_offset, uint64_t index);
 
-  static Status AppendPadding(slash::WritableFile* file, uint64_t* len);
+  std::string filename() { return filename_; }
 
-  slash::WritableFile *queue() { return queue_; }
+  // need to hold mutex_
+  void SetTerm(uint32_t term) {
+    std::lock_guard l(version_->rwlock_);
+    version_->term_ = term;
+    version_->StableSave();
+  }
 
-  uint64_t file_size() {
-    return file_size_;
+  uint32_t term() {
+    std::shared_lock l(version_->rwlock_);
+    return version_->term_;
  }
 
-  std::string filename;
+  void Close();
 
 private:
-
+  pstd::Status Put(const char* item, int len);
+  pstd::Status EmitPhysicalRecord(RecordType t, const char* ptr, size_t n, int* temp_pro_offset);
+  static pstd::Status AppendPadding(pstd::WritableFile* file, uint64_t* len);
  void InitLogFile();
 
-  Status EmitPhysicalRecord(RecordType t, const char *ptr, size_t n, int *temp_pro_offset);
-
  /*
   * Produce
   */
-  Status Produce(const Slice &item, int *pro_offset);
+  pstd::Status Produce(const pstd::Slice& item, int* pro_offset);
 
-  uint32_t consumer_num_;
-  uint64_t item_num_;
+  std::atomic opened_;
 
-  Version* version_;
-  slash::WritableFile *queue_;
-  slash::RWFile *versionfile_;
+  std::unique_ptr version_;
+  std::unique_ptr queue_;
+  // versionfile_ is held as a shared_ptr because it is shared with Version::save_ and is still used inside ~Version().
+  std::shared_ptr versionfile_;
 
-  slash::Mutex mutex_;
+  pstd::Mutex mutex_;
 
-  uint32_t pro_num_;
+  uint32_t pro_num_ = 0;
 
-  int block_offset_;
+  int block_offset_ = 0;
 
-  char* pool_;
-  bool exit_all_consume_;
  const std::string binlog_path_;
 
-  uint64_t file_size_;
+  uint64_t file_size_ = 0;
 
-  // Not use
-  //int32_t retry_;
+  std::string filename_;
 
-  // No copying allowed
-  Binlog(const Binlog&);
-  void operator=(const Binlog&);
+  std::atomic binlog_io_error_;
 };
 
 #endif
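// A minimal usage sketch of the Binlog producer API above (a hypothetical
// illustration; the path is made up and error handling is elided):
//
//   Binlog log("./log/", 100 * 1024 * 1024);   // one binlog file per 100 MB
//   uint32_t filenum = 0;
//   uint64_t offset = 0;
//   log.GetProducerStatus(&filenum, &offset);  // read the current produce point
//   log.Put("binlog-item-payload");            // appending advances the offset
//   log.GetProducerStatus(&filenum, &offset);  // offset has moved forward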
diff --git a/tools/pika_migrate/include/pika_binlog_reader.h b/tools/pika_migrate/include/pika_binlog_reader.h
index 6b50fa0b4b..1d604b02f7 100644
--- a/tools/pika_migrate/include/pika_binlog_reader.h
+++ b/tools/pika_migrate/include/pika_binlog_reader.h
@@ -6,43 +6,43 @@
 #ifndef PIKA_BINLOG_READER_H_
 #define PIKA_BINLOG_READER_H_
 
-#include
 #include
+#include
+#include
 
-#include "slash/include/slash_status.h"
-#include "slash/include/env.h"
-#include "slash/include/slash_slice.h"
+#include "pstd/include/env.h"
+#include "pstd/include/pstd_slice.h"
+#include "pstd/include/pstd_status.h"
 
 #include "include/pika_binlog.h"
 
-using slash::Status;
-using slash::Slice;
-
 class PikaBinlogReader {
 public:
  PikaBinlogReader(uint32_t cur_filenum, uint64_t cur_offset);
  PikaBinlogReader();
-  ~PikaBinlogReader();
-  Status Get(std::string* scratch, uint32_t* filenum, uint64_t* offset);
-  int Seek(std::shared_ptr logger, uint32_t filenum, uint64_t offset);
+  ~PikaBinlogReader() = default;
+
+  pstd::Status Get(std::string* scratch, uint32_t* filenum, uint64_t* offset);
+  int Seek(const std::shared_ptr& logger, uint32_t filenum, uint64_t offset);
  bool ReadToTheEnd();
  void GetReaderStatus(uint32_t* cur_filenum, uint64_t* cur_offset);
+
 private:
  bool GetNext(uint64_t* size);
-  unsigned int ReadPhysicalRecord(slash::Slice *redult, uint32_t* filenum, uint64_t* offset);
+  unsigned int ReadPhysicalRecord(pstd::Slice* result, uint32_t* filenum, uint64_t* offset);
  // Returns the scratch binlog and its corresponding offset
-  Status Consume(std::string* scratch, uint32_t* filenum, uint64_t* offset);
+  pstd::Status Consume(std::string* scratch, uint32_t* filenum, uint64_t* offset);
 
-  pthread_rwlock_t rwlock_;
-  uint32_t cur_filenum_;
-  uint64_t cur_offset_;
-  uint64_t last_record_offset_;
+  std::shared_mutex rwlock_;
+  uint32_t cur_filenum_ = 0;
+  uint64_t cur_offset_ = 0;
+  uint64_t last_record_offset_ = 0;
 
  std::shared_ptr logger_;
-  slash::SequentialFile *queue_;
+  std::unique_ptr queue_;
 
-  char* const backing_store_;
-  Slice buffer_;
+  std::unique_ptr const backing_store_;
+  pstd::Slice buffer_;
 };
 
 #endif  // PIKA_BINLOG_READER_H_
diff --git a/tools/pika_migrate/include/pika_binlog_transverter.h b/tools/pika_migrate/include/pika_binlog_transverter.h
index 14244f55e0..d85d958667 100644
--- a/tools/pika_migrate/include/pika_binlog_transverter.h
+++ b/tools/pika_migrate/include/pika_binlog_transverter.h
@@ -6,17 +6,19 @@
 #ifndef PIKA_BINLOG_TRANSVERTER_H_
 #define PIKA_BINLOG_TRANSVERTER_H_
 
-#include
-#include
-#include
 #include
+#include
+#include
+#include
 
-/*
- * *********************** Type First Binlog Item Format ***********************
- * | <Type> | <Create Time> | <Server Id> | <Logic Id> | <File Num> | <Offset> | <Content Length> | <Content> |
- *   2 Bytes   4 Bytes        4 Bytes       8 Bytes      4 Bytes      8 Bytes    4 Bytes            content length Bytes
- *
+/******************* Type First Binlog Item Format ******************
+ * +-----------------------------------------------------------------+
+ * | Type (2 bytes) | Create Time (4 bytes) | Term Id (4 bytes) |
+ *
|-----------------------------------------------------------------| + * | Logic Id (8 bytes) | File Num (4 bytes) | Offset (8 bytes) | + * |-----------------------------------------------------------------| + * | Content Length (4 bytes) | Content (content length bytes) | + * +-----------------------------------------------------------------+ */ #define BINLOG_ENCODE_LEN 34 @@ -24,68 +26,52 @@ enum BinlogType { TypeFirst = 1, }; - const int BINLOG_ITEM_HEADER_SIZE = 34; const int PADDING_BINLOG_PROTOCOL_SIZE = 22; const int SPACE_STROE_PARAMETER_LENGTH = 5; class BinlogItem { - public: - BinlogItem() : - exec_time_(0), - server_id_(0), - logic_id_(0), - filenum_(0), - offset_(0), - content_("") {} - - friend class PikaBinlogTransverter; - - uint32_t exec_time() const; - uint32_t server_id() const; - uint64_t logic_id() const; - uint32_t filenum() const; - uint64_t offset() const; - std::string content() const; - std::string ToString() const; - - void set_exec_time(uint32_t exec_time); - void set_server_id(uint32_t server_id); - void set_logic_id(uint64_t logic_id); - void set_filenum(uint32_t filenum); - void set_offset(uint64_t offset); - - private: - uint32_t exec_time_; - uint32_t server_id_; - uint64_t logic_id_; - uint32_t filenum_; - uint64_t offset_; - std::string content_; - std::vector extends_; + public: + BinlogItem() = default; + + friend class PikaBinlogTransverter; + + uint32_t exec_time() const; + uint32_t term_id() const; + uint64_t logic_id() const; + uint32_t filenum() const; + uint64_t offset() const; + std::string content() const; + std::string ToString() const; + + void set_exec_time(uint32_t exec_time); + void set_term_id(uint32_t term_id); + void set_logic_id(uint64_t logic_id); + void set_filenum(uint32_t filenum); + void set_offset(uint64_t offset); + + private: + uint32_t exec_time_ = 0; + uint32_t term_id_ = 0; + uint64_t logic_id_ = 0; + uint32_t filenum_ = 0; + uint64_t offset_ = 0; + std::string content_; + std::vector extends_; }; -class PikaBinlogTransverter{ - public: - PikaBinlogTransverter() {}; - static std::string BinlogEncode(BinlogType type, - uint32_t exec_time, - uint32_t server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset, - const std::string& content, - const std::vector& extends); +class PikaBinlogTransverter { + public: + PikaBinlogTransverter()= default;; + static std::string BinlogEncode(BinlogType type, uint32_t exec_time, uint32_t term_id, uint64_t logic_id, + uint32_t filenum, uint64_t offset, const std::string& content, + const std::vector& extends); - static bool BinlogDecode(BinlogType type, - const std::string& binlog, - BinlogItem* binlog_item); + static bool BinlogDecode(BinlogType type, const std::string& binlog, BinlogItem* binlog_item); - static std::string ConstructPaddingBinlog(BinlogType type, uint32_t size); + static std::string ConstructPaddingBinlog(BinlogType type, uint32_t size); - static bool BinlogItemWithoutContentDecode(BinlogType type, - const std::string& binlog, - BinlogItem* binlog_item); + static bool BinlogItemWithoutContentDecode(BinlogType type, const std::string& binlog, BinlogItem* binlog_item); }; #endif diff --git a/tools/pika_migrate/include/pika_bit.h b/tools/pika_migrate/include/pika_bit.h index 257e9cb866..94e7767b16 100644 --- a/tools/pika_migrate/include/pika_bit.h +++ b/tools/pika_migrate/include/pika_bit.h @@ -6,110 +6,130 @@ #ifndef PIKA_BIT_H_ #define PIKA_BIT_H_ -#include "blackwidow/blackwidow.h" +#include "storage/storage.h" +#include "include/acl.h" #include 
"include/pika_command.h" -#include "include/pika_partition.h" +#include "include/pika_kv.h" /* * bitoperation */ class BitGetCmd : public Cmd { public: - BitGetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { + BitGetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::BITMAP)){}; + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitGetCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new BitGetCmd(*this); } + private: std::string key_; - int64_t bit_offset_; - virtual void Clear() { + int64_t bit_offset_ = -1; + rocksdb::Status s_; + void Clear() override { key_ = ""; bit_offset_ = -1; } - virtual void DoInitial() override; + void DoInitial() override; }; class BitSetCmd : public Cmd { public: - BitSetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { + BitSetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::BITMAP)){}; + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitSetCmd(*this); - } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new BitSetCmd(*this); } + private: std::string key_; - int64_t bit_offset_; - int64_t on_; - virtual void Clear() { + int64_t bit_offset_; + int64_t on_; + rocksdb::Status s_; + void Clear() override { key_ = ""; bit_offset_ = -1; on_ = -1; } - virtual void DoInitial() override; + void DoInitial() override; }; class BitCountCmd : public Cmd { public: - BitCountCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { + BitCountCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::BITMAP)){}; + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitCountCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new BitCountCmd(*this); } + private: std::string key_; - bool count_all_; - int64_t start_offset_; - int64_t end_offset_; - virtual void Clear() { + bool count_all_; + int64_t start_offset_; + int64_t end_offset_; + rocksdb::Status s_; + void Clear() override { key_ = ""; count_all_ = false; start_offset_ = -1; end_offset_ = -1; } - virtual void DoInitial() override; + void DoInitial() override; }; class BitPosCmd : public Cmd { public: - BitPosCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { + 
BitPosCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::BITMAP)){}; + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitPosCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new BitPosCmd(*this); } + private: std::string key_; - bool pos_all_; - bool endoffset_set_; + bool pos_all_; + bool endoffset_set_; int64_t bit_val_; - int64_t start_offset_; - int64_t end_offset_; - virtual void Clear() { + int64_t start_offset_; + int64_t end_offset_; + rocksdb::Status s_; + void Clear() override { key_ = ""; pos_all_ = false; endoffset_set_ = false; @@ -117,26 +137,46 @@ class BitPosCmd : public Cmd { start_offset_ = -1; end_offset_ = -1; } - virtual void DoInitial() override; + void DoInitial() override; }; class BitOpCmd : public Cmd { public: - BitOpCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitOpCmd(*this); + BitOpCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::BITMAP)) { + set_cmd_ = std::make_shared(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); + }; + BitOpCmd(const BitOpCmd& other) + : Cmd(other), + dest_key_(other.dest_key_), + src_keys_(other.src_keys_), + op_(other.op_), + value_to_dest_(other.value_to_dest_) { + set_cmd_ = std::make_shared(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); } + + std::vector current_key() const override { return {dest_key_}; } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new BitOpCmd(*this); } + void DoBinlog() override; + private: std::string dest_key_; + rocksdb::Status s_; std::vector src_keys_; - blackwidow::BitOpType op_; - virtual void Clear() { + storage::BitOpType op_; + void Clear() override { dest_key_ = ""; src_keys_.clear(); - op_ = blackwidow::kBitOpDefault; + op_ = storage::kBitOpDefault; } - virtual void DoInitial() override; + void DoInitial() override; + // used to write binlog + std::string value_to_dest_; + std::shared_ptr set_cmd_; }; #endif diff --git a/tools/pika_migrate/include/pika_cache.h b/tools/pika_migrate/include/pika_cache.h new file mode 100644 index 0000000000..d82627ced7 --- /dev/null +++ b/tools/pika_migrate/include/pika_cache.h @@ -0,0 +1,226 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
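// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the patch. Every bitmap
// command above follows the same cache-through contract; a hypothetical read
// command would override the same hooks. Flags combine bitwise, e.g.
// kCmdFlagsRead | kCmdFlagsBit.
//
// class MyBitLenCmd : public Cmd {
//  public:
//   MyBitLenCmd(const std::string& name, int arity, uint32_t flag)
//       : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::BITMAP)) {}
//   void Do() override {}                  // authoritative read from RocksDB
//   void ReadCache() override {}           // fast path: serve from cache
//   void DoThroughDB() override { Do(); }  // cache miss: fall through to DB
//   void DoUpdateCache() override {}       // refill cache after the DB read
//   void Split(const HintKeys& hint_keys) override {}
//   void Merge() override {}
//   Cmd* Clone() override { return new MyBitLenCmd(*this); }
//  private:
//   void DoInitial() override {}
// };
// ---------------------------------------------------------------------------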
+ +#ifndef PIKA_CACHE_H_ +#define PIKA_CACHE_H_ + +#include +#include +#include + +#include "include/pika_define.h" +#include "include/pika_zset.h" +#include "include/pika_command.h" +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/pstd_status.h" +#include "cache/include/cache.h" +#include "storage/storage.h" + +class PikaCacheLoadThread; +class ZIncrbyCmd; +class ZRangebyscoreCmd; +class ZRevrangebyscoreCmd; +class ZCountCmd; +enum RangeStatus { RangeError = 1, RangeHit, RangeMiss }; + +struct CacheInfo { + int status = PIKA_CACHE_STATUS_NONE; + uint32_t cache_num = 0; + int64_t keys_num = 0; + size_t used_memory = 0; + int64_t hits = 0; + int64_t misses = 0; + uint64_t async_load_keys_num = 0; + uint32_t waitting_load_keys_num = 0; + void clear() { + status = PIKA_CACHE_STATUS_NONE; + cache_num = 0; + keys_num = 0; + used_memory = 0; + hits = 0; + misses = 0; + async_load_keys_num = 0; + waitting_load_keys_num = 0; + } +}; + +class PikaCache : public pstd::noncopyable, public std::enable_shared_from_this { + public: + PikaCache(int zset_cache_start_direction, int zset_cache_field_num_per_key); + ~PikaCache(); + + rocksdb::Status Init(uint32_t cache_num, cache::CacheConfig *cache_cfg); + rocksdb::Status Reset(uint32_t cache_num, cache::CacheConfig *cache_cfg = nullptr); + int64_t TTL(std::string &key); + void ResetConfig(cache::CacheConfig *cache_cfg); + void Destroy(void); + void SetCacheStatus(int status); + int CacheStatus(void); + void ClearHitRatio(void); + // Normal Commands + void Info(CacheInfo& info); + bool Exists(std::string& key); + void FlushCache(void); + void ProcessCronTask(void); + + rocksdb::Status Del(const std::vector& keys); + rocksdb::Status Expire(std::string& key, int64_t ttl); + rocksdb::Status Expireat(std::string& key, int64_t ttl_sec); + rocksdb::Status TTL(std::string& key, int64_t* ttl); + rocksdb::Status Persist(std::string& key); + rocksdb::Status Type(std::string& key, std::string* value); + rocksdb::Status RandomKey(std::string* key); + rocksdb::Status GetType(const std::string& key, bool single, std::vector& types); + + // String Commands + rocksdb::Status Set(std::string& key, std::string& value, int64_t ttl); + rocksdb::Status Setnx(std::string& key, std::string& value, int64_t ttl); + rocksdb::Status SetnxWithoutTTL(std::string& key, std::string& value); + rocksdb::Status Setxx(std::string& key, std::string& value, int64_t ttl); + rocksdb::Status SetxxWithoutTTL(std::string& key, std::string& value); + rocksdb::Status MSet(const std::vector& kvs); + rocksdb::Status Get(std::string& key, std::string* value); + rocksdb::Status MGet(const std::vector& keys, std::vector* vss); + rocksdb::Status Incrxx(std::string& key); + rocksdb::Status Decrxx(std::string& key); + rocksdb::Status IncrByxx(std::string& key, uint64_t incr); + rocksdb::Status DecrByxx(std::string& key, uint64_t incr); + rocksdb::Status Incrbyfloatxx(std::string& key, long double incr); + rocksdb::Status Appendxx(std::string& key, std::string& value); + rocksdb::Status GetRange(std::string& key, int64_t start, int64_t end, std::string* value); + rocksdb::Status SetRangexx(std::string& key, int64_t start, std::string& value); + rocksdb::Status Strlen(std::string& key, int32_t* len); + + // Hash Commands + rocksdb::Status HDel(std::string& key, std::vector& fields); + rocksdb::Status HSet(std::string& key, std::string& field, std::string& value); + rocksdb::Status HSetIfKeyExist(std::string& key, std::string& field, std::string& value); + rocksdb::Status 
HSetIfKeyExistAndFieldNotExist(std::string& key, std::string& field, std::string& value); + rocksdb::Status HMSet(std::string& key, std::vector& fvs); + rocksdb::Status HMSetnx(std::string& key, std::vector& fvs, int64_t ttl); + rocksdb::Status HMSetnxWithoutTTL(std::string& key, std::vector& fvs); + rocksdb::Status HMSetxx(std::string& key, std::vector& fvs); + rocksdb::Status HGet(std::string& key, std::string& field, std::string* value); + rocksdb::Status HMGet(std::string& key, std::vector& fields, std::vector* vss); + rocksdb::Status HGetall(std::string& key, std::vector* fvs); + rocksdb::Status HKeys(std::string& key, std::vector* fields); + rocksdb::Status HVals(std::string& key, std::vector* values); + rocksdb::Status HExists(std::string& key, std::string& field); + rocksdb::Status HIncrbyxx(std::string& key, std::string& field, int64_t value); + rocksdb::Status HIncrbyfloatxx(std::string& key, std::string& field, long double value); + rocksdb::Status HLen(std::string& key, uint64_t* len); + rocksdb::Status HStrlen(std::string& key, std::string& field, uint64_t* len); + + // List Commands + rocksdb::Status LIndex(std::string& key, int64_t index, std::string* element); + rocksdb::Status LInsert(std::string& key, storage::BeforeOrAfter& before_or_after, std::string& pivot, std::string& value); + rocksdb::Status LLen(std::string& key, uint64_t* len); + rocksdb::Status LPop(std::string& key, std::string* element); + rocksdb::Status LPush(std::string& key, std::vector &values); + rocksdb::Status LPushx(std::string& key, std::vector &values); + rocksdb::Status LRange(std::string& key, int64_t start, int64_t stop, std::vector* values); + rocksdb::Status LRem(std::string& key, int64_t count, std::string& value); + rocksdb::Status LSet(std::string& key, int64_t index, std::string& value); + rocksdb::Status LTrim(std::string& key, int64_t start, int64_t stop); + rocksdb::Status RPop(std::string& key, std::string* element); + rocksdb::Status RPush(std::string& key, std::vector &values); + rocksdb::Status RPushx(std::string& key, std::vector &values); + rocksdb::Status RPushnx(std::string& key, std::vector &values, int64_t ttl); + rocksdb::Status RPushnxWithoutTTL(std::string& key, std::vector &values); + + // Set Commands + rocksdb::Status SAdd(std::string& key, std::vector& members); + rocksdb::Status SAddIfKeyExist(std::string& key, std::vector& members); + rocksdb::Status SAddnx(std::string& key, std::vector& members, int64_t ttl); + rocksdb::Status SAddnxWithoutTTL(std::string& key, std::vector& members); + rocksdb::Status SCard(std::string& key, uint64_t* len); + rocksdb::Status SIsmember(std::string& key, std::string& member); + rocksdb::Status SMembers(std::string& key, std::vector* members); + rocksdb::Status SRem(std::string& key, std::vector& members); + rocksdb::Status SRandmember(std::string& key, int64_t count, std::vector* members); + + // ZSet Commands + rocksdb::Status ZAdd(std::string& key, std::vector& score_members); + rocksdb::Status ZAddIfKeyExist(std::string& key, std::vector& score_members); + rocksdb::Status ZAddnx(std::string& key, std::vector& score_members, int64_t ttl); + rocksdb::Status ZAddnxWithoutTTL(std::string& key, std::vector& score_members); + rocksdb::Status ZCard(std::string& key, uint32_t* len, const std::shared_ptr& db); + rocksdb::Status ZCount(std::string& key, std::string& min, std::string& max, uint64_t* len, ZCountCmd* cmd); + rocksdb::Status ZIncrby(std::string& key, std::string& member, double increment); + rocksdb::Status 
ZIncrbyIfKeyExist(std::string& key, std::string& member, double increment, ZIncrbyCmd* cmd, const std::shared_ptr& db); + rocksdb::Status ZRange(std::string& key, int64_t start, int64_t stop, std::vector* score_members, + const std::shared_ptr& db); + rocksdb::Status ZRangebyscore(std::string& key, std::string& min, std::string& max, + std::vector* score_members, ZRangebyscoreCmd* cmd); + rocksdb::Status ZRank(std::string& key, std::string& member, int64_t* rank, const std::shared_ptr& db); + rocksdb::Status ZRem(std::string& key, std::vector& members, std::shared_ptr db); + rocksdb::Status ZRemrangebyrank(std::string& key, std::string& min, std::string& max, int32_t ele_deleted = 0, + const std::shared_ptr& db = nullptr); + rocksdb::Status ZRemrangebyscore(std::string& key, std::string& min, std::string& max, const std::shared_ptr& db); + rocksdb::Status ZRevrange(std::string& key, int64_t start, int64_t stop, std::vector* score_members, + const std::shared_ptr& db); + rocksdb::Status ZRevrangebyscore(std::string& key, std::string& min, std::string& max, + std::vector* score_members, ZRevrangebyscoreCmd* cmd, + const std::shared_ptr& db); + rocksdb::Status ZRevrangebylex(std::string& key, std::string& min, std::string& max, std::vector* members, + const std::shared_ptr& db); + rocksdb::Status ZRevrank(std::string& key, std::string& member, int64_t *rank, const std::shared_ptr& db); + rocksdb::Status ZScore(std::string& key, std::string& member, double* score, const std::shared_ptr& db); + rocksdb::Status ZRangebylex(std::string& key, std::string& min, std::string& max, std::vector* members, const std::shared_ptr& db); + rocksdb::Status ZLexcount(std::string& key, std::string& min, std::string& max, uint64_t* len, + const std::shared_ptr& db); + rocksdb::Status ZRemrangebylex(std::string& key, std::string& min, std::string& max, const std::shared_ptr& db); + + // Bit Commands + rocksdb::Status SetBit(std::string& key, size_t offset, int64_t value); + rocksdb::Status SetBitIfKeyExist(std::string& key, size_t offset, int64_t value); + rocksdb::Status GetBit(std::string& key, size_t offset, int64_t* value); + rocksdb::Status BitCount(std::string& key, int64_t start, int64_t end, int64_t* value, bool have_offset); + rocksdb::Status BitPos(std::string& key, int64_t bit, int64_t* value); + rocksdb::Status BitPos(std::string& key, int64_t bit, int64_t start, int64_t* value); + rocksdb::Status BitPos(std::string& key, int64_t bit, int64_t start, int64_t end, int64_t* value); + + // Cache + rocksdb::Status WriteKVToCache(std::string& key, std::string& value, int64_t ttl); + rocksdb::Status WriteHashToCache(std::string& key, std::vector& fvs, int64_t ttl); + rocksdb::Status WriteListToCache(std::string& key, std::vector &values, int64_t ttl); + rocksdb::Status WriteSetToCache(std::string& key, std::vector& members, int64_t ttl); + rocksdb::Status WriteZSetToCache(std::string& key, std::vector& score_members, int64_t ttl); + void PushKeyToAsyncLoadQueue(const char key_type, std::string& key, const std::shared_ptr& db); + rocksdb::Status CacheZCard(std::string& key, uint64_t* len); + + private: + + rocksdb::Status InitWithoutLock(uint32_t cache_num, cache::CacheConfig* cache_cfg); + void DestroyWithoutLock(void); + int CacheIndex(const std::string& key); + RangeStatus CheckCacheRange(int32_t cache_len, int32_t db_len, int64_t start, int64_t stop, int64_t& out_start, + int64_t& out_stop); + RangeStatus CheckCacheRevRange(int32_t cache_len, int32_t db_len, int64_t start, int64_t stop, int64_t& 
out_start, + int64_t& out_stop); + RangeStatus CheckCacheRangeByScore(uint64_t cache_len, double cache_min, double cache_max, double min, + double max, bool left_close, bool right_close); + bool CacheSizeEqsDB(std::string& key, const std::shared_ptr& db); + void GetMinMaxScore(std::vector& score_members, double &min, double &max); + bool GetCacheMinMaxSM(cache::RedisCache* cache_obj, std::string& key, storage::ScoreMember &min_m, + storage::ScoreMember &max_m); + bool ReloadCacheKeyIfNeeded(cache::RedisCache* cache_obj, std::string& key, int mem_len = -1, int db_len = -1, + const std::shared_ptr& db = nullptr); + rocksdb::Status CleanCacheKeyIfNeeded(cache::RedisCache* cache_obj, std::string& key); + + private: + std::atomic cache_status_; + uint32_t cache_num_ = 0; + + // currently only take effects to zset + int zset_cache_start_direction_ = 0; + int zset_cache_field_num_per_key_ = 0; + std::shared_mutex rwlock_; + std::unique_ptr cache_load_thread_; + std::vector caches_; + std::vector> cache_mutexs_; +}; + +#endif diff --git a/tools/pika_migrate/include/pika_cache_load_thread.h b/tools/pika_migrate/include/pika_cache_load_thread.h new file mode 100644 index 0000000000..fa949e8d81 --- /dev/null +++ b/tools/pika_migrate/include/pika_cache_load_thread.h @@ -0,0 +1,55 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + + +#ifndef PIKA_CACHE_LOAD_THREAD_H_ +#define PIKA_CACHE_LOAD_THREAD_H_ + +#include +#include +#include +#include + +#include "include/pika_cache.h" +#include "include/pika_define.h" +#include "net/include/net_thread.h" +#include "storage/storage.h" + +class PikaCacheLoadThread : public net::Thread { + public: + PikaCacheLoadThread(int zset_cache_start_direction, int zset_cache_field_num_per_key); + ~PikaCacheLoadThread() override; + + uint64_t AsyncLoadKeysNum(void) { return async_load_keys_num_; } + uint32_t WaittingLoadKeysNum(void) { return waitting_load_keys_num_; } + void Push(const char key_type, std::string& key, const std::shared_ptr& db); + + private: + bool LoadKV(std::string& key, const std::shared_ptr& db); + bool LoadHash(std::string& key, const std::shared_ptr& db); + bool LoadList(std::string& key, const std::shared_ptr& db); + bool LoadSet(std::string& key, const std::shared_ptr& db); + bool LoadZset(std::string& key, const std::shared_ptr& db); + bool LoadKey(const char key_type, std::string& key, const std::shared_ptr& db); + virtual void* ThreadMain() override; + + private: + std::atomic_bool should_exit_; + std::deque>> loadkeys_queue_; + + pstd::CondVar loadkeys_cond_; + pstd::Mutex loadkeys_mutex_; + + std::unordered_map loadkeys_map_; + pstd::Mutex loadkeys_map_mutex_; + std::atomic_uint64_t async_load_keys_num_; + std::atomic_uint32_t waitting_load_keys_num_; + // currently only take effects to zset + int zset_cache_start_direction_; + int zset_cache_field_num_per_key_; + std::shared_ptr cache_; +}; + +#endif // PIKA_CACHE_LOAD_THREAD_H_ diff --git a/tools/pika_migrate/include/pika_client_conn.h b/tools/pika_migrate/include/pika_client_conn.h index 1bbe82ab9e..3124d2036c 100644 --- a/tools/pika_migrate/include/pika_client_conn.h +++ b/tools/pika_migrate/include/pika_client_conn.h @@ -6,72 +6,141 @@ #ifndef PIKA_CLIENT_CONN_H_ #define PIKA_CLIENT_CONN_H_ +#include +#include + +#include "acl.h" #include 
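// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the patch. Typical
// read-through use of the PikaCache string API declared above; error handling
// and the RocksDB fallback read are elided.
//
// void CachedGet(const std::shared_ptr<PikaCache>& cache, std::string key) {
//   std::string value;
//   rocksdb::Status s = cache->Get(key, &value);
//   if (s.ok()) return;  // cache hit
//   // miss: read from storage, then refill with a TTL so the next read hits:
//   // cache->Set(key, value, /*ttl=*/60);
// }
// ---------------------------------------------------------------------------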
"include/pika_command.h" +#include "include/pika_define.h" + +// TODO: stat time costing in write out data to connfd +struct TimeStat { + TimeStat() = default; + void Reset() { + enqueue_ts_ = dequeue_ts_ = 0; + process_done_ts_ = 0; + before_queue_ts_ = 0; + } + + uint64_t start_ts() const { return enqueue_ts_; } + + uint64_t total_time() const { return process_done_ts_ > enqueue_ts_ ? process_done_ts_ - enqueue_ts_ : 0; } + + uint64_t queue_time() const { return dequeue_ts_ > enqueue_ts_ ? dequeue_ts_ - enqueue_ts_ : 0; } -class PikaClientConn: public pink::RedisConn { + uint64_t process_time() const { return process_done_ts_ > dequeue_ts_ ? process_done_ts_ - dequeue_ts_ : 0; } + + uint64_t before_queue_time() const { return process_done_ts_ > dequeue_ts_ ? before_queue_ts_ - enqueue_ts_ : 0; } + + uint64_t enqueue_ts_; + uint64_t dequeue_ts_; + uint64_t before_queue_ts_; + uint64_t process_done_ts_; +}; + +class PikaClientConn : public net::RedisConn { public: + using WriteCompleteCallback = std::function; + struct BgTaskArg { - std::shared_ptr pcc; - std::vector redis_cmds; - std::string* response; + std::shared_ptr cmd_ptr; + std::shared_ptr conn_ptr; + std::vector redis_cmds; + std::shared_ptr resp_ptr; + LogOffset offset; + std::string db_name; + bool cache_miss_in_rtc_; }; - // Auth related - class AuthStat { + struct TxnStateBitMask { public: - void Init(); - bool IsAuthed(const std::shared_ptr cmd_ptr); - bool ChecknUpdate(const std::string& arg); - private: - enum StatType { - kNoAuthed = 0, - kAdminAuthed, - kLimitAuthed, - }; - StatType stat_; + static constexpr uint8_t Start = 0; + static constexpr uint8_t InitCmdFailed = 1; + static constexpr uint8_t WatchFailed = 2; + static constexpr uint8_t Execing = 3; }; - PikaClientConn(int fd, std::string ip_port, - pink::Thread *server_thread, - pink::PinkEpoll* pink_epoll, - const pink::HandleType& handle_type); - virtual ~PikaClientConn() {} + PikaClientConn(int fd, const std::string& ip_port, net::Thread* server_thread, net::NetMultiplexer* mpx, + const net::HandleType& handle_type, int max_conn_rbuf_size); + ~PikaClientConn() = default; - void AsynProcessRedisCmds(const std::vector& argvs, std::string* response) override; + bool IsInterceptedByRTC(std::string& opt); - void BatchExecRedisCmd(const std::vector& argvs, std::string* response); - int DealMessage(const pink::RedisCmdArgsType& argv, std::string* response); + void ProcessRedisCmds(const std::vector& argvs, bool async, std::string* response) override; + + bool ReadCmdInCache(const net::RedisCmdArgsType& argv, const std::string& opt); + void BatchExecRedisCmd(const std::vector& argvs, bool cache_miss_in_rtc); + int DealMessage(const net::RedisCmdArgsType& argv, std::string* response) override { return 0; } static void DoBackgroundTask(void* arg); bool IsPubSub() { return is_pubsub_; } void SetIsPubSub(bool is_pubsub) { is_pubsub_ = is_pubsub; } - void SetCurrentTable(const std::string& table_name) {current_table_ = table_name;} + void SetCurrentDb(const std::string& db_name) { current_db_ = db_name; } + void SetWriteCompleteCallback(WriteCompleteCallback cb) { write_completed_cb_ = std::move(cb); } + const std::string& GetCurrentTable() override { return current_db_; } - pink::ServerThread* server_thread() { - return server_thread_; - } + void DoAuth(const std::shared_ptr& user); - AuthStat& auth_stat() { - return auth_stat_; - } + void UnAuth(const std::shared_ptr& user); - private: - pink::ServerThread* const server_thread_; - std::string current_table_; - bool 
is_pubsub_; + bool IsAuthed() const; + void InitUser(); + bool AuthRequired() const; + + std::string UserName() const; - std::string DoCmd(const PikaCmdArgsType& argv, const std::string& opt); + // Txn + std::queue> GetTxnCmdQue(); + void PushCmdToQue(std::shared_ptr cmd); + void ClearTxnCmdQue(); + void SetTxnWatchFailState(bool is_failed); + void SetTxnInitFailState(bool is_failed); + void SetTxnStartState(bool is_start); + void AddKeysToWatch(const std::vector& db_keys); + void RemoveWatchedKeys(); + void SetTxnFailedFromKeys(const std::vector& db_keys); + void SetTxnFailedIfKeyExists(const std::string target_db_name = ""); + void ExitTxn(); + bool IsInTxn(); + bool IsTxnInitFailed(); + bool IsTxnWatchFailed(); + bool IsTxnExecing(void); - void ProcessSlowlog(const PikaCmdArgsType& argv, uint64_t start_us); + net::ServerThread* server_thread() { return server_thread_; } + void ClientInfoToString(std::string* info, const std::string& cmdName); + + std::atomic resp_num; + std::vector> resp_array; + + std::shared_ptr time_stat_; + + private: + net::ServerThread* const server_thread_; + std::string current_db_; + WriteCompleteCallback write_completed_cb_; + bool is_pubsub_ = false; + std::queue> txn_cmd_que_; + std::bitset<16> txn_state_; + std::unordered_set watched_db_keys_; + std::mutex txn_state_mu_; + + bool authenticated_ = false; + std::shared_ptr user_; + + std::shared_ptr DoCmd(const PikaCmdArgsType& argv, const std::string& opt, + const std::shared_ptr& resp_ptr, bool cache_miss_in_rtc); + + void ProcessSlowlog(const PikaCmdArgsType& argv, uint64_t do_duration); void ProcessMonitor(const PikaCmdArgsType& argv); - AuthStat auth_stat_; + void ExecRedisCmd(const PikaCmdArgsType& argv, std::shared_ptr& resp_ptr, bool cache_miss_in_rtc); + void TryWriteResp(); }; struct ClientInfo { int fd; std::string ip_port; - int64_t last_interaction; + int64_t last_interaction = 0; std::shared_ptr conn; }; diff --git a/tools/pika_migrate/include/pika_client_processor.h b/tools/pika_migrate/include/pika_client_processor.h new file mode 100644 index 0000000000..dccd4ef96c --- /dev/null +++ b/tools/pika_migrate/include/pika_client_processor.h @@ -0,0 +1,28 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_CLIENT_PROCESSOR_H_ +#define PIKA_CLIENT_PROCESSOR_H_ + +#include +#include +#include +#include "net/include/bg_thread.h" +#include "net/include/thread_pool.h" + +class PikaClientProcessor { + public: + PikaClientProcessor(size_t worker_num, size_t max_queue_size, const std::string& name_prefix = "CliProcessor"); + ~PikaClientProcessor(); + int Start(); + void Stop(); + void SchedulePool(net::TaskFunc func, void* arg); + size_t ThreadPoolCurQueueSize(); + size_t ThreadPoolMaxQueueSize(); + + private: + std::unique_ptr pool_; +}; +#endif // PIKA_CLIENT_PROCESSOR_H_ diff --git a/tools/pika_migrate/include/pika_cluster.h b/tools/pika_migrate/include/pika_cluster.h deleted file mode 100644 index bb34c37c31..0000000000 --- a/tools/pika_migrate/include/pika_cluster.h +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
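// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the patch. The new
// PikaClientProcessor is a thin wrapper over net::ThreadPool; assuming
// net::TaskFunc is a void(*)(void*) callback, callers submit work such as
// PikaClientConn::DoBackgroundTask declared above:
//
// PikaClientProcessor processor(/*worker_num=*/4, /*max_queue_size=*/100000);
// processor.Start();
// processor.SchedulePool(&PikaClientConn::DoBackgroundTask, bg_task_arg);
// processor.Stop();
// ---------------------------------------------------------------------------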
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_CLUSTER_H_ -#define PIKA_CLUSTER_H_ - -#include "include/pika_command.h" - -class PkClusterInfoCmd : public Cmd { - public: - enum InfoSection { - kInfoErr = 0x0, - kInfoSlot - }; - enum InfoRange { - kSingle = 0x0, - kAll - }; - PkClusterInfoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), - info_section_(kInfoErr), info_range_(kAll), partition_id_(0) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PkClusterInfoCmd(*this); - } - - private: - InfoSection info_section_; - InfoRange info_range_; - - std::string table_name_; - uint32_t partition_id_; - - virtual void DoInitial() override; - virtual void Clear() { - info_section_ = kInfoErr; - info_range_ = kAll; - table_name_.clear(); - partition_id_ = 0; - } - const static std::string kSlotSection; - void ClusterInfoSlotAll(std::string* info); - Status GetSlotInfo(const std::string table_name, uint32_t partition_id, std::string* info); - bool ParseInfoSlotSubCmd(); -}; - -class SlotParentCmd : public Cmd { - public: - SlotParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - - protected: - std::set slots_; - std::set p_infos_; - virtual void DoInitial(); - virtual void Clear() { - slots_.clear(); - p_infos_.clear(); - } -}; - -class PkClusterAddSlotsCmd : public SlotParentCmd { - public: - PkClusterAddSlotsCmd(const std::string& name, int arity, uint16_t flag) - : SlotParentCmd(name, arity, flag) {} - virtual Cmd* Clone() override { - return new PkClusterAddSlotsCmd(*this); - } - virtual void Do(std::shared_ptr partition = nullptr); - private: - virtual void DoInitial() override; - Status AddSlotsSanityCheck(const std::string& table_name); -}; - -class PkClusterDelSlotsCmd : public SlotParentCmd { - public: - PkClusterDelSlotsCmd(const std::string& name, int32_t arity, uint16_t flag) - : SlotParentCmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PkClusterDelSlotsCmd(*this); - } - private: - virtual void DoInitial() override; - Status RemoveSlotsSanityCheck(const std::string& table_name); -}; - -class PkClusterSlotsSlaveofCmd : public Cmd { - public: - PkClusterSlotsSlaveofCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PkClusterSlotsSlaveofCmd(*this); - } - private: - std::string ip_; - int64_t port_; - std::set slots_; - bool force_sync_; - bool is_noone_; - virtual void DoInitial() override; - virtual void Clear() { - ip_.clear(); - port_ = 0; - slots_.clear(); - force_sync_ = false; - is_noone_ = false; - } -}; - -#endif // PIKA_CLUSTER_H_ diff --git a/tools/pika_migrate/include/pika_cmd_table_manager.h b/tools/pika_migrate/include/pika_cmd_table_manager.h index bd87296698..8177fa63b9 100644 --- a/tools/pika_migrate/include/pika_cmd_table_manager.h +++ b/tools/pika_migrate/include/pika_cmd_table_manager.h @@ -6,27 +6,59 @@ #ifndef PIKA_CMD_TABLE_MANAGER_H_ #define PIKA_CMD_TABLE_MANAGER_H_ +#include +#include + +#include "include/acl.h" #include "include/pika_command.h" #include "include/pika_data_distribution.h" +struct CommandStatistics { + CommandStatistics() = default; + CommandStatistics(const CommandStatistics& other) { + 
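// Editor's note: std::atomic counters are neither copyable nor movable, so
// this copy constructor transfers the values explicitly via load()/store();
// a defaulted copy constructor would not compile.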
cmd_time_consuming.store(other.cmd_time_consuming.load()); + cmd_count.store(other.cmd_count.load()); + } + std::atomic cmd_count = 0; + std::atomic cmd_time_consuming = 0; +}; class PikaCmdTableManager { + friend AclSelector; + public: PikaCmdTableManager(); - virtual ~PikaCmdTableManager(); + virtual ~PikaCmdTableManager() = default; + void InitCmdTable(void); + void RenameCommand(const std::string before, const std::string after); std::shared_ptr GetCmd(const std::string& opt); - uint32_t DistributeKey(const std::string& key, uint32_t partition_num); + bool CmdExist(const std::string& cmd) const; + CmdTable* GetCmdTable(); + uint32_t GetMaxCmdId(); + + std::vector GetAclCategoryCmdNames(uint32_t flag); + + /* + * Info Commandstats used + */ + std::unordered_map* GetCommandStatMap(); + private: std::shared_ptr NewCommand(const std::string& opt); void InsertCurrentThreadDistributionMap(); - bool CheckCurrentThreadDistributionMapExist(const pid_t& tid); + bool CheckCurrentThreadDistributionMapExist(const std::thread::id& tid); + + std::unique_ptr cmds_; - void TryChangeToAlias(std::string *internal_opt); + uint32_t cmdId_ = 0; - CmdTable* cmds_; + std::shared_mutex map_protector_; + std::unordered_map> thread_distribution_map_; - pthread_rwlock_t map_protector_; - std::unordered_map thread_distribution_map_; + /* + * Info Commandstats used + */ + std::unordered_map cmdstat_map_; }; #endif diff --git a/tools/pika_migrate/include/pika_command.h b/tools/pika_migrate/include/pika_command.h index dec0b50924..c132eae9c5 100644 --- a/tools/pika_migrate/include/pika_command.h +++ b/tools/pika_migrate/include/pika_command.h @@ -6,21 +6,31 @@ #ifndef PIKA_COMMAND_H_ #define PIKA_COMMAND_H_ +#include +#include #include +#include +#include -#include "pink/include/redis_conn.h" -#include "pink/include/pink_conn.h" -#include "slash/include/slash_string.h" +#include "rocksdb/status.h" -#include "include/pika_partition.h" +#include "net/include/net_conn.h" +#include "net/include/redis_conn.h" +#include "pstd/include/pstd_string.h" -//Constant for command name -//Admin +#include "net/src/dispatch_thread.h" + +class SyncMasterDB; +class SyncSlaveDB; +class DB; +// Constant for command name +// Admin const std::string kCmdNameSlaveof = "slaveof"; const std::string kCmdNameDbSlaveof = "dbslaveof"; const std::string kCmdNameAuth = "auth"; const std::string kCmdNameBgsave = "bgsave"; const std::string kCmdNameCompact = "compact"; +const std::string kCmdNameCompactRange = "compactrange"; const std::string kCmdNamePurgelogsto = "purgelogsto"; const std::string kCmdNamePing = "ping"; const std::string kCmdNameSelect = "select"; @@ -38,15 +48,42 @@ const std::string kCmdNameEcho = "echo"; const std::string kCmdNameScandb = "scandb"; const std::string kCmdNameSlowlog = "slowlog"; const std::string kCmdNamePadding = "padding"; -#ifdef TCMALLOC_EXTENSION -const std::string kCmdNameTcmalloc = "tcmalloc"; -#endif const std::string kCmdNamePKPatternMatchDel = "pkpatternmatchdel"; +const std::string kCmdDummy = "dummy"; +const std::string kCmdNameQuit = "quit"; +const std::string kCmdNameHello = "hello"; +const std::string kCmdNameCommand = "command"; +const std::string kCmdNameDiskRecovery = "diskrecovery"; +const std::string kCmdNameClearReplicationID = "clearreplicationid"; +const std::string kCmdNameDisableWal = "disablewal"; +const std::string kCmdNameLastSave = "lastsave"; +const std::string kCmdNameCache = "cache"; +const std::string kCmdNameClearCache = "clearcache"; + +// Migrate slot +const std::string 
kCmdNameSlotsMgrtSlot = "slotsmgrtslot"; +const std::string kCmdNameSlotsMgrtTagSlot = "slotsmgrttagslot"; +const std::string kCmdNameSlotsMgrtOne = "slotsmgrtone"; +const std::string kCmdNameSlotsMgrtTagOne = "slotsmgrttagone"; +const std::string kCmdNameSlotsInfo = "slotsinfo"; +const std::string kCmdNameSlotsHashKey = "slotshashkey"; +const std::string kCmdNameSlotsReload = "slotsreload"; +const std::string kCmdNameSlotsReloadOff = "slotsreloadoff"; +const std::string kCmdNameSlotsDel = "slotsdel"; +const std::string kCmdNameSlotsScan = "slotsscan"; +const std::string kCmdNameSlotsCleanup = "slotscleanup"; +const std::string kCmdNameSlotsCleanupOff = "slotscleanupoff"; +const std::string kCmdNameSlotsMgrtTagSlotAsync = "slotsmgrttagslot-async"; +const std::string kCmdNameSlotsMgrtSlotAsync = "slotsmgrtslot-async"; +const std::string kCmdNameSlotsMgrtExecWrapper = "slotsmgrt-exec-wrapper"; +const std::string kCmdNameSlotsMgrtAsyncStatus = "slotsmgrt-async-status"; +const std::string kCmdNameSlotsMgrtAsyncCancel = "slotsmgrt-async-cancel"; -//Kv +// Kv const std::string kCmdNameSet = "set"; const std::string kCmdNameGet = "get"; const std::string kCmdNameDel = "del"; +const std::string kCmdNameUnlink = "unlink"; const std::string kCmdNameIncr = "incr"; const std::string kCmdNameIncrby = "incrby"; const std::string kCmdNameIncrbyfloat = "incrbyfloat"; @@ -80,7 +117,7 @@ const std::string kCmdNamePKSetexAt = "pksetexat"; const std::string kCmdNamePKScanRange = "pkscanrange"; const std::string kCmdNamePKRScanRange = "pkrscanrange"; -//Hash +// Hash const std::string kCmdNameHDel = "hdel"; const std::string kCmdNameHSet = "hset"; const std::string kCmdNameHGet = "hget"; @@ -100,10 +137,11 @@ const std::string kCmdNameHScanx = "hscanx"; const std::string kCmdNamePKHScanRange = "pkhscanrange"; const std::string kCmdNamePKHRScanRange = "pkhrscanrange"; -//List +// List const std::string kCmdNameLIndex = "lindex"; const std::string kCmdNameLInsert = "linsert"; const std::string kCmdNameLLen = "llen"; +const std::string kCmdNameBLPop = "blpop"; const std::string kCmdNameLPop = "lpop"; const std::string kCmdNameLPush = "lpush"; const std::string kCmdNameLPushx = "lpushx"; @@ -111,19 +149,20 @@ const std::string kCmdNameLRange = "lrange"; const std::string kCmdNameLRem = "lrem"; const std::string kCmdNameLSet = "lset"; const std::string kCmdNameLTrim = "ltrim"; +const std::string kCmdNameBRpop = "brpop"; const std::string kCmdNameRPop = "rpop"; const std::string kCmdNameRPopLPush = "rpoplpush"; const std::string kCmdNameRPush = "rpush"; const std::string kCmdNameRPushx = "rpushx"; -//BitMap +// BitMap const std::string kCmdNameBitSet = "setbit"; const std::string kCmdNameBitGet = "getbit"; const std::string kCmdNameBitPos = "bitpos"; const std::string kCmdNameBitOp = "bitop"; const std::string kCmdNameBitCount = "bitcount"; -//Zset +// Zset const std::string kCmdNameZAdd = "zadd"; const std::string kCmdNameZCard = "zcard"; const std::string kCmdNameZScan = "zscan"; @@ -148,7 +187,7 @@ const std::string kCmdNameZRemrangebyscore = "zremrangebyscore"; const std::string kCmdNameZPopmax = "zpopmax"; const std::string kCmdNameZPopmin = "zpopmin"; -//Set +// Set const std::string kCmdNameSAdd = "sadd"; const std::string kCmdNameSPop = "spop"; const std::string kCmdNameSCard = "scard"; @@ -165,12 +204,19 @@ const std::string kCmdNameSDiffstore = "sdiffstore"; const std::string kCmdNameSMove = "smove"; const std::string kCmdNameSRandmember = "srandmember"; -//HyperLogLog +// transation +const std::string 
kCmdNameMulti = "multi"; +const std::string kCmdNameExec = "exec"; +const std::string kCmdNameDiscard = "discard"; +const std::string kCmdNameWatch = "watch"; +const std::string kCmdNameUnWatch = "unwatch"; + +// HyperLogLog const std::string kCmdNamePfAdd = "pfadd"; const std::string kCmdNamePfCount = "pfcount"; const std::string kCmdNamePfMerge = "pfmerge"; -//GEO +// GEO const std::string kCmdNameGeoAdd = "geoadd"; const std::string kCmdNameGeoPos = "geopos"; const std::string kCmdNameGeoDist = "geodist"; @@ -178,7 +224,7 @@ const std::string kCmdNameGeoHash = "geohash"; const std::string kCmdNameGeoRadius = "georadius"; const std::string kCmdNameGeoRadiusByMember = "georadiusbymember"; -//Pub/Sub +// Pub/Sub const std::string kCmdNamePublish = "publish"; const std::string kCmdNameSubscribe = "subscribe"; const std::string kCmdNameUnSubscribe = "unsubscribe"; @@ -186,76 +232,77 @@ const std::string kCmdNamePubSub = "pubsub"; const std::string kCmdNamePSubscribe = "psubscribe"; const std::string kCmdNamePUnSubscribe = "punsubscribe"; -//Codis Slots -const std::string kCmdNameSlotsInfo = "slotsinfo"; -const std::string kCmdNameSlotsHashKey = "slotshashkey"; -const std::string kCmdNameSlotsMgrtTagSlotAsync = "slotsmgrttagslot-async"; -const std::string kCmdNameSlotsMgrtSlotAsync = "slotsmgrtslot-async"; -const std::string kCmdNameSlotsDel = "slotsdel"; -const std::string kCmdNameSlotsScan = "slotsscan"; -const std::string kCmdNameSlotsMgrtExecWrapper = "slotsmgrt-exec-wrapper"; -const std::string kCmdNameSlotsMgrtAsyncStatus = "slotsmgrt-async-status"; -const std::string kCmdNameSlotsMgrtAsyncCancel = "slotsmgrt-async-cancel"; -const std::string kCmdNameSlotsMgrtSlot = "slotsmgrtslot"; -const std::string kCmdNameSlotsMgrtTagSlot = "slotsmgrttagslot"; -const std::string kCmdNameSlotsMgrtOne = "slotsmgrtone"; -const std::string kCmdNameSlotsMgrtTagOne = "slotsmgrttagone"; +// ACL +const std::string KCmdNameAcl = "acl"; - -//Cluster -const std::string kCmdNamePkClusterInfo = "pkclusterinfo"; -const std::string kCmdNamePkClusterAddSlots = "pkclusteraddslots"; -const std::string kCmdNamePkClusterDelSlots = "pkclusterdelslots"; -const std::string kCmdNamePkClusterSlotsSlaveof = "pkclusterslotsslaveof"; +// Stream +const std::string kCmdNameXAdd = "xadd"; +const std::string kCmdNameXDel = "xdel"; +const std::string kCmdNameXRead = "xread"; +const std::string kCmdNameXLen = "xlen"; +const std::string kCmdNameXRange = "xrange"; +const std::string kCmdNameXRevrange = "xrevrange"; +const std::string kCmdNameXTrim = "xtrim"; +const std::string kCmdNameXInfo = "xinfo"; const std::string kClusterPrefix = "pkcluster"; -typedef pink::RedisCmdArgsType PikaCmdArgsType; -static const int RAW_ARGS_LEN = 1024 * 1024; + + +/* + * If a type holds a key, a new data structure + * that uses the key will use this error + */ +constexpr const char* ErrTypeMessage = "Invalid argument: WRONGTYPE"; + +using PikaCmdArgsType = net::RedisCmdArgsType; +static const int RAW_ARGS_LEN = 1024 * 1024; enum CmdFlagsMask { - kCmdFlagsMaskRW = 1, - kCmdFlagsMaskType = 30, - kCmdFlagsMaskLocal = 32, - kCmdFlagsMaskSuspend = 64, - kCmdFlagsMaskPrior = 128, - kCmdFlagsMaskAdminRequire = 256, - kCmdFlagsMaskPartition = 1536 + kCmdFlagsMaskRW = 1, + kCmdFlagsMaskLocal = (1 << 1), + kCmdFlagsMaskSuspend = (1 << 2), + kCmdFlagsMaskReadCache = (1 << 3), + kCmdFlagsMaskAdminRequire = (1 << 4), + kCmdFlagsMaskUpdateCache = (1 << 5), + kCmdFlagsMaskDoThrouhDB = (1 << 6), }; enum CmdFlags { - kCmdFlagsRead = 0, //default rw - kCmdFlagsWrite = 
1, - kCmdFlagsAdmin = 0, //default type - kCmdFlagsKv = 2, - kCmdFlagsHash = 4, - kCmdFlagsList = 6, - kCmdFlagsSet = 8, - kCmdFlagsZset = 10, - kCmdFlagsBit = 12, - kCmdFlagsHyperLogLog = 14, - kCmdFlagsGeo = 16, - kCmdFlagsPubSub = 18, - kCmdFlagsNoLocal = 0, //default nolocal - kCmdFlagsLocal = 32, - kCmdFlagsNoSuspend = 0, //default nosuspend - kCmdFlagsSuspend = 64, - kCmdFlagsNoPrior = 0, //default noprior - kCmdFlagsPrior = 128, - kCmdFlagsNoAdminRequire = 0, //default no need admin - kCmdFlagsAdminRequire = 256, - kCmdFlagsDoNotSpecifyPartition = 0, //default do not specify partition - kCmdFlagsSinglePartition = 512, - kCmdFlagsMultiPartition = 1024 + kCmdFlagsRead = 1, // default rw + kCmdFlagsWrite = (1 << 1), + kCmdFlagsAdmin = (1 << 2), // default type + kCmdFlagsKv = (1 << 3), + kCmdFlagsHash = (1 << 4), + kCmdFlagsList = (1 << 5), + kCmdFlagsSet = (1 << 6), + kCmdFlagsZset = (1 << 7), + kCmdFlagsBit = (1 << 8), + kCmdFlagsHyperLogLog = (1 << 9), + kCmdFlagsGeo = (1 << 10), + kCmdFlagsPubSub = (1 << 11), + kCmdFlagsLocal = (1 << 12), + kCmdFlagsSuspend = (1 << 13), + kCmdFlagsAdminRequire = (1 << 14), + kCmdFlagsNoAuth = (1 << 15), // command no auth can also be executed + kCmdFlagsReadCache = (1 << 16), + kCmdFlagsUpdateCache = (1 << 17), + kCmdFlagsDoThroughDB = (1 << 18), + kCmdFlagsOperateKey = (1 << 19), // redis keySpace + kCmdFlagsStream = (1 << 20), + kCmdFlagsFast = (1 << 21), + kCmdFlagsSlow = (1 << 22) }; - void inline RedisAppendContent(std::string& str, const std::string& value); -void inline RedisAppendLen(std::string& str, int64_t ori, const std::string &prefix); +void inline RedisAppendLen(std::string& str, int64_t ori, const std::string& prefix); +void inline RedisAppendLenUint64(std::string& str, uint64_t ori, const std::string& prefix) { + RedisAppendLen(str, static_cast(ori), prefix); +} const std::string kNewLine = "\r\n"; class CmdRes { -public: + public: enum CmdRet { kNone = 0, kOk, @@ -277,210 +324,318 @@ class CmdRes { kWrongNum, kInvalidIndex, kInvalidDbType, - kInvalidTable, + kInvalidDB, + kInconsistentHashTag, kErrOther, + kCacheMiss, + KIncrByOverFlow, + kInvalidTransaction, + kTxnQueued, + kTxnAbort, + kMultiKey, + kNoExists, }; - CmdRes():ret_(kNone) {} + CmdRes() = default; - bool none() const { - return ret_ == kNone && message_.empty(); - } - bool ok() const { - return ret_ == kOk || ret_ == kNone; - } + bool none() const { return ret_ == kNone && message_.empty(); } + bool noexist() const { return ret_ == kNoExists; } + bool ok() const { return ret_ == kOk || ret_ == kNone || ret_ == kNoExists; } + CmdRet ret() const { return ret_; } void clear() { message_.clear(); ret_ = kNone; } - std::string raw_message() const { - return message_; - } + bool CacheMiss() const { return ret_ == kCacheMiss; } + std::string raw_message() const { return message_; } std::string message() const { std::string result; switch (ret_) { - case kNone: - return message_; - case kOk: - return "+OK\r\n"; - case kPong: - return "+PONG\r\n"; - case kSyntaxErr: - return "-ERR syntax error\r\n"; - case kInvalidInt: - return "-ERR value is not an integer or out of range\r\n"; - case kInvalidBitInt: - return "-ERR bit is not an integer or out of range\r\n"; - case kInvalidBitOffsetInt: - return "-ERR bit offset is not an integer or out of range\r\n"; - case kWrongBitOpNotNum: - return "-ERR BITOP NOT must be called with a single source key.\r\n"; - - case kInvalidBitPosArgument: - return "-ERR The bit argument must be 1 or 0.\r\n"; - case kInvalidFloat: - return "-ERR value 
is not a valid float\r\n"; - case kOverFlow: - return "-ERR increment or decrement would overflow\r\n"; - case kNotFound: - return "-ERR no such key\r\n"; - case kOutOfRange: - return "-ERR index out of range\r\n"; - case kInvalidPwd: - return "-ERR invalid password\r\n"; - case kNoneBgsave: - return "-ERR No BGSave Works now\r\n"; - case kPurgeExist: - return "-ERR binlog already in purging...\r\n"; - case kInvalidParameter: - return "-ERR Invalid Argument\r\n"; - case kWrongNum: - result = "-ERR wrong number of arguments for '"; - result.append(message_); - result.append("' command\r\n"); - break; - case kInvalidIndex: - result = "-ERR invalid DB index for '"; - result.append(message_); - result.append("'\r\n"); - break; - case kInvalidDbType: - result = "-ERR invalid DB for '"; - result.append(message_); - result.append("'\r\n"); - break; - case kInvalidTable: - result = "-ERR invalid Table for '"; - result.append(message_); - result.append("'\r\n"); - break; - case kErrOther: - result = "-ERR "; - result.append(message_); - result.append(kNewLine); - break; - default: - break; + case kNone: + return message_; + case kOk: + return "+OK\r\n"; + case kPong: + return "+PONG\r\n"; + case kSyntaxErr: + return "-ERR syntax error\r\n"; + case kInvalidInt: + return "-ERR value is not an integer or out of range\r\n"; + case kInvalidBitInt: + return "-ERR bit is not an integer or out of range\r\n"; + case kInvalidBitOffsetInt: + return "-ERR bit offset is not an integer or out of range\r\n"; + case kWrongBitOpNotNum: + return "-ERR BITOP NOT must be called with a single source key.\r\n"; + case kInvalidBitPosArgument: + return "-ERR The bit argument must be 1 or 0.\r\n"; + case kInvalidFloat: + return "-ERR value is not a valid float\r\n"; + case kOverFlow: + return "-ERR increment or decrement would overflow\r\n"; + case kNotFound: + return "-ERR no such key\r\n"; + case kOutOfRange: + return "-ERR index out of range\r\n"; + case kInvalidPwd: + return "-ERR invalid password\r\n"; + case kNoneBgsave: + return "-ERR No BGSave Works now\r\n"; + case kPurgeExist: + return "-ERR binlog already in purging...\r\n"; + case kInvalidParameter: + return "-ERR Invalid Argument\r\n"; + case kWrongNum: + result = "-ERR wrong number of arguments for '"; + result.append(message_); + result.append("' command\r\n"); + break; + case kInvalidIndex: + result = "-ERR invalid DB index for '"; + result.append(message_); + result.append("'\r\n"); + break; + case kInvalidDbType: + result = "-ERR invalid DB for '"; + result.append(message_); + result.append("'\r\n"); + break; + case kInconsistentHashTag: + return "-ERR parameters hashtag is inconsistent\r\n"; + case kInvalidDB: + result = "-ERR invalid DB for '"; + result.append(message_); + result.append("'\r\n"); + break; + case kInvalidTransaction: + return "-ERR WATCH inside MULTI is not allowed\r\n"; + case kTxnQueued: + result = "+QUEUED"; + result.append("\r\n"); + break; + case kTxnAbort: + result = "-EXECABORT "; + result.append(message_); + result.append(kNewLine); + break; + case kErrOther: + result = "-ERR "; + result.append(message_); + result.append(kNewLine); + break; + case KIncrByOverFlow: + result = "-ERR increment would produce NaN or Infinity"; + result.append(message_); + result.append(kNewLine); + break; + case kMultiKey: + result = "-WRONGTYPE Operation against a key holding the wrong kind of value"; + result.append(kNewLine); + break; + case kNoExists: + return message_; + default: + break; } return result; } // Inline functions for Create Redis 
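// Editor's note: an illustrative sketch, not part of the patch. Composing a
// RESP array reply with the append helpers declared here:
//   CmdRes res;
//   res.AppendArrayLen(2);
//   res.AppendString("foo");   // emits $3\r\nfoo\r\n
//   res.AppendInteger(42);     // emits :42\r\n
//   // res.message() now holds "*2\r\n$3\r\nfoo\r\n:42\r\n"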
protocol - void AppendStringLen(int64_t ori) { - RedisAppendLen(message_, ori, "$"); - } - void AppendArrayLen(int64_t ori) { - RedisAppendLen(message_, ori, "*"); - } - void AppendInteger(int64_t ori) { - RedisAppendLen(message_, ori, ":"); - } - void AppendContent(const std::string& value) { - RedisAppendContent(message_, value); - } + void AppendStringLen(int64_t ori) { RedisAppendLen(message_, ori, "$"); } + void AppendStringLenUint64(uint64_t ori) { RedisAppendLenUint64(message_, ori, "$"); } + void AppendArrayLen(int64_t ori) { RedisAppendLen(message_, ori, "*"); } + void AppendArrayLenUint64(uint64_t ori) { RedisAppendLenUint64(message_, ori, "*"); } + void AppendInteger(int64_t ori) { RedisAppendLen(message_, ori, ":"); } + void AppendContent(const std::string& value) { RedisAppendContent(message_, value); } void AppendString(const std::string& value) { - AppendStringLen(value.size()); + AppendStringLenUint64(value.size()); AppendContent(value); } - void AppendStringRaw(const std::string& value) { - message_.append(value); + void AppendStringRaw(const std::string& value) { message_.append(value); } + + void AppendStringVector(const std::vector& strArray) { + if (strArray.empty()) { + AppendArrayLen(0); + return; + } + AppendArrayLen(strArray.size()); + for (const auto& item : strArray) { + AppendString(item); + } } - void SetRes(CmdRet _ret, const std::string content = "") { + + void SetRes(CmdRet _ret, const std::string& content = "") { ret_ = _ret; if (!content.empty()) { message_ = content; } } -private: + private: std::string message_; - CmdRet ret_; + CmdRet ret_ = kNone; +}; + +/** + * Current used by: + * blpop,brpop + */ +struct UnblockTaskArgs { + std::string key; + std::shared_ptr db; + net::DispatchThread* dispatchThread{ nullptr }; + UnblockTaskArgs(std::string key_, std::shared_ptr db_, net::DispatchThread* dispatchThread_) + : key(std::move(key_)), db(db_), dispatchThread(dispatchThread_) {} }; -class Cmd { +class PikaClientConn; + +class Cmd : public std::enable_shared_from_this { public: - Cmd(const std::string& name, int arity, uint16_t flag) - : name_(name), arity_(arity), flag_(flag) {} - virtual ~Cmd() {} + friend class PikaClientConn; + enum CmdStage { kNone, kBinlogStage, kExecuteStage }; + struct HintKeys { + HintKeys() = default; + + bool empty() const { return keys.empty() && hints.empty(); } + std::vector keys; + std::vector hints; + }; + struct ProcessArg { + ProcessArg() = default; + ProcessArg(std::shared_ptr _db, std::shared_ptr _sync_db, HintKeys _hint_keys) + : db(std::move(_db)), sync_db(std::move(_sync_db)), hint_keys(std::move(_hint_keys)) {} + std::shared_ptr db; + std::shared_ptr sync_db; + HintKeys hint_keys; + }; + struct CommandStatistics { + CommandStatistics() = default; + CommandStatistics(const CommandStatistics& other) { + cmd_time_consuming.store(other.cmd_time_consuming.load()); + cmd_count.store(other.cmd_count.load()); + } + std::atomic cmd_count = {0}; + std::atomic cmd_time_consuming = {0}; + }; + CommandStatistics state; + Cmd(std::string name, int arity, uint32_t flag, uint32_t aclCategory = 0); + virtual ~Cmd() = default; virtual std::vector current_key() const; virtual void Execute(); - virtual void ProcessFlushDBCmd(); - virtual void ProcessFlushAllCmd(); - virtual void ProcessSinglePartitionCmd(); - virtual void ProcessMultiPartitionCmd(); - virtual void ProcessDoNotSpecifyPartitionCmd(); - virtual void Do(std::shared_ptr partition = nullptr) = 0; + virtual void Do() {}; + virtual void DoThroughDB() {} + virtual void 
DoUpdateCache() {} + virtual void ReadCache() {} virtual Cmd* Clone() = 0; - - void Initial(const PikaCmdArgsType& argv, - const std::string& table_name); - - bool is_write() const; - bool is_local() const; - bool is_suspend() const; - bool is_admin_require() const; - bool is_single_partition() const; - bool is_multi_partition() const; + // used for execute multikey command into different slots + virtual void Split(const HintKeys& hint_keys) = 0; + virtual void Merge() = 0; + virtual bool IsTooLargeKey(const int &max_sz) { return false; } + + int8_t SubCmdIndex(const std::string& cmdName); // if the command no subCommand,return -1; + + void Initial(const PikaCmdArgsType& argv, const std::string& db_name); + uint32_t flag() const; + bool hasFlag(uint32_t flag) const; + bool is_read() const; + bool is_write() const; + bool isCacheRead() const; + + bool IsLocal() const; + bool IsSuspend() const; + bool IsAdmin() const; + bool HasSubCommand() const; // The command is there a sub command + std::vector SubCommand() const; // Get command is there a sub command + bool IsNeedUpdateCache() const; + bool IsNeedReadCache() const; + bool IsNeedCacheDo() const; + bool HashtagIsConsistent(const std::string& lhs, const std::string& rhs) const; + uint64_t GetDoDuration() const { return do_duration_; }; + std::shared_ptr GetDB() const { return db_; }; + uint32_t AclCategory() const; + void AddAclCategory(uint32_t aclCategory); + void SetDbName(const std::string& db_name) { db_name_ = db_name; } + std::string GetDBName() { return db_name_; } std::string name() const; CmdRes& res(); + std::string db_name() const; + PikaCmdArgsType& argv(); + virtual std::string ToRedisProtocol(); - virtual std::string ToBinlog(uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset); + void SetConn(const std::shared_ptr& conn); + std::shared_ptr GetConn(); - void SetConn(const std::shared_ptr conn); - std::shared_ptr GetConn(); + void SetResp(const std::shared_ptr& resp); + std::shared_ptr GetResp(); + + void SetStage(CmdStage stage); + void SetCmdId(uint32_t cmdId){cmdId_ = cmdId;} + + virtual void DoBinlog(); + + uint32_t GetCmdId() const { return cmdId_; }; + bool CheckArg(uint64_t num) const; + + bool IsCacheMissedInRtc() const; + void SetCacheMissedInRtc(bool value); protected: // enable copy, used default copy - //Cmd(const Cmd&); - void ProcessCommand(std::shared_ptr partition); - void DoCommand(std::shared_ptr partition); - void DoBinlog(std::shared_ptr partition); - bool CheckArg(int num) const; + // Cmd(const Cmd&); + void ProcessCommand(const HintKeys& hint_key = HintKeys()); + void InternalProcessCommand(const HintKeys& hint_key); + void DoCommand(const HintKeys& hint_key); + bool DoReadCommandInCache(); void LogCommand() const; std::string name_; - int arity_; - uint16_t flag_; + int arity_ = -2; + uint32_t flag_ = 0; + std::vector subCmdName_; // sub command name, may be empty + + protected: CmdRes res_; PikaCmdArgsType argv_; - std::string table_name_; - - std::weak_ptr conn_; + std::string db_name_; + rocksdb::Status s_; + std::shared_ptr db_; + std::shared_ptr sync_db_; + std::weak_ptr conn_; + std::weak_ptr resp_; + CmdStage stage_ = kNone; + uint64_t do_duration_ = 0; + uint32_t cmdId_ = 0; + uint32_t aclCategory_ = 0; + bool cache_missed_in_rtc_{false}; private: virtual void DoInitial() = 0; - virtual void Clear() {}; + virtual void Clear(){}; Cmd& operator=(const Cmd&); }; -typedef std::unordered_map CmdTable; +using CmdTable = std::unordered_map>; // 
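// Editor's note: an illustrative sketch, not part of the patch. With CmdTable
// now mapping command names to std::unique_ptr<Cmd> (template arguments are
// stripped in this rendering), registration inside InitCmdTable looks roughly
// like the following, using the GetCmd class from pika_kv.h:
//   cmd_table->emplace(kCmdNameGet, std::make_unique<GetCmd>(
//       kCmdNameGet, 2, kCmdFlagsRead | kCmdFlagsKv));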
Method for Cmd Table void InitCmdTable(CmdTable* cmd_table); -Cmd* GetCmdFromTable(const std::string& opt, const CmdTable& cmd_table); -void DestoryCmdTable(CmdTable* cmd_table); +Cmd* GetCmdFromDB(const std::string& opt, const CmdTable& cmd_table); void RedisAppendContent(std::string& str, const std::string& value) { str.append(value.data(), value.size()); str.append(kNewLine); } -void RedisAppendLen(std::string& str, int64_t ori, const std::string &prefix) { +void RedisAppendLen(std::string& str, int64_t ori, const std::string& prefix) { char buf[32]; - slash::ll2string(buf, 32, static_cast(ori)); + pstd::ll2string(buf, 32, static_cast(ori)); str.append(prefix); str.append(buf); str.append(kNewLine); } -void TryAliasChange(std::vector* argv); - #endif diff --git a/tools/pika_migrate/include/pika_conf.h b/tools/pika_migrate/include/pika_conf.h index 83149be514..19ef33afde 100644 --- a/tools/pika_migrate/include/pika_conf.h +++ b/tools/pika_migrate/include/pika_conf.h @@ -6,314 +6,1070 @@ #ifndef PIKA_CONF_H_ #define PIKA_CONF_H_ +#include #include #include #include -#include -#include "slash/include/base_conf.h" -#include "slash/include/slash_mutex.h" -#include "slash/include/slash_string.h" +#include "rocksdb/compression_type.h" +#include "pstd/include/base_conf.h" +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/pstd_string.h" + +#include "acl.h" #include "include/pika_define.h" -#include "include/pika_meta.h" +#include "rocksdb/compression_type.h" #define kBinlogReadWinDefaultSize 9000 #define kBinlogReadWinMaxSize 90000 - -typedef slash::RWLock RWLock; +const uint32_t configRunIDSize = 40; +const uint32_t configReplicationIDSize = 50; // global class, class members well initialized -class PikaConf : public slash::BaseConf { +class PikaConf : public pstd::BaseConf { public: + enum CompactionStrategy { + NONE, + FullCompact, + OldestOrBestDeleteRatioSstCompact + }; PikaConf(const std::string& path); - ~PikaConf(); + ~PikaConf() override = default; // Getter - int port() { RWLock l(&rwlock_, false); return port_; } - std::string slaveof() { RWLock l(&rwlock_, false); return slaveof_;} - int slave_priority() { RWLock l(&rwlock_, false); return slave_priority_;} - bool write_binlog() { RWLock l(&rwlock_, false); return write_binlog_;} - int thread_num() { RWLock l(&rwlock_, false); return thread_num_; } - int thread_pool_size() { RWLock l(&rwlock_, false); return thread_pool_size_; } - int sync_thread_num() { RWLock l(&rwlock_, false); return sync_thread_num_; } - std::string log_path() { RWLock l(&rwlock_, false); return log_path_; } - std::string db_path() { RWLock l(&rwlock_, false); return db_path_; } - std::string db_sync_path() { RWLock l(&rwlock_, false); return db_sync_path_; } - int db_sync_speed() { RWLock l(&rwlock_, false); return db_sync_speed_; } - std::string compact_cron() { RWLock l(&rwlock_, false); return compact_cron_; } - std::string compact_interval() { RWLock l(&rwlock_, false); return compact_interval_; } - int64_t write_buffer_size() { RWLock l(&rwlock_, false); return write_buffer_size_; } - int64_t max_write_buffer_size() { RWLock l(&rwlock_, false); return max_write_buffer_size_; } - int64_t max_client_response_size() { RWLock L(&rwlock_, false); return max_client_response_size_;} - int timeout() { RWLock l(&rwlock_, false); return timeout_; } - std::string server_id() { RWLock l(&rwlock_, false); return server_id_; } - std::string requirepass() { RWLock l(&rwlock_, false); return requirepass_; } - std::string masterauth() { RWLock l(&rwlock_, 
false); return masterauth_; } - std::string bgsave_path() { RWLock l(&rwlock_, false); return bgsave_path_; } - int expire_dump_days() { RWLock l(&rwlock_, false); return expire_dump_days_; } - std::string bgsave_prefix() { RWLock l(&rwlock_, false); return bgsave_prefix_; } - std::string userpass() { RWLock l(&rwlock_, false); return userpass_; } - const std::string suser_blacklist() { RWLock l(&rwlock_, false); return slash::StringConcat(user_blacklist_, COMMA); } - const std::vector& vuser_blacklist() { RWLock l(&rwlock_, false); return user_blacklist_;} - bool classic_mode() { return classic_mode_.load();} - int databases() { RWLock l(&rwlock_, false); return databases_;} - int default_slot_num() { RWLock l(&rwlock_, false); return default_slot_num_;} - const std::vector& table_structs() { RWLock l(&rwlock_, false); return table_structs_; } - std::string default_table() { RWLock l(&rwlock_, false); return default_table_;} - std::string compression() { RWLock l(&rwlock_, false); return compression_; } - int target_file_size_base() { RWLock l(&rwlock_, false); return target_file_size_base_; } - int max_cache_statistic_keys() { RWLock l(&rwlock_, false); return max_cache_statistic_keys_;} - int small_compaction_threshold() { RWLock l(&rwlock_, false); return small_compaction_threshold_;} - int max_background_flushes() { RWLock l(&rwlock_, false); return max_background_flushes_; } - int max_background_compactions() { RWLock l(&rwlock_, false); return max_background_compactions_; } - int max_cache_files() { RWLock l(&rwlock_, false); return max_cache_files_; } - int max_bytes_for_level_multiplier() { RWLock l(&rwlock_, false); return max_bytes_for_level_multiplier_; } - int64_t block_size() { RWLock l(&rwlock_, false); return block_size_; } - int64_t block_cache() { RWLock l(&rwlock_, false); return block_cache_; } - bool share_block_cache() { RWLock l(&rwlock_, false); return share_block_cache_; } - bool cache_index_and_filter_blocks() { RWLock l(&rwlock_, false); return cache_index_and_filter_blocks_; } - bool optimize_filters_for_hits() { RWLock l(&rwlock_, false); return optimize_filters_for_hits_; } - bool level_compaction_dynamic_level_bytes() { RWLock l(&rwlock_, false); return level_compaction_dynamic_level_bytes_; } - int expire_logs_nums() { RWLock l(&rwlock_, false); return expire_logs_nums_; } - int expire_logs_days() { RWLock l(&rwlock_, false); return expire_logs_days_; } - std::string conf_path() { RWLock l(&rwlock_, false); return conf_path_; } - bool slave_read_only() { RWLock l(&rwlock_, false); return slave_read_only_; } - int maxclients() { RWLock l(&rwlock_, false); return maxclients_; } - int root_connection_num() { RWLock l(&rwlock_, false); return root_connection_num_; } - bool slowlog_write_errorlog() { return slowlog_write_errorlog_.load();} - int slowlog_slower_than() { return slowlog_log_slower_than_.load(); } - int slowlog_max_len() { RWLock L(&rwlock_, false); return slowlog_max_len_; } - std::string network_interface() { RWLock l(&rwlock_, false); return network_interface_; } - int sync_window_size() { return sync_window_size_.load(); } + int port() { + std::shared_lock l(rwlock_); + return port_; + } + std::string slaveof() { + std::shared_lock l(rwlock_); + return slaveof_; + } + int slave_priority() { + std::shared_lock l(rwlock_); + return slave_priority_; + } + bool write_binlog() { + std::shared_lock l(rwlock_); + return write_binlog_; + } + int thread_num() { + std::shared_lock l(rwlock_); + return thread_num_; + } + int thread_pool_size() { + 
std::shared_lock l(rwlock_); + return thread_pool_size_; + } + int slow_cmd_thread_pool_size() { + std::shared_lock l(rwlock_); + return slow_cmd_thread_pool_size_; + } + int admin_thread_pool_size() { + std::shared_lock l(rwlock_); + return admin_thread_pool_size_; + } + int sync_thread_num() { + std::shared_lock l(rwlock_); + return sync_thread_num_; + } + int sync_binlog_thread_num() { + std::shared_lock l(rwlock_); + return sync_binlog_thread_num_; + } + std::string log_path() { + std::shared_lock l(rwlock_); + return log_path_; + } + int log_retention_time() { + std::shared_lock l(rwlock_); + return log_retention_time_; + } + std::string log_level() { + std::shared_lock l(rwlock_); + return log_level_; + } + std::string db_path() { + std::shared_lock l(rwlock_); + return db_path_; + } + int db_instance_num() { + return db_instance_num_; + } + uint64_t rocksdb_ttl_second() { + return rocksdb_ttl_second_.load(); + } + uint64_t rocksdb_periodic_compaction_second() { + return rocksdb_periodic_second_.load(); + } + std::string db_sync_path() { + std::shared_lock l(rwlock_); + return db_sync_path_; + } + int db_sync_speed() { + std::shared_lock l(rwlock_); + return db_sync_speed_; + } + std::string compact_cron() { + std::shared_lock l(rwlock_); + return compact_cron_; + } + std::string compact_interval() { + std::shared_lock l(rwlock_); + return compact_interval_; + } + int max_subcompactions() { + std::shared_lock l(rwlock_); + return max_subcompactions_; + } + int compact_every_num_of_files() { + std::shared_lock l(rwlock_); + return compact_every_num_of_files_; + } + int force_compact_file_age_seconds() { + std::shared_lock l(rwlock_); + return force_compact_file_age_seconds_; + } + int force_compact_min_delete_ratio() { + std::shared_lock l(rwlock_); + return force_compact_min_delete_ratio_; + } + int dont_compact_sst_created_in_seconds() { + std::shared_lock l(rwlock_); + return dont_compact_sst_created_in_seconds_; + } + int best_delete_min_ratio() { + std::shared_lock l(rwlock_); + return best_delete_min_ratio_; + } + CompactionStrategy compaction_strategy() { + std::shared_lock l(rwlock_); + return compaction_strategy_; + } + bool disable_auto_compactions() { + std::shared_lock l(rwlock_); + return disable_auto_compactions_; + } + int64_t least_resume_free_disk_size() { + std::shared_lock l(rwlock_); + return least_free_disk_to_resume_; + } + int64_t resume_interval() { + std::shared_lock l(rwlock_); + return resume_check_interval_; + } + double min_check_resume_ratio() { + std::shared_lock l(rwlock_); + return min_check_resume_ratio_; + } + int64_t write_buffer_size() { + std::shared_lock l(rwlock_); + return write_buffer_size_; + } + int min_write_buffer_number_to_merge() { + std::shared_lock l(rwlock_); + return min_write_buffer_number_to_merge_; + } + int level0_stop_writes_trigger() { + std::shared_lock l(rwlock_); + return level0_stop_writes_trigger_; + } + int level0_slowdown_writes_trigger() { + std::shared_lock l(rwlock_); + return level0_slowdown_writes_trigger_; + } + int level0_file_num_compaction_trigger() { + std::shared_lock l(rwlock_); + return level0_file_num_compaction_trigger_; + } + int64_t arena_block_size() { + std::shared_lock l(rwlock_); + return arena_block_size_; + } + int64_t slotmigrate_thread_num() { + std::shared_lock l(rwlock_); + return slotmigrate_thread_num_; + } + int64_t thread_migrate_keys_num() { + std::shared_lock l(rwlock_); + return thread_migrate_keys_num_; + } + int64_t max_write_buffer_size() { + std::shared_lock l(rwlock_); + return 
max_write_buffer_size_; + } + int max_write_buffer_number() { + std::shared_lock l(rwlock_); + return max_write_buffer_num_; + } + uint64_t MaxTotalWalSize() { + std::shared_lock l(rwlock_); + return max_total_wal_size_; + } + bool enable_db_statistics() { + return enable_db_statistics_; + } + int db_statistics_level() { + std::shared_lock l(rwlock_); + return db_statistics_level_; + } + int64_t max_client_response_size() { + std::shared_lock l(rwlock_); + return max_client_response_size_; + } + int timeout() { + std::shared_lock l(rwlock_); + return timeout_; + } + int binlog_writer_num() { + std::shared_lock l(rwlock_); + return binlog_writer_num_; + } + bool slotmigrate() { + std::shared_lock l(rwlock_); + return slotmigrate_; + } + bool slow_cmd_pool() { + std::shared_lock l(rwlock_); + return slow_cmd_pool_; + } + std::string server_id() { + std::shared_lock l(rwlock_); + return server_id_; + } + std::string run_id() { + std::shared_lock l(rwlock_); + return run_id_; + } + std::string replication_id() { + std::shared_lock l(rwlock_); + return replication_id_; + } + std::string requirepass() { + std::shared_lock l(rwlock_); + return requirepass_; + } + std::string masterauth() { + std::shared_lock l(rwlock_); + return masterauth_; + } + std::string userpass() { + std::shared_lock l(rwlock_); + return userpass_; + } + std::string bgsave_path() { + std::shared_lock l(rwlock_); + return bgsave_path_; + } + int expire_dump_days() { + std::shared_lock l(rwlock_); + return expire_dump_days_; + } + std::string bgsave_prefix() { + std::shared_lock l(rwlock_); + return bgsave_prefix_; + } + std::string user_blacklist_string() { + std::shared_lock l(rwlock_); + return pstd::StringConcat(user_blacklist_, COMMA); + } + const std::vector& user_blacklist_vector() { + std::shared_lock l(rwlock_); + return user_blacklist_; + } + bool classic_mode() { return classic_mode_.load(); } + int databases() { + std::shared_lock l(rwlock_); + return databases_; + } + int default_slot_num() { + std::shared_lock l(rwlock_); + return default_slot_num_; + } + const std::vector& db_structs() { + std::shared_lock l(rwlock_); + return db_structs_; + } + std::string default_db() { + std::shared_lock l(rwlock_); + return default_db_; + } + std::string compression() { + std::shared_lock l(rwlock_); + return compression_; + } + int64_t target_file_size_base() { + std::shared_lock l(rwlock_); + return target_file_size_base_; + } + + uint64_t max_compaction_bytes() { + std::shared_lock l(rwlock_); + return static_cast(max_compaction_bytes_); + } + + int max_cache_statistic_keys() { + std::shared_lock l(rwlock_); + return max_cache_statistic_keys_; + } + int small_compaction_threshold() { + std::shared_lock l(rwlock_); + return small_compaction_threshold_; + } + int small_compaction_duration_threshold() { + std::shared_lock l(rwlock_); + return small_compaction_duration_threshold_; + } + int max_background_flushes() { + std::shared_lock l(rwlock_); + return max_background_flushes_; + } + int max_background_compactions() { + std::shared_lock l(rwlock_); + return max_background_compactions_; + } + int max_background_jobs() { + std::shared_lock l(rwlock_); + return max_background_jobs_; + } + uint64_t delayed_write_rate(){ + std::shared_lock l(rwlock_); + return static_cast(delayed_write_rate_); + } + int max_cache_files() { + std::shared_lock l(rwlock_); + return max_cache_files_; + } + int max_bytes_for_level_multiplier() { + std::shared_lock l(rwlock_); + return max_bytes_for_level_multiplier_; + } + int64_t block_size() { + 
std::shared_lock l(rwlock_); + return block_size_; + } + int64_t block_cache() { + std::shared_lock l(rwlock_); + return block_cache_; + } + int64_t num_shard_bits() { + std::shared_lock l(rwlock_); + return num_shard_bits_; + } + bool share_block_cache() { + std::shared_lock l(rwlock_); + return share_block_cache_; + } + bool wash_data() { + std::shared_lock l(rwlock_); + return wash_data_; + } + bool enable_partitioned_index_filters() { + std::shared_lock l(rwlock_); + return enable_partitioned_index_filters_; + } + bool cache_index_and_filter_blocks() { + std::shared_lock l(rwlock_); + return cache_index_and_filter_blocks_; + } + bool pin_l0_filter_and_index_blocks_in_cache() { + std::shared_lock l(rwlock_); + return pin_l0_filter_and_index_blocks_in_cache_; + } + bool optimize_filters_for_hits() { + std::shared_lock l(rwlock_); + return optimize_filters_for_hits_; + } + bool level_compaction_dynamic_level_bytes() { + std::shared_lock l(rwlock_); + return level_compaction_dynamic_level_bytes_; + } + int expire_logs_nums() { + std::shared_lock l(rwlock_); + return expire_logs_nums_; + } + int expire_logs_days() { + std::shared_lock l(rwlock_); + return expire_logs_days_; + } + std::string conf_path() { + std::shared_lock l(rwlock_); + return conf_path_; + } + bool slave_read_only() { + std::shared_lock l(rwlock_); + return slave_read_only_; + } + int maxclients() { + std::shared_lock l(rwlock_); + return maxclients_; + } + int root_connection_num() { + std::shared_lock l(rwlock_); + return root_connection_num_; + } + bool slowlog_write_errorlog() { return slowlog_write_errorlog_.load(); } + int slowlog_slower_than() { return slowlog_log_slower_than_.load(); } + int slowlog_max_len() { + std::shared_lock l(rwlock_); + return slowlog_max_len_; + } + std::string network_interface() { + std::shared_lock l(rwlock_); + return network_interface_; + } + int cache_mode() { return cache_mode_; } + int sync_window_size() { return sync_window_size_.load(); } + int max_conn_rbuf_size() { return max_conn_rbuf_size_.load(); } + int consensus_level() { return consensus_level_.load(); } + int replication_num() { return replication_num_.load(); } std::string target_redis_host() { return target_redis_host_; } int target_redis_port() { return target_redis_port_; } std::string target_redis_pwd() { return target_redis_pwd_; } int sync_batch_num() { return sync_batch_num_; } int redis_sender_num() { return redis_sender_num_; } + + int rate_limiter_mode() { + std::shared_lock l(rwlock_); + return rate_limiter_mode_; + } + int64_t rate_limiter_bandwidth() { + std::shared_lock l(rwlock_); + return rate_limiter_bandwidth_; + } + int64_t rate_limiter_refill_period_us() { + std::shared_lock l(rwlock_); + return rate_limiter_refill_period_us_; + } + int64_t rate_limiter_fairness() { + std::shared_lock l(rwlock_); + return rate_limiter_fairness_; + } + bool rate_limiter_auto_tuned() { + std::shared_lock l(rwlock_); + return rate_limiter_auto_tuned_; + } + bool IsCacheDisabledTemporarily() { return tmp_cache_disable_flag_; } + int GetCacheString() { return cache_string_; } + int GetCacheSet() { return cache_set_; } + int GetCacheZset() { return cache_zset_; } + int GetCacheHash() { return cache_hash_; } + int GetCacheList() { return cache_list_; } + int GetCacheBit() { return cache_bit_; } + int GetCacheNum() { return cache_num_; } + void SetCacheNum(const int value) { cache_num_ = value; } + void SetCacheMode(const int value) { cache_mode_ = value; } + void SetCacheStartDirection(const int value) { 
zset_cache_start_direction_ = value; } + void SetCacheItemsPerKey(const int value) { zset_cache_field_num_per_key_ = value; } + void SetCacheMaxKeySize(const int value) { max_key_size_in_cache_ = value; } + void SetCacheMaxmemory(const int64_t value) { cache_maxmemory_ = value; } + void SetCacheMaxmemoryPolicy(const int value) { cache_maxmemory_policy_ = value; } + void SetCacheMaxmemorySamples(const int value) { cache_maxmemory_samples_ = value; } + void SetCacheLFUDecayTime(const int value) { cache_lfu_decay_time_ = value; } + void UnsetCacheDisableFlag() { tmp_cache_disable_flag_ = false; } + bool enable_blob_files() { return enable_blob_files_; } + int64_t min_blob_size() { return min_blob_size_; } + int64_t blob_file_size() { return blob_file_size_; } + std::string blob_compression_type() { return blob_compression_type_; } + bool enable_blob_garbage_collection() { return enable_blob_garbage_collection_; } + double blob_garbage_collection_age_cutoff() { return blob_garbage_collection_age_cutoff_; } + double blob_garbage_collection_force_threshold() { return blob_garbage_collection_force_threshold_; } + int64_t blob_cache() { return blob_cache_; } + int64_t blob_num_shard_bits() { return blob_num_shard_bits_; } + + // Rsync Rate limiting configuration + int throttle_bytes_per_second() { + std::shared_lock l(rwlock_); + return throttle_bytes_per_second_; + } + int max_rsync_parallel_num() { + std::shared_lock l(rwlock_); + return max_rsync_parallel_num_; + } + int64_t rsync_timeout_ms() { + return rsync_timeout_ms_.load(std::memory_order::memory_order_relaxed); + } + + // Slow Commands configuration + const std::string GetSlowCmd() { + std::shared_lock l(rwlock_); + return pstd::Set2String(slow_cmd_set_, ','); + } + + // Admin Commands configuration + const std::string GetAdminCmd() { + std::shared_lock l(rwlock_); + return pstd::Set2String(admin_cmd_set_, ','); + } + + const std::string GetUserBlackList() { + std::shared_lock l(rwlock_); + return userblacklist_; + } + + bool is_slow_cmd(const std::string& cmd) { + std::shared_lock l(rwlock_); + return slow_cmd_set_.find(cmd) != slow_cmd_set_.end(); + } + + bool is_admin_cmd(const std::string& cmd) { + return admin_cmd_set_.find(cmd) != admin_cmd_set_.end(); + } // Immutable config items, we don't use lock. 
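All of the getters and setters in this header follow the same reader/writer discipline: a `std::shared_lock` on a `std::shared_mutex` for reads, a `std::lock_guard` for writes, replacing the old `slash::RWLock` wrapper. A minimal self-contained sketch of that pattern, with illustrative names (not part of this patch):

#include <mutex>
#include <shared_mutex>
#include <string>

// Sketch of the locking pattern used by the config accessors above:
// many readers may hold the shared lock concurrently, while a writer
// takes the mutex exclusively.
class ConfigValue {
 public:
  std::string Get() {
    std::shared_lock lock(rwlock_);  // shared (read) lock
    return value_;
  }
  void Set(const std::string& v) {
    std::lock_guard lock(rwlock_);   // exclusive (write) lock
    value_ = v;
  }

 private:
  std::shared_mutex rwlock_;
  std::string value_;
};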
- bool daemonize() { return daemonize_; } - std::string pidfile() { return pidfile_; } - int binlog_file_size() { return binlog_file_size_; } + bool daemonize() { return daemonize_; } + bool rtc_cache_read_enabled() { return rtc_cache_read_enabled_; } + std::string pidfile() { return pidfile_; } + int binlog_file_size() { return binlog_file_size_; } + std::vector compression_per_level(); + std::string compression_all_levels() const { return compression_per_level_; }; + static rocksdb::CompressionType GetCompression(const std::string& value); + + std::vector& users() { return users_; }; + std::string acl_file() { return aclFile_; }; + + uint32_t acl_pubsub_default() { return acl_pubsub_default_.load(); } + uint32_t acl_log_max_len() { return acl_Log_max_len_.load(); } // Setter void SetPort(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); port_ = value; } void SetThreadNum(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); thread_num_ = value; } void SetTimeout(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("timeout", std::to_string(value)); timeout_ = value; } void SetThreadPoolSize(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); thread_pool_size_ = value; } - void SetSlaveof(const std::string value) { - RWLock l(&rwlock_, true); + + void SetLowLevelThreadPoolSize(const int value) { + std::lock_guard l(rwlock_); + slow_cmd_thread_pool_size_ = value; + } + + void SetAdminThreadPoolSize(const int value) { + std::lock_guard l(rwlock_); + admin_thread_pool_size_ = value; + } + + void SetSlaveof(const std::string& value) { + std::lock_guard l(rwlock_); TryPushDiffCommands("slaveof", value); slaveof_ = value; } + + void SetRocksdbTTLSecond(uint64_t ttl) { + rocksdb_ttl_second_.store(ttl); + } + + void SetRocksdbPeriodicSecond(uint64_t value) { + rocksdb_periodic_second_.store(value); + } + + void SetReplicationID(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("replication-id", value); + replication_id_ = value; + } void SetSlavePriority(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("slave-priority", std::to_string(value)); slave_priority_ = value; } void SetWriteBinlog(const std::string& value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("write-binlog", value); - write_binlog_ = (value == "yes") ? 
true : false; + write_binlog_ = value == "yes"; } void SetMaxCacheStatisticKeys(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("max-cache-statistic-keys", std::to_string(value)); max_cache_statistic_keys_ = value; } void SetSmallCompactionThreshold(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("small-compaction-threshold", std::to_string(value)); small_compaction_threshold_ = value; } + void SetSmallCompactionDurationThreshold(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("small-compaction-duration-threshold", std::to_string(value)); + small_compaction_duration_threshold_ = value; + } void SetMaxClientResponseSize(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("max-client-response-size", std::to_string(value)); max_client_response_size_ = value; } - void SetBgsavePath(const std::string &value) { - RWLock l(&rwlock_, true); + void SetBgsavePath(const std::string& value) { + std::lock_guard l(rwlock_); bgsave_path_ = value; if (value[value.length() - 1] != '/') { bgsave_path_ += "/"; } } void SetExpireDumpDays(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("dump-expire", std::to_string(value)); expire_dump_days_ = value; } - void SetBgsavePrefix(const std::string &value) { - RWLock l(&rwlock_, true); + void SetBgsavePrefix(const std::string& value) { + std::lock_guard l(rwlock_); TryPushDiffCommands("dump-prefix", value); bgsave_prefix_ = value; } - void SetRequirePass(const std::string &value) { - RWLock l(&rwlock_, true); + void SetRunID(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("run-id", value); + run_id_ = value; + } + void SetRequirePass(const std::string& value) { + std::lock_guard l(rwlock_); TryPushDiffCommands("requirepass", value); requirepass_ = value; } - void SetMasterAuth(const std::string &value) { - RWLock l(&rwlock_, true); + void SetMasterAuth(const std::string& value) { + std::lock_guard l(rwlock_); TryPushDiffCommands("masterauth", value); masterauth_ = value; } - void SetUserPass(const std::string &value) { - RWLock l(&rwlock_, true); + void SetUserPass(const std::string& value) { + std::lock_guard l(rwlock_); TryPushDiffCommands("userpass", value); userpass_ = value; } - void SetUserBlackList(const std::string &value) { - RWLock l(&rwlock_, true); + void SetUserBlackList(const std::string& value) { + std::lock_guard l(rwlock_); TryPushDiffCommands("userblacklist", value); - slash::StringSplit(value, COMMA, user_blacklist_); + pstd::StringSplit(value, COMMA, user_blacklist_); for (auto& item : user_blacklist_) { - slash::StringToLower(item); + pstd::StringToLower(item); } } + void SetSlotMigrate(const bool value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slotmigrate", value ? "yes" : "no"); + slotmigrate_.store(value); + } + void SetSlowCmdPool(const bool value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slow-cmd-pool", value ? 
"yes" : "no"); + slow_cmd_pool_.store(value); + } + void SetSlotMigrateThreadNum(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slotmigrate-thread-num", std::to_string(value)); + slotmigrate_thread_num_ = value; + } + void SetThreadMigrateKeysNum(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("thread-migrate-keys-num", std::to_string(value)); + thread_migrate_keys_num_ = value; + } void SetExpireLogsNums(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("expire-logs-nums", std::to_string(value)); expire_logs_nums_ = value; } void SetExpireLogsDays(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("expire-logs-days", std::to_string(value)); expire_logs_days_ = value; } void SetMaxConnection(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("maxclients", std::to_string(value)); maxclients_ = value; } void SetRootConnectionNum(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("root-connection-num", std::to_string(value)); root_connection_num_ = value; } void SetSlowlogWriteErrorlog(const bool value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("slowlog-write-errorlog", value == true ? "yes" : "no"); + std::lock_guard l(rwlock_); + TryPushDiffCommands("slowlog-write-errorlog", value ? "yes" : "no"); slowlog_write_errorlog_.store(value); } void SetSlowlogSlowerThan(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("slowlog-log-slower-than", std::to_string(value)); slowlog_log_slower_than_.store(value); } void SetSlowlogMaxLen(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("slowlog-max-len", std::to_string(value)); slowlog_max_len_ = value; } void SetDbSyncSpeed(const int value) { - RWLock l(&rwlock_, true); + std::lock_guard l(rwlock_); TryPushDiffCommands("db-sync-speed", std::to_string(value)); db_sync_speed_ = value; } - void SetCompactCron(const std::string &value) { - RWLock l(&rwlock_, true); + void SetCompactCron(const std::string& value) { + std::lock_guard l(rwlock_); TryPushDiffCommands("compact-cron", value); compact_cron_ = value; } - void SetCompactInterval(const std::string &value) { - RWLock l(&rwlock_, true); + void SetCompactInterval(const std::string& value) { + std::lock_guard l(rwlock_); TryPushDiffCommands("compact-interval", value); compact_interval_ = value; } - void SetSyncWindowSize(const int &value) { + void SetDisableAutoCompaction(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("disable_auto_compactions", value); + disable_auto_compactions_ = value == "true"; + } + void SetMaxSubcompactions(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-subcompactions", std::to_string(value)); + max_subcompactions_ = value; + } + void SetLeastResumeFreeDiskSize(const int64_t& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("least-free-disk-resume-size", std::to_string(value)); + least_free_disk_to_resume_ = value; + } + void SetResumeInterval(const int64_t& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("manually-resume-interval", std::to_string(value)); + resume_check_interval_ = value; + } + void SetMinCheckResumeRatio(const double& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("min-check-resume-ratio", std::to_string(value)); + 
min_check_resume_ratio_ = value; + } + void SetSyncWindowSize(const int& value) { TryPushDiffCommands("sync-window-size", std::to_string(value)); sync_window_size_.store(value); } + void SetMaxConnRbufSize(const int& value) { + TryPushDiffCommands("max-conn-rbuf-size", std::to_string(value)); + max_conn_rbuf_size_.store(value); + } + void SetMaxCacheFiles(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-cache-files", std::to_string(value)); + max_cache_files_ = value; + } + void SetMaxBackgroudCompactions(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-background-compactions", std::to_string(value)); + max_background_compactions_ = value; + } + void SetMaxBackgroudJobs(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-background-jobs", std::to_string(value)); + max_background_jobs_ = value; + } + void SetWriteBufferSize(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("write-buffer-size", std::to_string(value)); + write_buffer_size_ = value; + } + void SetMinWriteBufferNumberToMerge(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("min-write-buffer-number-to-merge", std::to_string(value)); + min_write_buffer_number_to_merge_ = value; + } + void SetLevel0StopWritesTrigger(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("level0-stop-writes-trigger", std::to_string(value)); + level0_stop_writes_trigger_ = value; + } + void SetLevel0SlowdownWritesTrigger(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("level0-slowdown-writes-trigger", std::to_string(value)); + level0_slowdown_writes_trigger_ = value; + } + void SetLevel0FileNumCompactionTrigger(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("level0-file-num-compaction-trigger", std::to_string(value)); + level0_file_num_compaction_trigger_ = value; + } + void SetMaxWriteBufferNumber(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-write-buffer-num", std::to_string(value)); + max_write_buffer_num_ = value; + } + void SetMaxTotalWalSize(uint64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-total-wal-size", std::to_string(value)); + max_total_wal_size_ = value; + } + void SetArenaBlockSize(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("arena-block-size", std::to_string(value)); + arena_block_size_ = value; + } + + void SetRateLmiterBandwidth(int64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("rate-limiter-bandwidth", std::to_string(value)); + rate_limiter_bandwidth_ = value; + } + + void SetDelayedWriteRate(int64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("delayed-write-rate", std::to_string(value)); + delayed_write_rate_ = value; + } + + void SetMaxCompactionBytes(int64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-compaction-bytes", std::to_string(value)); + max_compaction_bytes_ = value; + } + + void SetLogLevel(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("loglevel", value); + log_level_ = value; + } + + // Rsync Rate limiting configuration + void SetThrottleBytesPerSecond(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("throttle-bytes-per-second", std::to_string(value)); + throttle_bytes_per_second_ = value; + } + + void SetMaxRsyncParallelNum(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-rsync-parallel-num", 
std::to_string(value)); + max_rsync_parallel_num_ = value; + } + + void SetRsyncTimeoutMs(int64_t value){ + std::lock_guard l(rwlock_); + TryPushDiffCommands("rsync-timeout-ms", std::to_string(value)); + rsync_timeout_ms_.store(value); + } + + void SetAclPubsubDefault(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("acl-pubsub-default", value); + if (value == "resetchannels") { + acl_pubsub_default_ = 0; + } else { + acl_pubsub_default_ = static_cast(AclSelectorFlag::ALL_CHANNELS); + } + } + void SetAclLogMaxLen(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("acllog-max-len", std::to_string(value)); + acl_Log_max_len_ = value; + } + + const std::string scache_type() { + std::shared_lock l(rwlock_); + return pstd::StringConcat(cache_type_, COMMA); + } - Status TablePartitionsSanityCheck(const std::string& table_name, - const std::set& partition_ids, - bool is_add); - Status AddTablePartitions(const std::string& table_name, - const std::set& partition_ids); - Status RemoveTablePartitions(const std::string& table_name, - const std::set& partition_ids); + int64_t cache_maxmemory() { return cache_maxmemory_; } + void SetSlowCmd(const std::string& value) { + std::lock_guard l(rwlock_); + std::string lower_value = value; + pstd::StringToLower(lower_value); + TryPushDiffCommands("slow-cmd-list", lower_value); + pstd::StringSplit2Set(lower_value, ',', slow_cmd_set_); + } + + void SetAdminCmd(const std::string& value) { + std::lock_guard l(rwlock_); + std::string lower_value = value; + pstd::StringToLower(lower_value); + TryPushDiffCommands("admin-cmd-list", lower_value); + pstd::StringSplit2Set(lower_value, ',', admin_cmd_set_); + } + + void SetInternalUsedUnFinishedFullSync(const std::string& value) { + std::lock_guard l(rwlock_); + std::string lower_value = value; + pstd::StringToLower(lower_value); + TryPushDiffCommands("internal-used-unfinished-full-sync", lower_value); + pstd::StringSplit2Set(lower_value, ',', internal_used_unfinished_full_sync_); + } + + void AddInternalUsedUnfinishedFullSync(const std::string& db_name) { + { + std::lock_guard l(rwlock_); + internal_used_unfinished_full_sync_.insert(db_name); + std::string lower_value = pstd::Set2String(internal_used_unfinished_full_sync_, ','); + pstd::StringToLower(lower_value); + TryPushDiffCommands("internal-used-unfinished-full-sync", lower_value); + } + ConfigRewrite(); + } + + void RemoveInternalUsedUnfinishedFullSync(const std::string& db_name) { + { + std::lock_guard l(rwlock_); + internal_used_unfinished_full_sync_.erase(db_name); + std::string lower_value = pstd::Set2String(internal_used_unfinished_full_sync_, ','); + pstd::StringToLower(lower_value); + TryPushDiffCommands("internal-used-unfinished-full-sync", lower_value); + } + ConfigRewrite(); + } + + size_t GetUnfinishedFullSyncCount() { + std::shared_lock l(rwlock_); + return internal_used_unfinished_full_sync_.size(); + } + void SetCacheType(const std::string &value); + void SetCacheDisableFlag() { tmp_cache_disable_flag_ = true; } + int zset_cache_start_direction() { return zset_cache_start_direction_; } + int zset_cache_field_num_per_key() { return zset_cache_field_num_per_key_; } + int max_key_size_in_cache() { return max_key_size_in_cache_; } + int cache_maxmemory_policy() { return cache_maxmemory_policy_; } + int cache_maxmemory_samples() { return cache_maxmemory_samples_; } + int cache_lfu_decay_time() { return cache_lfu_decay_time_; } int Load(); int ConfigRewrite(); + int ConfigRewriteReplicationID(); private: - 
Status InternalGetTargetTable(const std::string& table_name, - uint32_t* const target); - - int port_; + // TODO: replace mutex with atomic value + int port_ = 0; + int slave_priority_ = 100; + int thread_num_ = 0; + int thread_pool_size_ = 0; + int slow_cmd_thread_pool_size_ = 0; + int admin_thread_pool_size_ = 0; + std::unordered_set slow_cmd_set_; + std::unordered_set admin_cmd_set_ = {"info", "ping", "monitor"}; + int sync_thread_num_ = 0; + int sync_binlog_thread_num_ = 0; + int expire_dump_days_ = 3; + int db_sync_speed_ = 0; std::string slaveof_; - int slave_priority_; - int thread_num_; - int thread_pool_size_; - int sync_thread_num_; std::string log_path_; + int log_retention_time_; + std::string log_level_; std::string db_path_; + int db_instance_num_ = 0; std::string db_sync_path_; - int expire_dump_days_; - int db_sync_speed_; + + // compact std::string compact_cron_; std::string compact_interval_; - int64_t write_buffer_size_; - int64_t max_write_buffer_size_; - int64_t max_client_response_size_; - bool daemonize_; - int timeout_; + int max_subcompactions_ = 1; + bool disable_auto_compactions_ = false; + + // for obd_compact + int compact_every_num_of_files_; + int force_compact_file_age_seconds_; + int force_compact_min_delete_ratio_; + int dont_compact_sst_created_in_seconds_; + int best_delete_min_ratio_; + CompactionStrategy compaction_strategy_; + + int64_t resume_check_interval_ = 60; // seconds + int64_t least_free_disk_to_resume_ = 268435456; // 256 MB + double min_check_resume_ratio_ = 0.7; + int64_t write_buffer_size_ = 0; + int64_t arena_block_size_ = 0; + int64_t slotmigrate_thread_num_ = 0; + int64_t thread_migrate_keys_num_ = 0; + int64_t max_write_buffer_size_ = 0; + int64_t max_total_wal_size_ = 0; + bool enable_db_statistics_ = false; + int db_statistics_level_ = 0; + int max_write_buffer_num_ = 0; + int min_write_buffer_number_to_merge_ = 1; + int level0_stop_writes_trigger_ = 36; + int level0_slowdown_writes_trigger_ = 20; + int level0_file_num_compaction_trigger_ = 4; + int64_t max_client_response_size_ = 0; + bool daemonize_ = false; + bool rtc_cache_read_enabled_ = false; + int timeout_ = 0; std::string server_id_; + std::string run_id_; + std::string replication_id_; std::string requirepass_; std::string masterauth_; std::string userpass_; std::vector user_blacklist_; std::atomic classic_mode_; - int databases_; - int default_slot_num_; - std::vector table_structs_; - std::string default_table_; + int databases_ = 0; + int default_slot_num_ = 1; + std::vector db_structs_; + std::string default_db_; std::string bgsave_path_; std::string bgsave_prefix_; std::string pidfile_; + std::atomic slow_cmd_pool_; std::string compression_; - int maxclients_; - int root_connection_num_; + std::string compression_per_level_; + int maxclients_ = 0; + int root_connection_num_ = 0; std::atomic slowlog_write_errorlog_; std::atomic slowlog_log_slower_than_; - int slowlog_max_len_; - int expire_logs_days_; - int expire_logs_nums_; - bool slave_read_only_; + std::atomic slotmigrate_; + std::atomic binlog_writer_num_; + int slowlog_max_len_ = 0; + int expire_logs_days_ = 0; + int expire_logs_nums_ = 0; + bool slave_read_only_ = false; std::string conf_path_; - int max_cache_statistic_keys_; - int small_compaction_threshold_; - int max_background_flushes_; - int max_background_compactions_; - int max_cache_files_; - int max_bytes_for_level_multiplier_; - int64_t block_size_; - int64_t block_cache_; - bool share_block_cache_; - bool cache_index_and_filter_blocks_; - bool 
optimize_filters_for_hits_; - bool level_compaction_dynamic_level_bytes_; + + int max_cache_statistic_keys_ = 0; + int small_compaction_threshold_ = 0; + int small_compaction_duration_threshold_ = 0; + int max_background_flushes_ = -1; + int max_background_compactions_ = -1; + int max_background_jobs_ = 0; + int64_t delayed_write_rate_ = 0; + int max_cache_files_ = 0; + std::atomic rocksdb_ttl_second_ = 0; + std::atomic rocksdb_periodic_second_ = 0; + int max_bytes_for_level_multiplier_ = 0; + int64_t block_size_ = 0; + int64_t block_cache_ = 0; + int64_t num_shard_bits_ = 0; + bool share_block_cache_ = false; + bool enable_partitioned_index_filters_ = false; + bool cache_index_and_filter_blocks_ = false; + bool pin_l0_filter_and_index_blocks_in_cache_ = false; + bool optimize_filters_for_hits_ = false; + bool level_compaction_dynamic_level_bytes_ = true; + int rate_limiter_mode_ = 0; // kReadsOnly = 0, kWritesOnly = 1, kAllIo = 2 + int64_t rate_limiter_bandwidth_ = 0; + int64_t rate_limiter_refill_period_us_ = 0; + int64_t rate_limiter_fairness_ = 0; + bool rate_limiter_auto_tuned_ = true; + std::atomic sync_window_size_; + std::atomic max_conn_rbuf_size_; + std::atomic consensus_level_; + std::atomic replication_num_; std::string network_interface_; + std::string userblacklist_; + std::vector users_; // acl user rules + + std::string aclFile_; + std::vector cmds_; + std::atomic acl_pubsub_default_ = 0; // default channel pub/sub permission + std::atomic acl_Log_max_len_ = 0; // default acl log max len + // diff commands between cached commands and config file commands std::map diff_commands_; void TryPushDiffCommands(const std::string& command, const std::string& value); @@ -328,13 +1084,53 @@ class PikaConf : public slash::BaseConf { // // Critical configure items // - bool write_binlog_; - int target_file_size_base_; - int binlog_file_size_; + bool write_binlog_ = false; + int64_t target_file_size_base_ = 0; + int64_t max_compaction_bytes_ = 0; + int binlog_file_size_ = 0; + + // cache + std::vector cache_type_; + std::atomic_bool tmp_cache_disable_flag_ = false; + std::atomic_int64_t cache_maxmemory_ = 10737418240; + std::atomic_int cache_num_ = 5; + std::atomic_int cache_mode_ = 1; + std::atomic_int cache_string_ = 1; + std::atomic_int cache_set_ = 1; + std::atomic_int cache_zset_ = 1; + std::atomic_int cache_hash_ = 1; + std::atomic_int cache_list_ = 1; + std::atomic_int cache_bit_ = 1; + std::atomic_int zset_cache_start_direction_ = 0; + std::atomic_int zset_cache_field_num_per_key_ = 512; + std::atomic_int max_key_size_in_cache_ = 512; + std::atomic_int cache_maxmemory_policy_ = 1; + std::atomic_int cache_maxmemory_samples_ = 5; + std::atomic_int cache_lfu_decay_time_ = 1; + + // rocksdb blob + bool enable_blob_files_ = false; + bool enable_blob_garbage_collection_ = false; + double blob_garbage_collection_age_cutoff_ = 0.25; + double blob_garbage_collection_force_threshold_ = 1.0; + int64_t min_blob_size_ = 4096; // 4K + int64_t blob_cache_ = 0; + int64_t blob_num_shard_bits_ = 0; + int64_t blob_file_size_ = 256 * 1024 * 1024; // 256M + std::string blob_compression_type_ = "none"; + + std::shared_mutex rwlock_; + + // Rsync Rate limiting configuration + int throttle_bytes_per_second_ = 200 << 20; // 200MB/s + int max_rsync_parallel_num_ = kMaxRsyncParallelNum; + std::atomic_int64_t rsync_timeout_ms_ = 1000; - PikaMeta* local_meta_; + //Internal used metrics Persisted by pika.conf + std::unordered_set internal_used_unfinished_full_sync_; - pthread_rwlock_t rwlock_; + // for wash 
data from 4.0.0 to 4.0.1
+  bool wash_data_;
 };

 #endif
diff --git a/tools/pika_migrate/include/pika_consensus.h b/tools/pika_migrate/include/pika_consensus.h
new file mode 100644
index 0000000000..bb774b5e3b
--- /dev/null
+++ b/tools/pika_migrate/include/pika_consensus.h
@@ -0,0 +1,203 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+#ifndef PIKA_CONSENSUS_H_
+#define PIKA_CONSENSUS_H_
+
+#include
+
+#include "include/pika_define.h"
+#include "pstd/include/env.h"
+#include "include/pika_binlog_transverter.h"
+#include "include/pika_client_conn.h"
+#include "include/pika_slave_node.h"
+#include "include/pika_stable_log.h"
+
+class Context : public pstd::noncopyable {
+ public:
+  Context(std::string path);
+
+  pstd::Status Init();
+  // the RWLock should be held when accessing these members
+  pstd::Status StableSave();
+  void UpdateAppliedIndex(const LogOffset& offset);
+  void Reset(const LogOffset& offset);
+
+  std::shared_mutex rwlock_;
+  LogOffset applied_index_;
+  SyncWindow applied_win_;
+
+  std::string ToString() {
+    std::stringstream tmp_stream;
+    std::shared_lock l(rwlock_);
+    tmp_stream << "  Applied_index " << applied_index_.ToString() << "\r\n";
+    tmp_stream << "  Applied window " << applied_win_.ToStringStatus();
+    return tmp_stream.str();
+  }
+
+ private:
+  std::string path_;
+  std::unique_ptr<pstd::RWFile> save_;
+};
+
+class SyncProgress {
+ public:
+  SyncProgress() = default;
+  ~SyncProgress() = default;
+  std::shared_ptr<SlaveNode> GetSlaveNode(const std::string& ip, int port);
+  std::unordered_map<std::string, std::shared_ptr<SlaveNode>> GetAllSlaveNodes();
+  pstd::Status AddSlaveNode(const std::string& ip, int port, const std::string& db_name, int session_id);
+  pstd::Status RemoveSlaveNode(const std::string& ip, int port);
+  pstd::Status Update(const std::string& ip, int port, const LogOffset& start, const LogOffset& end,
+                      LogOffset* committed_index);
+  int SlaveSize();
+
+ private:
+  std::shared_mutex rwlock_;
+  std::unordered_map<std::string, std::shared_ptr<SlaveNode>> slaves_;
+  std::unordered_map<std::string, LogOffset> match_index_;
+};
+
+class MemLog {
+ public:
+  struct LogItem {
+    LogItem(const LogOffset& _offset, std::shared_ptr<Cmd> _cmd_ptr, std::shared_ptr<PikaClientConn> _conn_ptr,
+            std::shared_ptr<std::string> _resp_ptr)
+        : offset(_offset), cmd_ptr(std::move(_cmd_ptr)), conn_ptr(std::move(_conn_ptr)), resp_ptr(std::move(_resp_ptr)) {}
+    LogOffset offset;
+    std::shared_ptr<Cmd> cmd_ptr;
+    std::shared_ptr<PikaClientConn> conn_ptr;
+    std::shared_ptr<std::string> resp_ptr;
+  };
+
+  MemLog();
+  int Size();
+  void AppendLog(const LogItem& item) {
+    std::lock_guard lock(logs_mu_);
+    logs_.push_back(item);
+    last_offset_ = item.offset;
+  }
+  pstd::Status TruncateTo(const LogOffset& offset);
+
+  void Reset(const LogOffset& offset);
+
+  LogOffset last_offset() {
+    std::lock_guard lock(logs_mu_);
+    return last_offset_;
+  }
+  void SetLastOffset(const LogOffset& offset) {
+    std::lock_guard lock(logs_mu_);
+    last_offset_ = offset;
+  }
+  bool FindLogItem(const LogOffset& offset, LogOffset* found_offset);
+
+ private:
+  int InternalFindLogByBinlogOffset(const LogOffset& offset);
+  int InternalFindLogByLogicIndex(const LogOffset& offset);
+  pstd::Mutex logs_mu_;
+  std::vector<LogItem> logs_;
+  LogOffset last_offset_;
+};
+
+class ConsensusCoordinator {
+ public:
+  ConsensusCoordinator(const std::string& db_name);
+  ~ConsensusCoordinator();
+  // invoked in the constructor, so no locks are held
+  void Init();
+  // invoked by the dbsync process
+  pstd::Status Reset(const LogOffset& offset);
+
+  pstd::Status ProposeLog(const std::shared_ptr<Cmd>& cmd_ptr);
+  pstd::Status UpdateSlave(const std::string& ip, int port, const LogOffset& start, const LogOffset& end);
+  pstd::Status AddSlaveNode(const std::string& ip, int port, int session_id);
+  pstd::Status RemoveSlaveNode(const std::string& ip, int port);
+  void UpdateTerm(uint32_t term);
+  uint32_t term();
+
+  // invoked by follower
+  pstd::Status ProcessLeaderLog(const std::shared_ptr<Cmd>& cmd_ptr, const BinlogItem& attribute);
+
+  // Negotiate
+  pstd::Status LeaderNegotiate(const LogOffset& f_last_offset, bool* reject, std::vector<LogOffset>* hints);
+  pstd::Status FollowerNegotiate(const std::vector<LogOffset>& hints, LogOffset* reply_offset);
+
+  SyncProgress& SyncPros() { return sync_pros_; }
+  std::shared_ptr<StableLog> StableLogger() { return stable_logger_; }
+  std::shared_ptr<MemLog> MemLogger() { return mem_logger_; }
+
+  LogOffset committed_index() {
+    std::lock_guard lock(index_mu_);
+    return committed_index_;
+  }
+
+  std::shared_ptr<Context> context() { return context_; }
+
+  // redis parser cb
+  struct CmdPtrArg {
+    CmdPtrArg(std::shared_ptr<Cmd> ptr) : cmd_ptr(std::move(ptr)) {}
+    std::shared_ptr<Cmd> cmd_ptr;
+  };
+  static int InitCmd(net::RedisParser* parser, const net::RedisCmdArgsType& argv);
+
+  std::string ToStringStatus() {
+    std::stringstream tmp_stream;
+    {
+      std::lock_guard lock(index_mu_);
+      tmp_stream << "  Committed_index: " << committed_index_.ToString() << "\r\n";
+    }
+    tmp_stream << "  Context: "
+               << "\r\n"
+               << context_->ToString();
+    {
+      std::shared_lock lock(term_rwlock_);
+      tmp_stream << "  Term: " << term_ << "\r\n";
+    }
+    tmp_stream << "  Mem_logger size: " << mem_logger_->Size() << " last offset "
+               << mem_logger_->last_offset().ToString() << "\r\n";
+    tmp_stream << "  Stable_logger first offset " << stable_logger_->first_offset().ToString() << "\r\n";
+    LogOffset log_status;
+    stable_logger_->Logger()->GetProducerStatus(&(log_status.b_offset.filenum), &(log_status.b_offset.offset),
+                                                &(log_status.l_offset.term), &(log_status.l_offset.index));
+    tmp_stream << "  Physical Binlog Status: " << log_status.ToString() << "\r\n";
+    return tmp_stream.str();
+  }
+
+ private:
+  pstd::Status TruncateTo(const LogOffset& offset);
+
+  pstd::Status InternalAppendLog(const std::shared_ptr<Cmd>& cmd_ptr);
+  pstd::Status InternalAppendBinlog(const std::shared_ptr<Cmd>& cmd_ptr);
+  void InternalApply(const MemLog::LogItem& log);
+  void InternalApplyFollower(const std::shared_ptr<Cmd>& cmd_ptr);
+
+  pstd::Status GetBinlogOffset(const BinlogOffset& start_offset, LogOffset* log_offset);
+  pstd::Status GetBinlogOffset(const BinlogOffset& start_offset, const BinlogOffset& end_offset,
+                               std::vector<LogOffset>* log_offset);
+  pstd::Status FindBinlogFileNum(const std::map<uint32_t, std::string>& binlogs, uint64_t target_index,
+                                 uint32_t start_filenum, uint32_t* founded_filenum);
+  pstd::Status FindLogicOffsetBySearchingBinlog(const BinlogOffset& hint_offset, uint64_t target_index,
+                                                LogOffset* found_offset);
+  pstd::Status FindLogicOffset(const BinlogOffset& start_offset, uint64_t target_index, LogOffset* found_offset);
+  pstd::Status GetLogsBefore(const BinlogOffset& start_offset, std::vector<LogOffset>* hints);
+
+ private:
+  // serializes operations so the members of this class stay consistent
+  pstd::Mutex order_mu_;
+
+  pstd::Mutex index_mu_;
+  LogOffset committed_index_;
+
+  std::shared_ptr<Context> context_;
+
+  std::shared_mutex term_rwlock_;
+  uint32_t term_ = 0;
+
+  std::string db_name_;
+
+  SyncProgress sync_pros_;
+  std::shared_ptr<StableLog> stable_logger_;
+  std::shared_ptr<MemLog> mem_logger_;
+};
+#endif  // PIKA_CONSENSUS_H_
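The `SyncProgress` class declared above pairs a `std::shared_mutex` with maps keyed by slave identity. Here is a minimal self-contained sketch of that bookkeeping pattern; the class name and the `uint64_t` stand-in for `LogOffset` are illustrative, not part of this patch:

#include <memory>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_map>

// Sketch: reader/writer-locked map from "ip:port" to the offset a slave
// has acknowledged, mirroring the match_index_ bookkeeping above.
class SyncProgressSketch {
 public:
  void Update(const std::string& ip, int port, uint64_t acked_offset) {
    std::lock_guard lock(rwlock_);  // exclusive lock for the write path
    match_index_[ip + ":" + std::to_string(port)] = acked_offset;
  }
  int SlaveSize() {
    std::shared_lock lock(rwlock_);  // shared lock for the read path
    return static_cast<int>(match_index_.size());
  }

 private:
  std::shared_mutex rwlock_;
  std::unordered_map<std::string, uint64_t> match_index_;
};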
diff --git a/tools/pika_migrate/include/pika_data_distribution.h b/tools/pika_migrate/include/pika_data_distribution.h
index 19128e3704..7f8d494fe0 100644
--- a/tools/pika_migrate/include/pika_data_distribution.h
+++ b/tools/pika_migrate/include/pika_data_distribution.h
@@ -6,35 +6,23 @@
 #ifndef PIKA_DATA_DISTRIBUTION_H_
 #define PIKA_DATA_DISTRIBUTION_H_

-#include "slash/include/slash_status.h"
+#include
+#include

 // polynomial reserved Crc32 magic num
 const uint32_t IEEE_POLY = 0xedb88320;

 class PikaDataDistribution {
  public:
-  virtual ~PikaDataDistribution() = default;
+  virtual ~PikaDataDistribution() = default;
   // Initialization
   virtual void Init() = 0;
-  // key map to partition id
-  virtual uint32_t Distribute(const std::string& str, uint32_t partition_num) = 0;
 };

 class HashModulo : public PikaDataDistribution {
  public:
-  virtual ~HashModulo() = default;
-  virtual void Init();
-  virtual uint32_t Distribute(const std::string& str, uint32_t partition_num);
-};
-
-class Crc32 : public PikaDataDistribution {
- public:
-  virtual void Init();
-  virtual uint32_t Distribute(const std::string& str, uint32_t partition_num);
- private:
-  void Crc32TableInit(uint32_t poly);
-  uint32_t Crc32Update(uint32_t crc, const char* buf, int len);
-  uint32_t crc32tab[256];
+  ~HashModulo() override = default;
+  void Init() override;
 };

 #endif
diff --git a/tools/pika_migrate/include/pika_db.h b/tools/pika_migrate/include/pika_db.h
new file mode 100644
index 0000000000..3dfe3b69f5
--- /dev/null
+++ b/tools/pika_migrate/include/pika_db.h
@@ -0,0 +1,206 @@
+// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+ +#ifndef PIKA_DB_H_ +#define PIKA_DB_H_ + +#include + +#include "storage/storage.h" +#include "include/pika_command.h" +#include "lock_mgr.h" +#include "pika_cache.h" +#include "pika_define.h" +#include "storage/backupable.h" + +class PikaCache; +class CacheInfo; +/* + *Keyscan used + */ +struct KeyScanInfo { + time_t start_time = 0; + std::string s_start_time; + int32_t duration = -3; + std::vector key_infos; // the order is strings, hashes, lists, zsets, sets, streams + bool key_scaning_ = false; + KeyScanInfo() : + s_start_time("0"), + key_infos({{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}) + {} +}; + +struct BgSaveInfo { + bool bgsaving = false; + time_t start_time = 0; + std::string s_start_time; + std::string path; + LogOffset offset; + BgSaveInfo() = default; + void Clear() { + bgsaving = false; + path.clear(); + offset = LogOffset(); + } +}; + +struct DisplayCacheInfo { + int status = 0; + uint32_t cache_num = 0; + uint64_t keys_num = 0; + uint64_t used_memory = 0; + uint64_t hits = 0; + uint64_t misses = 0; + uint64_t hits_per_sec = 0; + uint64_t read_cmd_per_sec = 0; + double hitratio_per_sec = 0.0; + double hitratio_all = 0.0; + uint64_t load_keys_per_sec = 0; + uint64_t last_time_us = 0; + uint64_t last_load_keys_num = 0; + uint32_t waitting_load_keys_num = 0; + DisplayCacheInfo& operator=(const DisplayCacheInfo &obj) { + status = obj.status; + cache_num = obj.cache_num; + keys_num = obj.keys_num; + used_memory = obj.used_memory; + hits = obj.hits; + misses = obj.misses; + hits_per_sec = obj.hits_per_sec; + read_cmd_per_sec = obj.read_cmd_per_sec; + hitratio_per_sec = obj.hitratio_per_sec; + hitratio_all = obj.hitratio_all; + load_keys_per_sec = obj.load_keys_per_sec; + last_time_us = obj.last_time_us; + last_load_keys_num = obj.last_load_keys_num; + waitting_load_keys_num = obj.waitting_load_keys_num; + return *this; + } +}; + +class DB : public std::enable_shared_from_this, public pstd::noncopyable { + public: + DB(std::string db_name, const std::string& db_path, const std::string& log_path); + virtual ~DB(); + + friend class Cmd; + friend class InfoCmd; + friend class PkClusterInfoCmd; + friend class PikaServer; + + /** + * When it is the first time for upgrading version from 4.0.0 to 4.0.1, you should call + * this function to wash data. true if successful, false otherwise. 
+ * @see https://github.com/OpenAtomFoundation/pika/issues/2886 + */ + bool WashData(); + + std::string GetDBName(); + std::shared_ptr storage() const; + void GetBgSaveMetaData(std::vector* fileNames, std::string* snapshot_uuid); + void BgSaveDB(); + void SetBinlogIoError(); + void SetBinlogIoErrorrelieve(); + bool IsBinlogIoError(); + std::shared_ptr cache() const; + std::shared_mutex& GetDBLock() { + return dbs_rw_; + } + void DBLock() { + dbs_rw_.lock(); + } + void DBLockShared() { + dbs_rw_.lock_shared(); + } + void DBUnlock() { + dbs_rw_.unlock(); + } + void DBUnlockShared() { + dbs_rw_.unlock_shared(); + } + + // KeyScan use; + void KeyScan(); + bool IsKeyScaning(); + void RunKeyScan(); + void StopKeyScan(); + void ScanDatabase(const storage::DataType& type); + KeyScanInfo GetKeyScanInfo(); + + // Compact use; + void Compact(const storage::DataType& type); + void CompactRange(const storage::DataType& type, const std::string& start, const std::string& end); + void LongestNotCompactionSstCompact(const storage::DataType& type); + + void SetCompactRangeOptions(const bool is_canceled); + + std::shared_ptr LockMgr(); + /* + * Cache used + */ + DisplayCacheInfo GetCacheInfo(); + void UpdateCacheInfo(CacheInfo& cache_info); + void ResetDisplayCacheInfo(int status); + uint64_t cache_usage_; + void Init(); + bool TryUpdateMasterOffset(); + /* + * FlushDB used + */ + bool FlushDBWithoutLock(); + bool ChangeDb(const std::string& new_path); + pstd::Status GetBgSaveUUID(std::string* snapshot_uuid); + void PrepareRsync(); + bool IsBgSaving(); + BgSaveInfo bgsave_info(); + pstd::Status GetKeyNum(std::vector* key_info); + + private: + bool opened_ = false; + std::string dbsync_path_; + std::string db_name_; + std::string db_path_; + std::string snapshot_uuid_; + std::string log_path_; + std::string bgsave_sub_path_; + pstd::Mutex key_info_protector_; + std::atomic binlog_io_error_; + std::shared_mutex dbs_rw_; + // class may be shared, using shared_ptr would be a better choice + std::shared_ptr lock_mgr_; + std::shared_ptr storage_; + std::shared_ptr cache_; + /* + * KeyScan use + */ + static void DoKeyScan(void* arg); + void InitKeyScan(); + pstd::Mutex key_scan_protector_; + KeyScanInfo key_scan_info_; + /* + * Cache used + */ + DisplayCacheInfo cache_info_; + std::shared_mutex cache_info_rwlock_; + /* + * BgSave use + */ + static void DoBgSave(void* arg); + bool RunBgsaveEngine(); + + bool InitBgsaveEnv(); + bool InitBgsaveEngine(); + void ClearBgsave(); + void FinishBgsave(); + BgSaveInfo bgsave_info_; + pstd::Mutex bgsave_protector_; + std::shared_ptr bgsave_engine_; +}; + +struct BgTaskArg { + std::shared_ptr db; +}; + +#endif diff --git a/tools/pika_migrate/include/pika_define.h b/tools/pika_migrate/include/pika_define.h index 4610a84df0..3968f9072f 100644 --- a/tools/pika_migrate/include/pika_define.h +++ b/tools/pika_migrate/include/pika_define.h @@ -6,364 +6,318 @@ #ifndef PIKA_DEFINE_H_ #define PIKA_DEFINE_H_ -#include #include +#include +#include -#include "pink/include/redis_cli.h" +#include "net/include/redis_cli.h" -#define PIKA_SYNC_BUFFER_SIZE 10 -#define PIKA_MAX_WORKER_THREAD_NUM 24 -#define PIKA_REPL_SERVER_TP_SIZE 3 -#define PIKA_META_SYNC_MAX_WAIT_TIME 10 -#define PIKA_SCAN_STEP_LENGTH 1000 +/* + * TTL type + */ +#define PIKA_TTL_ZERO 0 +#define PIKA_TTL_NONE (-1) +#define PIKA_TTL_STALE (-2) + +#define PIKA_SYNC_BUFFER_SIZE 1000 +#define PIKA_MAX_WORKER_THREAD_NUM 24 +#define PIKA_REPL_SERVER_TP_SIZE 3 +#define PIKA_META_SYNC_MAX_WAIT_TIME 10 +#define PIKA_SCAN_STEP_LENGTH 1000 
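The new `PIKA_TTL_*` sentinels above appear to follow the usual Redis TTL return convention; that interpretation is an assumption on my part, not something the patch states. A small illustrative helper:

#include <cstdint>
#include <string>

// Illustrative only: how the TTL sentinels are conventionally read
// (mirroring Redis TTL semantics; assumed, not stated in the patch).
std::string DescribeTtl(int64_t ttl) {
  if (ttl == -2) return "key missing or expired (PIKA_TTL_STALE)";
  if (ttl == -1) return "key exists with no expiry (PIKA_TTL_NONE)";
  if (ttl == 0)  return "expiring immediately (PIKA_TTL_ZERO)";
  return "expires in " + std::to_string(ttl) + "s";
}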
+#define PIKA_MAX_CONN_RBUF (1 << 28) // 256MB +#define PIKA_MAX_CONN_RBUF_LB (1 << 26) // 64MB +#define PIKA_MAX_CONN_RBUF_HB (1 << 29) // 512MB +#define PIKA_SERVER_ID_MAX 65535 class PikaServer; +/* Global Const */ +constexpr int MAX_DB_NUM = 8; /* Port shift */ -const int kPortShiftRSync = 1000; +const int kPortShiftRSync = 1000; const int kPortShiftReplServer = 2000; - +const int kPortShiftRsync2 = 10001; const std::string kPikaPidFile = "pika.pid"; const std::string kPikaSecretFile = "rsync.secret"; const std::string kDefaultRsyncAuth = "default"; -struct TableStruct { - TableStruct(const std::string& tn, - const uint32_t pn, - const std::set& pi) - : table_name(tn), partition_num(pn), partition_ids(pi) {} +/* Rsync */ +const int kMaxRsyncParallelNum = 4; +constexpr int kMaxRsyncInitReTryTimes = 64; - bool operator == (const TableStruct& table_struct) const { - return table_name == table_struct.table_name - && partition_num == table_struct.partition_num - && partition_ids == table_struct.partition_ids; - } - std::string table_name; - uint32_t partition_num; - std::set partition_ids; -}; +struct DBStruct { + DBStruct(std::string tn, int32_t inst_num) + : db_name(std::move(tn)), db_instance_num(inst_num) {} -struct WorkerCronTask { - int task; - std::string ip_port; + bool operator==(const DBStruct& db_struct) const { + return db_name == db_struct.db_name && db_instance_num == db_struct.db_instance_num; + } + std::string db_name; + int32_t db_instance_num = 0; }; -typedef WorkerCronTask MonitorCronTask; -//task define -#define TASK_KILL 0 -#define TASK_KILLALL 1 -//slave item struct SlaveItem { std::string ip_port; std::string ip; int port; int conn_fd; int stage; - std::vector table_structs; + std::vector db_structs; struct timeval create_time; }; enum ReplState { - kNoConnect = 0, + kNoConnect = 0, kTryConnect = 1, - kTryDBSync = 2, + kTryDBSync = 2, kWaitDBSync = 3, - kWaitReply = 4, - kConnected = 5, - kError = 6 + kWaitReply = 4, + kConnected = 5, + kError = 6, + // set to kDBNoConnect if execute cmd 'dbslaveof db no one' + kDBNoConnect = 7 }; // debug only -const std::string ReplStateMsg[] = { - "kNoConnect", - "kTryConnect", - "kTryDBSync", - "kWaitDBSync", - "kWaitReply", - "kConnected", - "kError" -}; +const std::string ReplStateMsg[] = {"kNoConnect", "kTryConnect", "kTryDBSync", "kWaitDBSync", + "kWaitReply", "kConnected", "kError", "kDBNoConnect"}; + +struct LogicOffset { + uint32_t term{0}; + uint64_t index{0}; + LogicOffset() = default; + LogicOffset(uint32_t _term, uint64_t _index) : term(_term), index(_index) {} + LogicOffset(const LogicOffset& other) { + term = other.term; + index = other.index; + } + bool operator==(const LogicOffset& other) const { return term == other.term && index == other.index; } + bool operator!=(const LogicOffset& other) const { return term != other.term || index != other.index; } -enum SlotState { - INFREE = 0, - INBUSY = 1, + std::string ToString() const { return "term: " + std::to_string(term) + " index: " + std::to_string(index); } }; struct BinlogOffset { - uint32_t filenum; - uint64_t offset; - BinlogOffset() - : filenum(0), offset(0) {} - BinlogOffset(uint32_t num, uint64_t off) - : filenum(num), offset(off) {} + uint32_t filenum{0}; + uint64_t offset{0}; + BinlogOffset() = default; + BinlogOffset(uint32_t num, uint64_t off) : filenum(num), offset(off) {} BinlogOffset(const BinlogOffset& other) { filenum = other.filenum; offset = other.offset; } - std::string ToString() const { - return "filenum: " + std::to_string(filenum) + " offset: " 
-        + std::to_string(offset);
-  }
+  std::string ToString() const { return "filenum: " + std::to_string(filenum) + " offset: " + std::to_string(offset); }
   bool operator==(const BinlogOffset& other) const {
-    if (filenum == other.filenum && offset == other.offset) {
-      return true;
-    }
-    return false;
+    return filenum == other.filenum && offset == other.offset;
+  }
+  bool operator!=(const BinlogOffset& other) const {
+    return filenum != other.filenum || offset != other.offset;
+  }
+
+  bool operator>(const BinlogOffset& other) const {
+    return filenum > other.filenum || (filenum == other.filenum && offset > other.offset);
+  }
+  bool operator<(const BinlogOffset& other) const {
+    return filenum < other.filenum || (filenum == other.filenum && offset < other.offset);
+  }
+  bool operator<=(const BinlogOffset& other) const {
+    return filenum < other.filenum || (filenum == other.filenum && offset <= other.offset);
+  }
+  bool operator>=(const BinlogOffset& other) const {
+    return filenum > other.filenum || (filenum == other.filenum && offset >= other.offset);
   }
 };
-//dbsync arg
+struct LogOffset {
+  LogOffset(const LogOffset& _log_offset) {
+    b_offset = _log_offset.b_offset;
+    l_offset = _log_offset.l_offset;
+  }
+  LogOffset() = default;
+  LogOffset(const BinlogOffset& _b_offset, const LogicOffset& _l_offset) : b_offset(_b_offset), l_offset(_l_offset) {}
+  bool operator<(const LogOffset& other) const { return b_offset < other.b_offset; }
+  bool operator==(const LogOffset& other) const { return b_offset == other.b_offset; }
+  bool operator<=(const LogOffset& other) const { return b_offset <= other.b_offset; }
+  bool operator>=(const LogOffset& other) const { return b_offset >= other.b_offset; }
+  bool operator>(const LogOffset& other) const { return b_offset > other.b_offset; }
+  std::string ToString() const { return b_offset.ToString() + " " + l_offset.ToString(); }
+  BinlogOffset b_offset;
+  LogicOffset l_offset;
+};
+
+// dbsync arg
 struct DBSyncArg {
   PikaServer* p;
   std::string ip;
   int port;
-  std::string table_name;
-  uint32_t partition_id;
-  DBSyncArg(PikaServer* const _p,
-            const std::string& _ip,
-            int _port,
-            const std::string& _table_name,
-            uint32_t _partition_id)
-      : p(_p), ip(_ip), port(_port),
-        table_name(_table_name), partition_id(_partition_id) {}
+  std::string db_name;
+  DBSyncArg(PikaServer* const _p, std::string _ip, int _port, std::string _db_name)
+      : p(_p), ip(std::move(_ip)), port(_port), db_name(std::move(_db_name)) {}
 };
 // rm define
 enum SlaveState {
-  kSlaveNotSync = 0,
-  kSlaveDbSync = 1,
+  kSlaveNotSync = 0,
+  kSlaveDbSync = 1,
   kSlaveBinlogSync = 2,
 };
 // debug only
-const std::string SlaveStateMsg[] = {
-  "SlaveNotSync",
-  "SlaveDbSync",
-  "SlaveBinlogSync"
-};
+const std::string SlaveStateMsg[] = {"SlaveNotSync", "SlaveDbSync", "SlaveBinlogSync"};
 enum BinlogSyncState {
-  kNotSync = 0,
-  kReadFromCache = 1,
-  kReadFromFile = 2,
+  kNotSync = 0,
+  kReadFromCache = 1,
+  kReadFromFile = 2,
 };
 // debug only
-const std::string BinlogSyncStateMsg[] = {
-  "NotSync",
-  "ReadFromCache",
-  "ReadFromFile"
-};
+const std::string BinlogSyncStateMsg[] = {"NotSync", "ReadFromCache", "ReadFromFile"};
 struct BinlogChip {
-  BinlogOffset offset_;
+  LogOffset offset_;
   std::string binlog_;
-  BinlogChip(BinlogOffset offset, std::string binlog) : offset_(offset), binlog_(binlog) {
-  }
+  BinlogChip(const LogOffset& offset, std::string binlog) : offset_(offset), binlog_(std::move(binlog)) {}
   BinlogChip(const BinlogChip& binlog_chip) {
     offset_ = binlog_chip.offset_;
     binlog_ = binlog_chip.binlog_;
   }
 };
-struct PartitionInfo {
-  PartitionInfo(const std::string& table_name, uint32_t partition_id)
-      : table_name_(table_name), partition_id_(partition_id) {
-  }
-  PartitionInfo() : partition_id_(0) {
-  }
-  bool operator==(const PartitionInfo& other) const {
-    if (table_name_ == other.table_name_
-        && partition_id_ == other.partition_id_) {
-      return true;
-    }
-    return false;
-  }
-  int operator<(const PartitionInfo& other) const {
-    int ret = strcmp(table_name_.data(), other.table_name_.data());
-    if (!ret) {
-      if (partition_id_ < other.partition_id_) {
-        ret = -1;
-      } else if (partition_id_ > other.partition_id_) {
-        ret = 1;
-      } else {
-        ret = 0;
-      }
-    }
-    return ret;
+struct DBInfo {
+  DBInfo(std::string db_name)
+      : db_name_(std::move(db_name)) {}
+
+  DBInfo() = default;
+
+  bool operator==(const DBInfo& other) const {
+    return db_name_ == other.db_name_;
   }
-  std::string ToString() const {
-    return "(" + table_name_ + ":" + std::to_string(partition_id_) + ")";
+
+  bool operator<(const DBInfo& other) const {
+    return db_name_ < other.db_name_ || (db_name_ == other.db_name_);
   }
-  std::string table_name_;
-  uint32_t partition_id_;
+
+  std::string ToString() const { return "(" + db_name_ + ")"; }
+  std::string db_name_;
 };
-struct hash_partition_info {
-  size_t operator()(const PartitionInfo& n) const {
-    return std::hash<std::string>()(n.table_name_) ^ std::hash<uint32_t>()(n.partition_id_);
+/*
+ * Used to define the sorting rule of the db in the map
+ */
+struct hash_db_info {
+  size_t operator()(const DBInfo& n) const {
+    return std::hash<std::string>()(n.db_name_);
   }
 };
 class Node {
  public:
-  Node(const std::string& ip, int port) : ip_(ip), port_(port) {
-  }
+  Node(std::string ip, int port) : ip_(std::move(ip)), port_(port) {}
   virtual ~Node() = default;
-  Node() : port_(0) {
-  }
-  const std::string& Ip() const {
-    return ip_;
-  }
-  int Port() const {
-    return port_;
-  }
-  std::string ToString() const {
-    return ip_ + ":" + std::to_string(port_);
-  }
+  Node() = default;
+  const std::string& Ip() const { return ip_; }
+  int Port() const { return port_; }
+  std::string ToString() const { return ip_ + ":" + std::to_string(port_); }
+
  private:
   std::string ip_;
-  int port_;
+  int port_ = 0;
 };
 class RmNode : public Node {
  public:
-  RmNode(const std::string& ip, int port,
-         const PartitionInfo& partition_info)
-      : Node(ip, port),
-        partition_info_(partition_info),
-        session_id_(0),
-        last_send_time_(0),
-        last_recv_time_(0) {}
-
-  RmNode(const std::string& ip,
-         int port,
-         const std::string& table_name,
-         uint32_t partition_id)
+  RmNode(const std::string& ip, int port, DBInfo db_info)
+      : Node(ip, port), db_info_(std::move(db_info)) {}
+
+  RmNode(const std::string& ip, int port, const std::string& db_name)
       : Node(ip, port),
-        partition_info_(table_name, partition_id),
-        session_id_(0),
-        last_send_time_(0),
-        last_recv_time_(0) {}
-
-  RmNode(const std::string& ip,
-         int port,
-         const std::string& table_name,
-         uint32_t partition_id,
-         int32_t session_id)
+        db_info_(db_name)
+      {}
+
+  RmNode(const std::string& ip, int port, const std::string& db_name, int32_t session_id)
       : Node(ip, port),
-        partition_info_(table_name, partition_id),
-        session_id_(session_id),
-        last_send_time_(0),
-        last_recv_time_(0) {}
-
-  RmNode(const std::string& table_name,
-         uint32_t partition_id)
-      : Node(),
-        partition_info_(table_name, partition_id),
-        session_id_(0),
-        last_send_time_(0),
-        last_recv_time_(0) {}
-  RmNode()
-      : Node(),
-        partition_info_(),
-        session_id_(0),
-        last_send_time_(0),
-        last_recv_time_(0) {}
-
-  virtual ~RmNode() = default;
+        db_info_(db_name),
+        session_id_(session_id)
+      {}
+
+  RmNode(const std::string& db_name)
+      : db_info_(db_name) {}
+  RmNode() = default;
+
+  ~RmNode() override = default;
   bool operator==(const RmNode& other) const {
-    if (partition_info_.table_name_ == other.TableName()
-        && partition_info_.partition_id_ == other.PartitionId()
-        && Ip() == other.Ip() && Port() == other.Port()) {
-      return true;
-    }
-    return false;
+    return db_info_.db_name_ == other.DBName() &&
+           Ip() == other.Ip() && Port() == other.Port();
   }
-  const std::string& TableName() const {
-    return partition_info_.table_name_;
-  }
-  uint32_t PartitionId() const {
-    return partition_info_.partition_id_;
-  }
-  const PartitionInfo& NodePartitionInfo() const {
-    return partition_info_;
-  }
-  void SetSessionId(uint32_t session_id) {
-    session_id_ = session_id;
-  }
-  int32_t SessionId() const {
-    return session_id_;
-  }
+  const std::string& DBName() const { return db_info_.db_name_; }
+  const DBInfo& NodeDBInfo() const { return db_info_; }
+  void SetSessionId(int32_t session_id) { session_id_ = session_id; }
+  int32_t SessionId() const { return session_id_; }
   std::string ToString() const {
-    return "partition=" + TableName() + "_" + std::to_string(PartitionId()) + ",ip_port="
-        + Ip() + ":" + std::to_string(Port()) + ",session id=" + std::to_string(SessionId());
-  }
-  void SetLastSendTime(uint64_t last_send_time) {
-    last_send_time_ = last_send_time;
-  }
-  uint64_t LastSendTime() const {
-    return last_send_time_;
+    return "db=" + DBName() + "_,ip_port=" + Ip() + ":" +
+           std::to_string(Port()) + ",session id=" + std::to_string(SessionId());
   }
-  void SetLastRecvTime(uint64_t last_recv_time) {
-    last_recv_time_ = last_recv_time;
-  }
-  uint64_t LastRecvTime() const {
-    return last_recv_time_;
-  }
- private:
-  PartitionInfo partition_info_;
-  int32_t session_id_;
-  uint64_t last_send_time_;
-  uint64_t last_recv_time_;
-};
+  void SetLastSendTime(uint64_t last_send_time) { last_send_time_ = last_send_time; }
+  uint64_t LastSendTime() const { return last_send_time_; }
+  void SetLastRecvTime(uint64_t last_recv_time) { last_recv_time_ = last_recv_time; }
+  uint64_t LastRecvTime() const { return last_recv_time_; }
-struct hash_rm_node {
-  size_t operator()(const RmNode& n) const {
-    return std::hash<std::string>()(n.TableName()) ^ std::hash<uint32_t>()(n.PartitionId()) ^ std::hash<std::string>()(n.Ip()) ^ std::hash<int>()(n.Port());
-  }
+ private:
+  DBInfo db_info_;
+  int32_t session_id_ = 0;
+  uint64_t last_send_time_ = 0;
+  uint64_t last_recv_time_ = 0;
 };
 struct WriteTask {
   struct RmNode rm_node_;
   struct BinlogChip binlog_chip_;
-  WriteTask(RmNode rm_node, BinlogChip binlog_chip) : rm_node_(rm_node), binlog_chip_(binlog_chip) {
-  }
+  LogOffset prev_offset_;
+  WriteTask(const RmNode& rm_node, const BinlogChip& binlog_chip, const LogOffset& prev_offset)
+      : rm_node_(rm_node), binlog_chip_(binlog_chip), prev_offset_(prev_offset) {}
 };
-//slowlog define
+// slowlog define
 #define SLOWLOG_ENTRY_MAX_ARGC 32
 #define SLOWLOG_ENTRY_MAX_STRING 128
-//slowlog entry
+// slowlog entry
 struct SlowlogEntry {
   int64_t id;
   int64_t start_time;
   int64_t duration;
-  pink::RedisCmdArgsType argv;
+  net::RedisCmdArgsType argv;
 };
 #define PIKA_MIN_RESERVED_FDS 5000
-const int SLAVE_ITEM_STAGE_ONE = 1;
-const int SLAVE_ITEM_STAGE_TWO = 2;
+const int SLAVE_ITEM_STAGE_ONE = 1;
+const int SLAVE_ITEM_STAGE_TWO = 2;
-//repl_state_
-const int PIKA_REPL_NO_CONNECT = 0;
-const int PIKA_REPL_SHOULD_META_SYNC = 1;
-const int PIKA_REPL_META_SYNC_DONE = 2;
-const int PIKA_REPL_ERROR = 3;
+// repl_state_
+const int PIKA_REPL_NO_CONNECT = 0;
+const int PIKA_REPL_SHOULD_META_SYNC = 1;
+const int PIKA_REPL_META_SYNC_DONE = 2;
+const int PIKA_REPL_ERROR = 3;
-//role
-const int PIKA_ROLE_SINGLE = 0;
-const int PIKA_ROLE_SLAVE = 1;
-const int PIKA_ROLE_MASTER = 2;
+// role
+const int PIKA_ROLE_SINGLE = 0;
+const int PIKA_ROLE_SLAVE = 1;
+const int PIKA_ROLE_MASTER = 2;
 /*
- * The size of Binlogfile
+ * cache mode
  */
-//static uint64_t kBinlogSize = 128;
-//static const uint64_t kBinlogSize = 1024 * 1024 * 100;
+constexpr int PIKA_CACHE_NONE = 0;
+constexpr int PIKA_CACHE_READ = 1;
+
+/*
+ * cache size
+ */
+#define PIKA_CACHE_SIZE_MIN 536870912        // 512M
+#define PIKA_CACHE_SIZE_DEFAULT 10737418240  // 10G
 enum RecordType {
   kZeroType = 0,
@@ -398,16 +352,15 @@ const size_t kBinlogPrefixLen = 10;
 const std::string kPikaMeta = "meta";
 const std::string kManifest = "manifest";
+const std::string kContext = "context";
 /*
  * define common character
- *
  */
 #define COMMA ','
 /*
  * define reply between master and slave
- *
 */
 const std::string kInnerReplOk = "ok";
 const std::string kInnerReplWait = "wait";
@@ -421,4 +374,39 @@ const uint32_t kDBSyncMaxGap = 50;
 const std::string kDBSyncModule = "document";
 const std::string kBgsaveInfoFile = "info";
+
+/*
+ * cache status
+ */
+const int PIKA_CACHE_STATUS_NONE = 0;
+const int PIKA_CACHE_STATUS_INIT = 1;
+const int PIKA_CACHE_STATUS_OK = 2;
+const int PIKA_CACHE_STATUS_RESET = 3;
+const int PIKA_CACHE_STATUS_DESTROY = 4;
+const int PIKA_CACHE_STATUS_CLEAR = 5;
+const int CACHE_START_FROM_BEGIN = 0;
+const int CACHE_START_FROM_END = -1;
+
+/*
+ * key type
+ */
+const char PIKA_KEY_TYPE_KV = 'k';
+const char PIKA_KEY_TYPE_HASH = 'h';
+const char PIKA_KEY_TYPE_LIST = 'l';
+const char PIKA_KEY_TYPE_SET = 's';
+const char PIKA_KEY_TYPE_ZSET = 'z';
+
+/*
+ * cache task type
+ */
+enum CacheBgTask {
+  CACHE_BGTASK_CLEAR = 0,
+  CACHE_BGTASK_RESET_NUM = 1,
+  CACHE_BGTASK_RESET_CFG = 2
+};
+
+const int64_t CACHE_LOAD_QUEUE_MAX_SIZE = 2048;
+const int64_t CACHE_VALUE_ITEM_MAX_SIZE = 2048;
+const int64_t CACHE_LOAD_NUM_ONE_TIME = 256;
+
 #endif
diff --git a/tools/pika_migrate/include/pika_dispatch_thread.h b/tools/pika_migrate/include/pika_dispatch_thread.h
index 8c52053013..01a6fe96b0 100644
--- a/tools/pika_migrate/include/pika_dispatch_thread.h
+++ b/tools/pika_migrate/include/pika_dispatch_thread.h
@@ -10,48 +10,47 @@
 class PikaDispatchThread {
  public:
-  PikaDispatchThread(std::set<std::string> &ips, int port, int work_num,
-                     int cron_interval, int queue_limit);
+  PikaDispatchThread(std::set<std::string>& ips, int port, int work_num, int cron_interval, int queue_limit,
+                     int max_conn_rbuf_size);
   ~PikaDispatchThread();
   int StartThread();
-
-  int64_t ThreadClientList(std::vector<ClientInfo> *clients);
+  void StopThread();
+  uint64_t ThreadClientList(std::vector<ClientInfo>* clients);
   bool ClientKill(const std::string& ip_port);
   void ClientKillAll();
-  void SetQueueLimit(int queue_limit) {
-    thread_rep_->SetQueueLimit(queue_limit);
-  }
+  void SetQueueLimit(int queue_limit) { thread_rep_->SetQueueLimit(queue_limit); }
+
+  void UnAuthUserAndKillClient(const std::set<std::string> &users, const std::shared_ptr<User>& defaultUser);
+  net::ServerThread* server_thread() { return thread_rep_; }
 private:
-  class ClientConnFactory : public pink::ConnFactory {
+  class ClientConnFactory : public net::ConnFactory {
    public:
-    virtual std::shared_ptr<pink::PinkConn> NewPinkConn(
-        int connfd,
-        const std::string &ip_port,
-        pink::Thread* server_thread,
-        void* worker_specific_data,
-        pink::PinkEpoll* pink_epoll) const {
-      return std::make_shared<PikaClientConn>(connfd, ip_port, server_thread,
-                                              pink_epoll, pink::HandleType::kAsynchronous);
+    explicit ClientConnFactory(int max_conn_rbuf_size) : max_conn_rbuf_size_(max_conn_rbuf_size) {}
+    std::shared_ptr<net::NetConn> NewNetConn(int connfd, const std::string& ip_port, net::Thread* server_thread,
+                                             void* worker_specific_data, net::NetMultiplexer* net) const override {
+      return std::make_shared<PikaClientConn>(connfd, ip_port, server_thread, net, net::HandleType::kAsynchronous, max_conn_rbuf_size_);
     }
+
+   private:
+    int max_conn_rbuf_size_ = 0;
   };
-  class Handles : public pink::ServerHandle {
+  class Handles : public net::ServerHandle {
    public:
-    explicit Handles(PikaDispatchThread* pika_disptcher)
-        : pika_disptcher_(pika_disptcher) {
-    }
-    using pink::ServerHandle::AccessHandle;
+    explicit Handles(PikaDispatchThread* pika_disptcher) : pika_disptcher_(pika_disptcher) {}
+    using net::ServerHandle::AccessHandle;
     bool AccessHandle(std::string& ip) const override;
     void CronHandle() const override;
    private:
-    PikaDispatchThread* pika_disptcher_;
+    PikaDispatchThread* pika_disptcher_ = nullptr;
   };
   ClientConnFactory conn_factory_;
   Handles handles_;
-  pink::ServerThread* thread_rep_;
+  net::ServerThread* thread_rep_ = nullptr;
 };
 #endif
diff --git a/tools/pika_migrate/include/pika_geo.h b/tools/pika_migrate/include/pika_geo.h
index 6d8ac4495c..70b287da03 100644
--- a/tools/pika_migrate/include/pika_geo.h
+++ b/tools/pika_migrate/include/pika_geo.h
@@ -6,14 +6,16 @@
 #ifndef PIKA_GEO_H_
 #define PIKA_GEO_H_
+#include "include/pika_db.h"
+#include "include/acl.h"
 #include "include/pika_command.h"
-#include "include/pika_partition.h"
+#include "storage/storage.h"
 /*
 * zset
 */
 enum Sort {
-  Unsort, //default
+  Unsort,  // default
   Asc,
   Desc
 };
@@ -50,92 +52,97 @@ struct GeoRange {
 class GeoAddCmd : public Cmd {
  public:
-  GeoAddCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  GeoAddCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::GEO)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new GeoAddCmd(*this);
-  }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new GeoAddCmd(*this); }
+
 private:
  std::string key_;
  std::vector<GeoPoint> pos_;
-  virtual void DoInitial();
+  void DoInitial() override;
 };
 class GeoPosCmd : public Cmd {
  public:
-  GeoPosCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  GeoPosCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::GEO)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new GeoPosCmd(*this);
-  }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new GeoPosCmd(*this); }
+
 private:
  std::string key_;
  std::vector<std::string> members_;
-  virtual void DoInitial();
+  void DoInitial() override;
 };
 class GeoDistCmd : public Cmd {
  public:
-  GeoDistCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  GeoDistCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::GEO)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new GeoDistCmd(*this);
-  }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new GeoDistCmd(*this); }
+
 private:
  std::string key_, first_pos_, second_pos_, unit_;
-  virtual void DoInitial();
+  void DoInitial() override;
 };
 class GeoHashCmd : public Cmd {
  public:
-  GeoHashCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  GeoHashCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::GEO)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new GeoHashCmd(*this);
-  }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override{};
+  Cmd* Clone() override { return new GeoHashCmd(*this); }
+
 private:
  std::string key_;
  std::vector<std::string> members_;
-  virtual void DoInitial();
+  void DoInitial() override;
 };
 class GeoRadiusCmd : public Cmd {
  public:
-  GeoRadiusCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new GeoRadiusCmd(*this);
-  }
+  GeoRadiusCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::GEO)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new GeoRadiusCmd(*this); }
+
 private:
  std::string key_;
  GeoRange range_;
-  virtual void DoInitial();
-  virtual void Clear() {
+  void DoInitial() override;
+  void Clear() override {
    range_.withdist = false;
    range_.withcoord = false;
    range_.withhash = false;
@@ -150,17 +157,18 @@ class GeoRadiusCmd : public Cmd {
 class GeoRadiusByMemberCmd : public Cmd {
  public:
-  GeoRadiusByMemberCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new GeoRadiusByMemberCmd(*this);
-  }
+  GeoRadiusByMemberCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::GEO)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new GeoRadiusByMemberCmd(*this); }
+
 private:
  std::string key_;
  GeoRange range_;
-  virtual void DoInitial();
-  virtual void Clear() {
+  void DoInitial() override;
+  void Clear() override {
    range_.withdist = false;
    range_.withcoord = false;
    range_.withhash = false;
diff --git a/tools/pika_migrate/include/pika_geohash.h b/tools/pika_migrate/include/pika_geohash.h
index e963839a4a..1ba348515e 100644
--- a/tools/pika_migrate/include/pika_geohash.h
+++ b/tools/pika_migrate/include/pika_geohash.h
@@ -32,9 +32,8 @@
 #ifndef PIKA_GEOHASH_H_
 #define PIKA_GEOHASH_H_
-#include <stddef.h>
-#include <stdint.h>
-#include <stdbool.h>
+#include <cstddef>
+#include <cstdint>
 #if defined(__cplusplus)
 extern "C" {
@@ -42,75 +41,59 @@ extern "C" {
 #define HASHISZERO(r) (!(r).bits && !(r).step)
 #define RANGEISZERO(r) (!(r).max && !(r).min)
-#define RANGEPISZERO(r) (r == NULL || RANGEISZERO(*r))
+#define RANGEPISZERO(r) ((r) == nullptr || RANGEISZERO(*(r)))
-#define GEO_STEP_MAX 26 /* 26*2 = 52 bits. */
+#define GEO_STEP_MAX 26 /* 26 * 2 = 52 bits. */
 /* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */
-#define GEO_LAT_MIN -85.05112878
-#define GEO_LAT_MAX 85.05112878
-#define GEO_LONG_MIN -180
-#define GEO_LONG_MAX 180
+constexpr double GEO_LAT_MIN{-85.05112878};
+constexpr double GEO_LAT_MAX{85.05112878};
+constexpr int64_t GEO_LONG_MIN{-180};
+constexpr int64_t GEO_LONG_MAX{180};
-typedef enum {
-  GEOHASH_NORTH = 0,
-  GEOHASH_EAST,
-  GEOHASH_WEST,
-  GEOHASH_SOUTH,
-  GEOHASH_SOUTH_WEST,
-  GEOHASH_SOUTH_EAST,
-  GEOHASH_NORT_WEST,
-  GEOHASH_NORT_EAST
-} GeoDirection;
+struct GeoHashBits {
+  uint64_t bits;
+  uint8_t step;
+};
-typedef struct {
-  uint64_t bits;
-  uint8_t step;
-} GeoHashBits;
+struct GeoHashRange {
+  double min;
+  double max;
+};
-typedef struct {
-  double min;
-  double max;
-} GeoHashRange;
+struct GeoHashArea {
+  GeoHashBits hash;
+  GeoHashRange longitude;
+  GeoHashRange latitude;
+};
-typedef struct {
-  GeoHashBits hash;
-  GeoHashRange longitude;
-  GeoHashRange latitude;
-} GeoHashArea;
-typedef struct {
-  GeoHashBits north;
-  GeoHashBits east;
-  GeoHashBits west;
-  GeoHashBits south;
-  GeoHashBits north_east;
-  GeoHashBits south_east;
-  GeoHashBits north_west;
-  GeoHashBits south_west;
-} GeoHashNeighbors;
+struct GeoHashNeighbors {
+  GeoHashBits north;
+  GeoHashBits east;
+  GeoHashBits west;
+  GeoHashBits south;
+  GeoHashBits north_east;
+  GeoHashBits south_east;
+  GeoHashBits north_west;
+  GeoHashBits south_west;
+};
 /*
 * 0:success
 * -1:failed
 */
-void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range);
-int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range,
-                  double longitude, double latitude, uint8_t step,
-                  GeoHashBits *hash);
-int geohashEncodeType(double longitude, double latitude,
-                      uint8_t step, GeoHashBits *hash);
-int geohashEncodeWGS84(double longitude, double latitude, uint8_t step,
-                       GeoHashBits *hash);
-int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range,
-                  const GeoHashBits hash, GeoHashArea *area);
-int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area);
-int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area);
-int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy);
-int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy);
-int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy);
-int geohashDecodeToLongLatMercator(const GeoHashBits hash, double *xy);
-void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors);
+void geohashGetCoordRange(GeoHashRange* long_range, GeoHashRange* lat_range);
+int geohashEncode(const GeoHashRange* long_range, const GeoHashRange* lat_range, double longitude, double latitude,
+                  uint8_t step, GeoHashBits* hash);
+int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits* hash);
+int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, GeoHashBits* hash);
+int geohashDecode(GeoHashRange long_range, GeoHashRange lat_range, GeoHashBits hash,
+                  GeoHashArea* area);
+int geohashDecodeType(GeoHashBits hash, GeoHashArea* area);
+int geohashDecodeAreaToLongLat(const GeoHashArea* area, double* xy);
+int geohashDecodeToLongLatType(GeoHashBits hash, double* xy);
+int geohashDecodeToLongLatWGS84(GeoHashBits hash, double* xy);
+void geohashNeighbors(const GeoHashBits* hash, GeoHashNeighbors* neighbors);
 #if defined(__cplusplus)
 }
diff --git a/tools/pika_migrate/include/pika_geohash_helper.h b/tools/pika_migrate/include/pika_geohash_helper.h
index 0642455fa4..63ad4782a2 100644
--- a/tools/pika_migrate/include/pika_geohash_helper.h
+++ b/tools/pika_migrate/include/pika_geohash_helper.h
@@ -34,37 +34,23 @@
 #include "include/pika_geohash.h"
-#define GZERO(s) s.bits = s.step = 0;
-#define GISZERO(s) (!s.bits && !s.step)
-#define GISNOTZERO(s) (s.bits || s.step)
+#define GZERO(s) s.bits = (s).step = 0;
-typedef uint64_t GeoHashFix52Bits;
-typedef uint64_t GeoHashVarBits;
+using GeoHashFix52Bits = uint64_t;
-typedef struct {
-  GeoHashBits hash;
-  GeoHashArea area;
-  GeoHashNeighbors neighbors;
-} GeoHashRadius;
+struct GeoHashRadius {
+  GeoHashBits hash;
+  GeoHashArea area;
+  GeoHashNeighbors neighbors;
+};
-int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b);
 uint8_t geohashEstimateStepsByRadius(double range_meters, double lat);
-int geohashBoundingBox(double longitude, double latitude, double radius_meters,
-                       double *bounds);
-GeoHashRadius geohashGetAreasByRadius(double longitude,
-                                      double latitude, double radius_meters);
-GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude,
-                                           double radius_meters);
-GeoHashRadius geohashGetAreasByRadiusMercator(double longitude, double latitude,
-                                              double radius_meters);
-GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash);
-double geohashGetDistance(double lon1d, double lat1d,
-                          double lon2d, double lat2d);
-int geohashGetDistanceIfInRadius(double x1, double y1,
-                                 double x2, double y2, double radius,
-                                 double *distance);
-int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2,
-                                      double y2, double radius,
-                                      double *distance);
+int geohashBoundingBox(double longitude, double latitude, double radius_meters, double* bounds);
+GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters);
+GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters);
+GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits& hash);
+double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d);
+int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double* distance);
+int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double y2, double radius, double* distance);
 #endif /* PIKA_GEOHASH_HELPER_HPP_ */
diff --git a/tools/pika_migrate/include/pika_hash.h b/tools/pika_migrate/include/pika_hash.h
index 0658f0e73e..1362040682 100644
--- a/tools/pika_migrate/include/pika_hash.h
+++ b/tools/pika_migrate/include/pika_hash.h
@@ -6,288 +6,358 @@
 #ifndef PIKA_HASH_H_
 #define PIKA_HASH_H_
-#include "blackwidow/blackwidow.h"
-
+#include "storage/storage.h"
+#include "include/acl.h"
 #include "include/pika_command.h"
-#include "include/pika_partition.h"
+#include "include/pika_db.h"
+#include "storage/storage.h"
 /*
 * hash
 */
 class HDelCmd : public Cmd {
  public:
-  HDelCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HDelCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HDelCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HDelCmd(*this); }
+
 private:
  std::string key_;
  std::vector<std::string> fields_;
-  virtual void DoInitial() override;
+  int32_t deleted_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HGetCmd : public Cmd {
  public:
-  HGetCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HGetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HGetCmd(*this);
-  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  bool IsTooLargeKey(const int &max_sz) override { return key_.size() > static_cast<uint32_t>(max_sz); }
+  Cmd* Clone() override { return new HGetCmd(*this); }
+
 private:
  std::string key_, field_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HGetallCmd : public Cmd {
  public:
-  HGetallCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HGetallCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HGetallCmd(*this);
-  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HGetallCmd(*this); }
+
 private:
  std::string key_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HSetCmd : public Cmd {
  public:
-  HSetCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HSetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HSetCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HSetCmd(*this); }
+
 private:
  std::string key_, field_, value_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HExistsCmd : public Cmd {
  public:
-  HExistsCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HExistsCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HExistsCmd(*this);
-  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HExistsCmd(*this); }
+
 private:
  std::string key_, field_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HIncrbyCmd : public Cmd {
  public:
-  HIncrbyCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HIncrbyCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HIncrbyCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HIncrbyCmd(*this); }
+
 private:
  std::string key_, field_;
-  int64_t by_;
-  virtual void DoInitial() override;
+  int64_t by_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HIncrbyfloatCmd : public Cmd {
  public:
-  HIncrbyfloatCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HIncrbyfloatCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HIncrbyfloatCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HIncrbyfloatCmd(*this); }
+
 private:
  std::string key_, field_, by_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HKeysCmd : public Cmd {
  public:
-  HKeysCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HKeysCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
     std::vector<std::string> res;
     res.push_back(key_);
     return res;
   }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HKeysCmd(*this);
-  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HKeysCmd(*this); }
+
 private:
  std::string key_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HLenCmd : public Cmd {
  public:
-  HLenCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HLenCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HLenCmd(*this);
-  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HLenCmd(*this); }
+
 private:
  std::string key_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HMgetCmd : public Cmd {
  public:
-  HMgetCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HMgetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HMgetCmd(*this);
-  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HMgetCmd(*this); }
+
 private:
  std::string key_;
  std::vector<std::string> fields_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HMsetCmd : public Cmd {
  public:
-  HMsetCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HMsetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HMsetCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HMsetCmd(*this); }
+
 private:
  std::string key_;
-  std::vector<blackwidow::FieldValue> fvs_;
-  virtual void DoInitial() override;
+  std::vector<storage::FieldValue> fvs_;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HSetnxCmd : public Cmd {
  public:
-  HSetnxCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HSetnxCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HSetnxCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HSetnxCmd(*this); }
+
 private:
  std::string key_, field_, value_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HStrlenCmd : public Cmd {
  public:
-  HStrlenCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HStrlenCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HStrlenCmd(*this);
-  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HStrlenCmd(*this); }
+
 private:
  std::string key_, field_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HValsCmd : public Cmd {
  public:
-  HValsCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual std::vector<std::string> current_key() const {
+  HValsCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HValsCmd(*this);
-  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HValsCmd(*this); }
+
 private:
  std::string key_, field_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class HScanCmd : public Cmd {
  public:
-  HScanCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag), pattern_("*"), count_(10) {}
-  virtual std::vector<std::string> current_key() const {
+  HScanCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)), pattern_("*") {}
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HScanCmd(*this);
-  }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HScanCmd(*this); }
+
 private:
-  std::string key_, pattern_;
-  int64_t cursor_, count_;
-  virtual void DoInitial() override;
-  virtual void Clear() {
+  std::string key_;
+  std::string pattern_;
+  int64_t cursor_;
+  int64_t count_{10};
+  void DoInitial() override;
+  void Clear() override {
    pattern_ = "*";
    count_ = 10;
  }
@@ -295,22 +365,25 @@ class HScanCmd : public Cmd {
 class HScanxCmd : public Cmd {
  public:
-  HScanxCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag), pattern_("*"), count_(10) {}
-  virtual std::vector<std::string> current_key() const {
+  HScanxCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)), pattern_("*") {}
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new HScanxCmd(*this);
-  }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HScanxCmd(*this); }
+
 private:
-  std::string key_, start_field_, pattern_;
-  int64_t count_;
-  virtual void DoInitial() override;
-  virtual void Clear() {
+  std::string key_;
+  std::string start_field_;
+  std::string pattern_;
+  int64_t count_{10};
+  void DoInitial() override;
+  void Clear() override {
"*"; count_ = 10; } @@ -318,25 +391,26 @@ class HScanxCmd : public Cmd { class PKHScanRangeCmd : public Cmd { public: - PKHScanRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), limit_(10) {} - virtual std::vector current_key() const { + PKHScanRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)), pattern_("*") {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PKHScanRangeCmd(*this); - } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHScanRangeCmd(*this); } + private: std::string key_; std::string field_start_; std::string field_end_; std::string pattern_; - int64_t limit_; - virtual void DoInitial() override; - virtual void Clear() { + int64_t limit_ = 10; + void DoInitial() override; + void Clear() override { pattern_ = "*"; limit_ = 10; } @@ -344,25 +418,26 @@ class PKHScanRangeCmd : public Cmd { class PKHRScanRangeCmd : public Cmd { public: - PKHRScanRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), limit_(10) {} - virtual std::vector current_key() const { + PKHRScanRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)), pattern_("*") {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PKHRScanRangeCmd(*this); - } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHRScanRangeCmd(*this); } + private: std::string key_; std::string field_start_; std::string field_end_; - std::string pattern_; - int64_t limit_; - virtual void DoInitial() override; - virtual void Clear() { + std::string pattern_ = "*"; + int64_t limit_ = 10; + void DoInitial() override; + void Clear() override { pattern_ = "*"; limit_ = 10; } diff --git a/tools/pika_migrate/include/pika_hyperloglog.h b/tools/pika_migrate/include/pika_hyperloglog.h index ecf3b9036f..77c374642f 100644 --- a/tools/pika_migrate/include/pika_hyperloglog.h +++ b/tools/pika_migrate/include/pika_hyperloglog.h @@ -7,63 +7,69 @@ #define PIKA_HYPERLOGLOG_H_ #include "include/pika_command.h" -#include "include/pika_partition.h" - +#include "include/pika_kv.h" /* * hyperloglog */ class PfAddCmd : public Cmd { public: - PfAddCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + PfAddCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PfAddCmd(*this); - } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PfAddCmd(*this); } + private: std::string key_; std::vector values_; - virtual void DoInitial() override; - virtual void Clear() { - values_.clear(); - } + void DoInitial() override; + void Clear() override { values_.clear(); } }; class PfCountCmd : 
 class PfCountCmd : public Cmd {
  public:
-  PfCountCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new PfCountCmd(*this);
-  }
+  PfCountCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new PfCountCmd(*this); }
+
 private:
  std::vector<std::string> keys_;
-  virtual void DoInitial() override;
-  virtual void Clear() {
-    keys_.clear();
-  }
+  void DoInitial() override;
+  void Clear() override { keys_.clear(); }
 };
 class PfMergeCmd : public Cmd {
  public:
-  PfMergeCmd(const std::string& name, int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {}
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new PfMergeCmd(*this);
+  PfMergeCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {
+    set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv);
+  }
+  PfMergeCmd(const PfMergeCmd& other)
+      : Cmd(other), keys_(other.keys_), value_to_dest_(other.value_to_dest_) {
+    set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv);
+  }
+  std::vector<std::string> current_key() const override {
+    return keys_;
   }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new PfMergeCmd(*this); }
+  void DoBinlog() override;
+
 private:
  std::vector<std::string> keys_;
-  virtual void DoInitial() override;
-  virtual void Clear() {
-    keys_.clear();
-  }
+  void DoInitial() override;
+  void Clear() override { keys_.clear(); }
+  // used for write binlog
+  std::string value_to_dest_;
+  std::shared_ptr<SetCmd> set_cmd_;
 };
 #endif
diff --git a/tools/pika_migrate/include/pika_instant.h b/tools/pika_migrate/include/pika_instant.h
new file mode 100644
index 0000000000..630e5478a0
--- /dev/null
+++ b/tools/pika_migrate/include/pika_instant.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_PIKA_INSTANT_H
+#define PIKA_PIKA_INSTANT_H
+
+#include <string>
+#include <unordered_map>
+
+inline constexpr size_t STATS_METRIC_SAMPLES = 16; /* Number of samples per metric. */
+inline const std::string STATS_METRIC_NET_INPUT = "stats_metric_net_input";
+inline const std::string STATS_METRIC_NET_OUTPUT = "stats_metric_net_output";
+inline const std::string STATS_METRIC_NET_INPUT_REPLICATION = "stats_metric_net_input_replication";
+inline const std::string STATS_METRIC_NET_OUTPUT_REPLICATION = "stats_metric_net_output_replication";
+
+/* The following two are used to track instantaneous metrics, like
+ * number of operations per second, network traffic. */
+struct InstMetric{
+  size_t last_sample_base;   /* The divisor of last sample window */
+  size_t last_sample_value;  /* The dividend of last sample window */
+  double samples[STATS_METRIC_SAMPLES];
+  int idx;
+};
+
+class Instant {
+ public:
+  Instant() = default;
+  ~Instant() = default;
+
+  void trackInstantaneousMetric(std::string metric, size_t current_value, size_t current_base, size_t factor);
+  double getInstantaneousMetric(std::string metric);
+
+ private:
+  std::unordered_map<std::string, InstMetric> inst_metrics_;
+};
+
+#endif  // PIKA_PIKA_INSTANT_H
diff --git a/tools/pika_migrate/include/pika_kv.h b/tools/pika_migrate/include/pika_kv.h
index f23c8c07ca..7da694705b 100644
--- a/tools/pika_migrate/include/pika_kv.h
+++ b/tools/pika_migrate/include/pika_kv.h
@@ -6,731 +6,874 @@
 #ifndef PIKA_KV_H_
 #define PIKA_KV_H_
-#include "blackwidow/blackwidow.h"
-
+#include "storage/storage.h"
+#include "include/pika_db.h"
+#include "include/acl.h"
 #include "include/pika_command.h"
-#include "include/pika_partition.h"
-
 /*
 * kv
 */
 class SetCmd : public Cmd {
  public:
-  enum SetCondition {kNONE, kNX, kXX, kVX, kEXORPX};
-  SetCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag), sec_(0), condition_(kNONE) {};
-  virtual std::vector<std::string> current_key() const {
+  enum SetCondition { kNONE, kNX, kXX, kVX, kEXORPX };
+  SetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){};
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new SetCmd(*this);
-  }
+  void Do() override;
+  void DoUpdateCache() override;
+  void DoThroughDB() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  bool IsTooLargeKey(const int& max_sz) override { return key_.size() > static_cast<uint32_t>(max_sz); }
+  Cmd* Clone() override { return new SetCmd(*this); }
 private:
  std::string key_;
  std::string value_;
  std::string target_;
-  int32_t success_;
-  int64_t sec_;
-  SetCmd::SetCondition condition_;
-  virtual void DoInitial() override;
-  virtual void Clear() override {
-    sec_ = 0;
+  int32_t success_ = 0;
+  int64_t ttl_millsec = 0;
+  bool has_ttl_ = false;
+  SetCmd::SetCondition condition_{kNONE};
+  void DoInitial() override;
+  void Clear() override {
+    ttl_millsec = 0;
    success_ = 0;
    condition_ = kNONE;
  }
-  virtual std::string ToBinlog(
-      uint32_t exec_time,
-      const std::string& server_id,
-      uint64_t logic_id,
-      uint32_t filenum,
-      uint64_t offset) override;
+  std::string ToRedisProtocol() override;
+  rocksdb::Status s_;
 };
 class GetCmd : public Cmd {
  public:
-  GetCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {};
-  virtual std::vector<std::string> current_key() const {
+  GetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){};
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new GetCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void ReadCache() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  bool IsTooLargeKey(const int &max_sz) override { return key_.size() > static_cast<uint32_t>(max_sz); }
+  Cmd* Clone() override { return new GetCmd(*this); }
+
 private:
  std::string key_;
-  virtual void DoInitial() override;
+  std::string value_;
+  int64_t ttl_millsec_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class DelCmd : public Cmd {
  public:
-  DelCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {};
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual std::vector<std::string> current_key() const {
-    return keys_;
-  }
-  virtual Cmd* Clone() override {
-    return new DelCmd(*this);
-  }
+  DelCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)){};
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  std::vector<std::string> current_key() const override { return keys_; }
+  void Split(const HintKeys& hint_keys) override;
+  void Merge() override;
+  Cmd* Clone() override { return new DelCmd(*this); }
+  void DoBinlog() override;
 private:
  std::vector<std::string> keys_;
-  virtual void DoInitial() override;
+  int64_t split_res_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class IncrCmd : public Cmd {
  public:
-  IncrCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {};
-  virtual std::vector<std::string> current_key() const {
+  IncrCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){};
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new IncrCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new IncrCmd(*this); }
+
 private:
  std::string key_;
-  int64_t new_value_;
-  virtual void DoInitial() override;
+  int64_t new_value_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
+  int64_t expired_timestamp_millsec_ = 0;
+  std::string ToRedisProtocol() override;
 };
 class IncrbyCmd : public Cmd {
  public:
-  IncrbyCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {};
-  virtual std::vector<std::string> current_key() const {
+  IncrbyCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){};
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new IncrbyCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new IncrbyCmd(*this); }
+
 private:
  std::string key_;
-  int64_t by_, new_value_;
-  virtual void DoInitial() override;
+  int64_t by_ = 0, new_value_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
+  int64_t expired_timestamp_millsec_ = 0;
+  std::string ToRedisProtocol() override;
 };
 class IncrbyfloatCmd : public Cmd {
  public:
-  IncrbyfloatCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {};
-  virtual std::vector<std::string> current_key() const {
+  IncrbyfloatCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){};
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new IncrbyfloatCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new IncrbyfloatCmd(*this); }
+
 private:
  std::string key_, value_, new_value_;
-  double by_;
-  virtual void DoInitial() override;
+  double by_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
+  int64_t expired_timestamp_millsec_ = 0;
+  std::string ToRedisProtocol() override;
 };
 class DecrCmd : public Cmd {
  public:
-  DecrCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {};
-  virtual std::vector<std::string> current_key() const {
+  DecrCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){};
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new DecrCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new DecrCmd(*this); }
+
 private:
  std::string key_;
-  int64_t new_value_;
-  virtual void DoInitial() override;
+  int64_t new_value_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class DecrbyCmd : public Cmd {
  public:
-  DecrbyCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {};
-  virtual std::vector<std::string> current_key() const {
+  DecrbyCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){};
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new DecrbyCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new DecrbyCmd(*this); }
+
 private:
  std::string key_;
-  int64_t by_, new_value_;
-  virtual void DoInitial() override;
+  int64_t by_ = 0, new_value_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class GetsetCmd : public Cmd {
  public:
-  GetsetCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {};
-  virtual std::vector<std::string> current_key() const {
+  GetsetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){};
+  std::vector<std::string> current_key() const override {
    std::vector<std::string> res;
    res.push_back(key_);
    return res;
  }
-  virtual void Do(std::shared_ptr<Partition> partition = nullptr);
-  virtual Cmd* Clone() override {
-    return new GetsetCmd(*this);
-  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new GetsetCmd(*this); }
+
 private:
  std::string key_;
  std::string new_value_;
-  virtual void DoInitial() override;
+  void DoInitial() override;
+  rocksdb::Status s_;
 };
 class AppendCmd : public Cmd {
  public:
-  AppendCmd(const std::string& name , int arity, uint16_t flag)
-      : Cmd(name, arity, flag) {};
-  virtual std::vector<std::string> current_key() const {
+  AppendCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){};
class AppendCmd : public Cmd { public: - AppendCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector<std::string> current_key() const { + AppendCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new AppendCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new AppendCmd(*this); } + private: std::string key_; std::string value_; - virtual void DoInitial() override; + std::string new_value_; + void DoInitial() override; + rocksdb::Status s_; + int64_t expired_timestamp_millsec_ = 0; + std::string ToRedisProtocol() override; }; class MgetCmd : public Cmd { public: - MgetCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual std::vector<std::string> current_key() const { - return keys_; - } - virtual Cmd* Clone() override { - return new MgetCmd(*this); - } + MgetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)){}; + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + std::vector<std::string> current_key() const override { return keys_; } + void Split(const HintKeys& hint_keys) override; + void Merge() override; + Cmd* Clone() override { return new MgetCmd(*this); } + + private: + void DoInitial() override; + void MergeCachedAndDbResults(); + void AssembleResponseFromCache(); private: std::vector<std::string> keys_; - virtual void DoInitial() override; + std::vector<std::string> cache_miss_keys_; + std::string value_; + std::unordered_map<std::string, std::string> cache_hit_values_; + std::vector<storage::ValueStatus> split_res_; + std::vector<storage::ValueStatus> db_value_status_array_; + std::vector<storage::ValueStatus> cache_value_status_array_; + rocksdb::Status s_; }; class KeysCmd : public Cmd { public: - KeysCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag), type_(blackwidow::DataType::kAll) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new KeysCmd(*this); - } + KeysCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new KeysCmd(*this); } + private: std::string pattern_; - blackwidow::DataType type_; - virtual void DoInitial() override; - virtual void Clear() { - type_ = blackwidow::DataType::kAll; - } + storage::DataType type_{storage::DataType::kAll}; + void DoInitial() override; + void Clear() override { type_ = storage::DataType::kAll; } + rocksdb::Status s_; }; class SetnxCmd : public Cmd { public: - SetnxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + SetnxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new SetnxCmd(*this); - } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SetnxCmd(*this); } + private: std::string key_; std::string value_; - int32_t success_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; + int32_t success_ = 0; + void DoInitial() override; + rocksdb::Status s_; + std::string ToRedisProtocol() override; };
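SetnxCmd drops the five-argument ToBinlog() hook and gains ToRedisProtocol(), suggesting the binlog now stores a plain RESP-encoded command; a conditional write like SETNX would then presumably be logged as an unconditional SET only when it succeeded, so replicas need not re-evaluate the NX condition. A self-contained sketch of the RESP encoding (the helper name and the SET rewrite are assumptions):

#include <string>
#include <vector>

// Encode argv as a RESP array, e.g. {"set","k","v"} -> "*3\r\n$3\r\nset\r\n...".
std::string ToRespArraySketch(const std::vector<std::string>& argv) {
  std::string resp = "*" + std::to_string(argv.size()) + "\r\n";
  for (const auto& arg : argv) {
    resp += "$" + std::to_string(arg.size()) + "\r\n" + arg + "\r\n";
  }
  return resp;
}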
class SetexCmd : public Cmd { public: - SetexCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + SetexCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new SetexCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SetexCmd(*this); } + private: std::string key_; - int64_t sec_; + int64_t ttl_sec_ = 0; std::string value_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; + void DoInitial() override; + rocksdb::Status s_; + std::string ToRedisProtocol() override; }; class PsetexCmd : public Cmd { public: - PsetexCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + PsetexCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new PsetexCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PsetexCmd(*this); } + private: std::string key_; - int64_t usec_; + int64_t ttl_millsec = 0; std::string value_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; + void DoInitial() override; + rocksdb::Status s_; + std::string ToRedisProtocol() override; }; class DelvxCmd : public Cmd { public: - DelvxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + DelvxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new DelvxCmd(*this); - } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DelvxCmd(*this); } + private: std::string key_; std::string value_; - int32_t success_; - virtual void DoInitial() override; + int32_t success_ = 0; + void DoInitial() override; + rocksdb::Status s_; };
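SetexCmd and PsetexCmd likewise replace ToBinlog() with ToRedisProtocol(), and the renamed fields (ttl_sec_, ttl_millsec) make the reason visible: a relative TTL replayed later would let the key outlive its intended deadline, so the logged form presumably carries an absolute expiry in the style of the PKSetexAtCmd declared further down. The arithmetic, as a sketch (the exact binlog command is an assumption):

#include <chrono>
#include <cstdint>

// "SETEX k 10 v" executed at wall-clock second 1000 would replicate as an
// absolute-deadline write ("PKSETEXAT k 1010 v"-style), not as "10 seconds".
int64_t DeadlineFromTtlSecSketch(int64_t ttl_sec) {
  using namespace std::chrono;
  int64_t now_sec = duration_cast<seconds>(system_clock::now().time_since_epoch()).count();
  return now_sec + ttl_sec;
}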
class MsetCmd : public Cmd { public: - MsetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual std::vector<std::string> current_key() const { + MsetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) { + set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); + } + MsetCmd(const MsetCmd& other) : Cmd(other), kvs_(other.kvs_) { + set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); + } + + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + std::vector<std::string> current_key() const override { std::vector<std::string> res; for (auto& kv : kvs_) { res.push_back(kv.key); } return res; } - virtual Cmd* Clone() override { - return new MsetCmd(*this); - } + void Split(const HintKeys& hint_keys) override; + void Merge() override; + Cmd* Clone() override { return new MsetCmd(*this); } + void DoBinlog() override; + private: - std::vector<blackwidow::KeyValue> kvs_; - virtual void DoInitial() override; + std::vector<storage::KeyValue> kvs_; + void DoInitial() override; + // used for write binlog + std::shared_ptr<SetCmd> set_cmd_; + rocksdb::Status s_; }; class MsetnxCmd : public Cmd { public: - MsetnxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new MsetnxCmd(*this); + MsetnxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) { + set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); + } + MsetnxCmd(const MsetnxCmd& other) + : Cmd(other), kvs_(other.kvs_), success_(other.success_) { + set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); } + std::vector<std::string> current_key() const override { + std::vector<std::string> res; + for (auto& kv : kvs_) { + res.push_back(kv.key); + } + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new MsetnxCmd(*this); } + void DoBinlog() override; + private: - std::vector<blackwidow::KeyValue> kvs_; - int32_t success_; - virtual void DoInitial() override; + std::vector<storage::KeyValue> kvs_; + int32_t success_ = 0; + void DoInitial() override; + // used for write binlog + std::shared_ptr<SetCmd> set_cmd_; }; class GetrangeCmd : public Cmd { public: - GetrangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + GetrangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new GetrangeCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new GetrangeCmd(*this); } + private: std::string key_; - int64_t start_; - int64_t end_; - virtual void DoInitial() override; + int64_t start_ = 0; + int64_t end_ = 0; + std::string value_; + int64_t sec_ = 0; + rocksdb::Status s_; + void DoInitial() override; };
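MsetCmd and MsetnxCmd now carry an embedded SetCmd (rebuilt, not shared, in the copy constructor so each clone owns its helper) and override DoBinlog(), which points at per-key logging: a multi-key write is presumably recorded as N independent SETs, the unit that Split() and slot hashing can route. A structure-only sketch; the callback stands in for set_cmd_->DoBinlog():

#include <functional>
#include <string>
#include <vector>

struct KvSketch { std::string key, value; };

// Hedged sketch: fan an MSET out into one logged SET per key-value pair.
void MsetDoBinlogSketch(const std::vector<KvSketch>& kvs,
                        const std::function<void(const std::string&, const std::string&)>& log_one_set) {
  for (const auto& kv : kvs) {
    log_one_set(kv.key, kv.value);  // each logged entry carries exactly one key
  }
}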
class SetrangeCmd : public Cmd { public: - SetrangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + SetrangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new SetrangeCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SetrangeCmd(*this); } + private: std::string key_; - int64_t offset_; + int64_t offset_ = 0; std::string value_; - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; }; class StrlenCmd : public Cmd { public: - StrlenCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + StrlenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::STRING)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new StrlenCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new StrlenCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + std::string value_; + int64_t ttl_millsec = 0; + void DoInitial() override; + rocksdb::Status s_; }; class ExistsCmd : public Cmd { public: - ExistsCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual std::vector<std::string> current_key() const { - return keys_; - } - virtual Cmd* Clone() override { - return new ExistsCmd(*this); - } + ExistsCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + std::vector<std::string> current_key() const override { return keys_; } + void Split(const HintKeys& hint_keys) override; + void Merge() override; + Cmd* Clone() override { return new ExistsCmd(*this); } private: std::vector<std::string> keys_; - virtual void DoInitial() override; + int64_t split_res_ = 0; + void DoInitial() override; }; class ExpireCmd : public Cmd { public: - ExpireCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + ExpireCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new ExpireCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ExpireCmd(*this); } + private: std::string key_; - int64_t sec_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; + int64_t ttl_sec_ = 0; + void DoInitial() override; + std::string ToRedisProtocol() override; + rocksdb::Status s_; }; class PexpireCmd : public Cmd { public: - PexpireCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + PexpireCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new PexpireCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PexpireCmd(*this); } + private: std::string key_; - int64_t msec_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; + int64_t ttl_millsec = 0; + void DoInitial() override; + std::string ToRedisProtocol() override; + rocksdb::Status s_; }; class ExpireatCmd : public Cmd { public: - ExpireatCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + ExpireatCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new ExpireatCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ExpireatCmd(*this); } + private: std::string key_; - int64_t time_stamp_; - virtual void DoInitial() override; + int64_t time_stamp_sec_ = 0; + void DoInitial() override; + rocksdb::Status s_; }; class PexpireatCmd : public Cmd { public: - PexpireatCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + PexpireatCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new PexpireatCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PexpireatCmd(*this); } + private: std::string key_; - int64_t time_stamp_ms_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; + int64_t time_stamp_millsec_ = 0; + void DoInitial() override; + rocksdb::Status s_; }; class TtlCmd : public Cmd { public: - TtlCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + TtlCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new TtlCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new TtlCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; }; class PttlCmd : public Cmd { public: - PttlCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + PttlCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new PttlCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PttlCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; }; class PersistCmd : public Cmd { public: - PersistCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + PersistCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new PersistCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PersistCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; }; class TypeCmd : public Cmd { public: - TypeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector<std::string> current_key() const { + TypeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new TypeCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new TypeCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; }; class ScanCmd : public Cmd { public: - ScanCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new ScanCmd(*this); - } + ScanCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)), pattern_("*") {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ScanCmd(*this); } + private: - int64_t cursor_; - std::string pattern_; - int64_t count_; - virtual void DoInitial() override; - virtual void Clear() { + int64_t cursor_ = 0; + std::string pattern_ = "*"; + int64_t count_ = 10; + storage::DataType type_ = storage::DataType::kAll; + void DoInitial() override; + void Clear() override { pattern_ = "*"; count_ = 10; + type_ = storage::DataType::kAll; } + rocksdb::Status s_; }; class ScanxCmd : public Cmd { public: - ScanxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new ScanxCmd(*this); - } + ScanxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)), pattern_("*") {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ScanxCmd(*this); } + private: - blackwidow::DataType type_; + storage::DataType type_; std::string start_key_; - std::string pattern_; - int64_t count_; - virtual void DoInitial() override; - virtual void Clear() { + std::string pattern_ = "*"; + int64_t count_ = 10; + void DoInitial() override; + void Clear() override { pattern_ = "*"; count_ = 10; } + rocksdb::Status s_; }; class PKSetexAtCmd : public Cmd { -public: - PKSetexAtCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), time_stamp_(0) {} - virtual std::vector<std::string> current_key() const { + public: + PKSetexAtCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)) {} + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new PKSetexAtCmd(*this); - } -private: + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKSetexAtCmd(*this); } + + private: std::string key_; std::string value_; - int64_t time_stamp_; - virtual void DoInitial() override; - virtual void Clear() { - time_stamp_ = 0; - } + int64_t time_stamp_sec_ = 0; + void DoInitial() override; + void Clear() override { time_stamp_sec_ = 0; } + rocksdb::Status s_; };
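ScanCmd keeps cursor_/pattern_/count_ (defaults "*" and 10, restored by Clear() because command objects are cloned and reused) and adds a storage::DataType filter. A hedged sketch of the optional-argument walk a SCAN parser plausibly performs; the function shape and bool error reporting are assumptions, pika's real DoInitial() raises protocol errors instead:

#include <cstdint>
#include <string>
#include <strings.h>  // strcasecmp
#include <vector>

// SCAN cursor [MATCH pattern] [COUNT count]; TYPE handling elided.
bool ParseScanArgsSketch(const std::vector<std::string>& argv, int64_t* cursor,
                         std::string* pattern, int64_t* count) {
  if (argv.size() < 2) return false;
  *cursor = std::stoll(argv[1]);
  *pattern = "*";  // the same defaults Clear() resets to
  *count = 10;
  for (size_t i = 2; i + 1 < argv.size(); i += 2) {
    if (strcasecmp(argv[i].c_str(), "match") == 0) {
      *pattern = argv[i + 1];
    } else if (strcasecmp(argv[i].c_str(), "count") == 0) {
      *count = std::stoll(argv[i + 1]);
    } else {
      return false;
    }
  }
  return true;
}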
class PKScanRangeCmd : public Cmd { public: - PKScanRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), limit_(10), string_with_value(false) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new PKScanRangeCmd(*this); + PKScanRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)), pattern_("*") {} + std::vector<std::string> current_key() const override { + std::vector<std::string> res; + res.push_back(key_start_); + return res; } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKScanRangeCmd(*this); } + private: - blackwidow::DataType type_; + storage::DataType type_; std::string key_start_; std::string key_end_; - std::string pattern_; - int64_t limit_; - bool string_with_value; - virtual void DoInitial() override; - virtual void Clear() { + std::string pattern_ = "*"; + int64_t limit_ = 10; + bool string_with_value = false; + void DoInitial() override; + void Clear() override { pattern_ = "*"; limit_ = 10; string_with_value = false; } + rocksdb::Status s_; }; class PKRScanRangeCmd : public Cmd { public: - PKRScanRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), limit_(10), string_with_value(false) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new PKRScanRangeCmd(*this); + PKRScanRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::KEYSPACE)), pattern_("*") {} + std::vector<std::string> current_key() const override { + std::vector<std::string> res; + res.push_back(key_start_); + return res; } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKRScanRangeCmd(*this); } + private: - blackwidow::DataType type_; + storage::DataType type_ = storage::DataType::kAll; std::string key_start_; std::string key_end_; - std::string pattern_; - int64_t limit_; - bool string_with_value; - virtual void DoInitial() override; - virtual void Clear() { + std::string pattern_ = "*"; + int64_t limit_ = 10; + bool string_with_value = false; + void DoInitial() override; + void Clear() override { pattern_ = "*"; limit_ = 10; string_with_value = false; } + rocksdb::Status s_; }; #endif diff --git a/tools/pika_migrate/include/pika_list.h b/tools/pika_migrate/include/pika_list.h index 3f18129554..1591e76c32 100644 --- a/tools/pika_migrate/include/pika_list.h +++ b/tools/pika_migrate/include/pika_list.h @@ -6,284 +6,424 @@ #ifndef PIKA_LIST_H_ #define PIKA_LIST_H_ -#include "blackwidow/blackwidow.h" - +#include "include/acl.h" #include "include/pika_command.h" -#include "include/pika_partition.h" +#include "storage/storage.h" /* * list */ class LIndexCmd : public Cmd { public: - LIndexCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), index_(0) {}; - virtual std::vector<std::string> current_key() const { + LIndexCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LIndexCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LIndexCmd(*this); } + private: std::string key_; - int64_t index_; - virtual void DoInitial() override; - virtual void Clear() { - index_ = 0; - } + int64_t index_ = 0; + void DoInitial() override; + void Clear() override { index_ = 0; } + rocksdb::Status s_; }; class LInsertCmd : public Cmd { public: - LInsertCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), dir_(blackwidow::After) {}; - virtual std::vector<std::string> current_key() const { + LInsertCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LInsertCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LInsertCmd(*this); } + private: std::string key_; - blackwidow::BeforeOrAfter dir_; + storage::BeforeOrAfter dir_{storage::After}; std::string pivot_; std::string value_; - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; }; class LLenCmd : public Cmd { public: - LLenCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector<std::string> current_key() const { + LLenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LLenCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LLenCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; +}; + +class BlockingBaseCmd : public Cmd { + public: + BlockingBaseCmd(const std::string& name, int arity, uint32_t flag, uint32_t category = 0) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST) | category) {} + + // blpop/brpop used start + struct WriteBinlogOfPopArgs { + BlockKeyType block_type; + std::string key; + std::shared_ptr<DB> db; + std::shared_ptr<net::NetConn> conn; + WriteBinlogOfPopArgs() = default; + WriteBinlogOfPopArgs(BlockKeyType block_type_, const std::string& key_, std::shared_ptr<DB> db_, + std::shared_ptr<net::NetConn> conn_) + : block_type(block_type_), key(key_), db(db_), conn(conn_) {} + }; + void BlockThisClientToWaitLRPush(BlockKeyType block_pop_type, std::vector<std::string>& keys, int64_t expire_time); + void TryToServeBLrPopWithThisKey(const std::string& key, std::shared_ptr<DB> db); + static void ServeAndUnblockConns(void* args); + static void WriteBinlogOfPopAndUpdateCache(std::vector<WriteBinlogOfPopArgs>& pop_args); + void removeDuplicates(std::vector<std::string>& keys_); + // blpop/brpop used functions end +}; + +class BLPopCmd final : public BlockingBaseCmd { + public: + BLPopCmd(const std::string& name, int arity, uint32_t flag) + : BlockingBaseCmd(name, arity, flag, static_cast<uint32_t>(AclCategory::BLOCKING)){}; + std::vector<std::string> current_key() const override { return {keys_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new BLPopCmd(*this); } + void DoInitial() override; + void DoBinlog() override; + + private: + std::vector<std::string> keys_; + int64_t expire_time_{0}; + WriteBinlogOfPopArgs binlog_args_; + bool is_binlog_deferred_{false}; + rocksdb::Status s_; +};
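BlockingBaseCmd is the new shared machinery behind BLPOP/BRPOP (and, below, the push commands that wake them): a client popping from an empty list is parked with a deadline via BlockThisClientToWaitLRPush(), and a later push calls TryToServeBLrPopWithThisKey(), which hands waiting connections to ServeAndUnblockConns(). A control-flow sketch only; the registry type and FIFO policy are assumptions:

#include <cstdint>
#include <deque>
#include <map>
#include <string>

struct BlockedConnSketch { int fd; int64_t deadline_ms; };
// key -> clients blocked on that key, oldest first
std::map<std::string, std::deque<BlockedConnSketch>> g_waiters;

void OnListPushSketch(const std::string& key) {  // from the LPUSH/RPUSH path
  auto it = g_waiters.find(key);
  if (it == g_waiters.end() || it->second.empty()) return;
  BlockedConnSketch conn = it->second.front();   // serve the longest waiter
  it->second.pop_front();
  // pop one element, reply on conn.fd, then record the pop in binlog/cache --
  // the jobs of ServeAndUnblockConns() and WriteBinlogOfPopAndUpdateCache().
}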
class LPopCmd : public Cmd { public: - LPopCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector<std::string> current_key() const { + LPopCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LPopCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LPopCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + std::int64_t count_ = 1; + void DoInitial() override; + rocksdb::Status s_; }; -class LPushCmd : public Cmd { +class LPushCmd : public BlockingBaseCmd { public: - LPushCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector<std::string> current_key() const { + LPushCmd(const std::string& name, int arity, uint32_t flag) : BlockingBaseCmd(name, arity, flag){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LPushCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LPushCmd(*this); } + private: std::string key_; std::vector<std::string> values_; - virtual void DoInitial() override; - virtual void Clear() { - values_.clear(); - } + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { values_.clear(); } }; class LPushxCmd : public Cmd { public: - LPushxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector<std::string> current_key() const { + LPushxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LPushxCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LPushxCmd(*this); } + private: std::string key_; - std::string value_; - virtual void DoInitial() override; + rocksdb::Status s_; + std::vector<std::string> values_; + void DoInitial() override; }; class LRangeCmd : public Cmd { public: - LRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_(0), right_(0) {}; - virtual std::vector<std::string> current_key() const { + LRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LRangeCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LRangeCmd(*this); } + private: std::string key_; - int64_t left_; - int64_t right_; - virtual void DoInitial() override; + int64_t left_ = 0; + int64_t right_ = 0; + rocksdb::Status s_; + void DoInitial() override; }; class LRemCmd : public Cmd { public: - LRemCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), count_(0) {}; - virtual std::vector<std::string> current_key() const { + LRemCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LRemCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LRemCmd(*this); } + private: std::string key_; - int64_t count_; + int64_t count_ = 0; std::string value_; - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; class LSetCmd : public Cmd { public: - LSetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), index_(0) {}; - virtual std::vector<std::string> current_key() const { + LSetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LSetCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LSetCmd(*this); } + private: std::string key_; - int64_t index_; + int64_t index_ = 0; + rocksdb::Status s_; std::string value_; - virtual void DoInitial() override; + void DoInitial() override; }; class LTrimCmd : public Cmd { public: - LTrimCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), start_(0), stop_(0) {}; - virtual std::vector<std::string> current_key() const { + LTrimCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new LTrimCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LTrimCmd(*this); } + private: std::string key_; - int64_t start_; - int64_t stop_; - virtual void DoInitial() override; + int64_t start_ = 0; + int64_t stop_ = 0; + rocksdb::Status s_; + void DoInitial() override; +}; + +class BRPopCmd final : public BlockingBaseCmd { + public: + BRPopCmd(const std::string& name, int arity, uint32_t flag) + : BlockingBaseCmd(name, arity, flag, static_cast<uint32_t>(AclCategory::BLOCKING)){}; + std::vector<std::string> current_key() const override { return {keys_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new BRPopCmd(*this); } + void DoInitial() override; + void DoBinlog() override; + + private: + std::vector<std::string> keys_; + int64_t expire_time_{0}; + WriteBinlogOfPopArgs binlog_args_; + bool is_binlog_deferred_{false}; }; class RPopCmd : public Cmd { public: - RPopCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector<std::string> current_key() const { + RPopCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new RPopCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new RPopCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + std::int64_t count_ = 1; + void DoInitial() override; + rocksdb::Status s_; }; -class RPopLPushCmd : public Cmd { +class RPopLPushCmd : public BlockingBaseCmd { public: - RPopLPushCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new RPopLPushCmd(*this); + RPopLPushCmd(const std::string& name, int arity, uint32_t flag) + : BlockingBaseCmd(name, arity, flag, static_cast<uint32_t>(AclCategory::BLOCKING)) { + rpop_cmd_ = std::make_shared<RPopCmd>(kCmdNameRPop, 2, kCmdFlagsWrite | kCmdFlagsList); + lpush_cmd_ = std::make_shared<LPushCmd>(kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsList); + }; + RPopLPushCmd(const RPopLPushCmd& other) + : BlockingBaseCmd(other), + source_(other.source_), + receiver_(other.receiver_), + value_poped_from_source_(other.value_poped_from_source_), + is_write_binlog_(other.is_write_binlog_) { + rpop_cmd_ = std::make_shared<RPopCmd>(kCmdNameRPop, 2, kCmdFlagsWrite | kCmdFlagsList); + lpush_cmd_ = std::make_shared<LPushCmd>(kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsList); } + std::vector<std::string> current_key() const override { + std::vector<std::string> res; + res.push_back(receiver_); + res.push_back(source_); + return res; + } + void Do() override; + void ReadCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new RPopLPushCmd(*this); } + void DoBinlog() override; + private: std::string source_; std::string receiver_; - virtual void DoInitial() override; + std::string value_poped_from_source_; + bool is_write_binlog_ = false; + // used for write binlog + std::shared_ptr<RPopCmd> rpop_cmd_; + std::shared_ptr<LPushCmd> lpush_cmd_; + rocksdb::Status s_; + void DoInitial() override; };
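RPopLPushCmd embeds an RPopCmd and an LPushCmd and tracks value_poped_from_source_/is_write_binlog_, so a single RPOPLPUSH is presumably replicated as its two primitives, and not at all when the source was empty. A shape-only sketch; the callbacks stand in for the embedded commands' DoBinlog():

#include <functional>
#include <string>

void RpoplpushDoBinlogSketch(
    bool popped, const std::string& source, const std::string& receiver,
    const std::string& value,
    const std::function<void(const std::string&)>& log_rpop,
    const std::function<void(const std::string&, const std::string&)>& log_lpush) {
  if (!popped) return;         // empty source: nothing to replicate
  log_rpop(source);            // replicas drop the same tail element...
  log_lpush(receiver, value);  // ...and prepend it to the destination
}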
-class RPushCmd : public Cmd { +class RPushCmd : public BlockingBaseCmd { public: - RPushCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector<std::string> current_key() const { + RPushCmd(const std::string& name, int arity, uint32_t flag) : BlockingBaseCmd(name, arity, flag){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new RPushCmd(*this); - } + void Do() override; + + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new RPushCmd(*this); } + private: std::string key_; std::vector<std::string> values_; - virtual void DoInitial() override; - virtual void Clear() { - values_.clear(); - } + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { values_.clear(); } }; class RPushxCmd : public Cmd { public: - RPushxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector<std::string> current_key() const { + RPushxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::LIST)){}; + std::vector<std::string> current_key() const override { std::vector<std::string> res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr<Partition> partition = nullptr); - virtual Cmd* Clone() override { - return new RPushxCmd(*this); - } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new RPushxCmd(*this); } + private: std::string key_; std::string value_; - virtual void DoInitial() override; + std::vector<std::string> values_; + rocksdb::Status s_; + void DoInitial() override; }; #endif diff --git a/tools/pika_migrate/include/pika_meta.h b/tools/pika_migrate/include/pika_meta.h deleted file mode 100644 index de576bfa63..0000000000 --- a/tools/pika_migrate/include/pika_meta.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_META -#define PIKA_META - -#include "slash/include/env.h" -#include "slash/include/slash_mutex.h" - -#include "include/pika_define.h" - -using slash::Status; - -class PikaMeta { - public: - PikaMeta(); - ~PikaMeta(); - - void SetPath(const std::string& path); - - Status StableSave(const std::vector<TableStruct>& table_structs); - Status ParseMeta(std::vector<TableStruct>* const table_structs); - - private: - pthread_rwlock_t rwlock_; - std::string local_meta_path_; - - // No copying allowed; - PikaMeta(const PikaMeta&); - void operator=(const PikaMeta&); -}; - -#endif diff --git a/tools/pika_migrate/include/pika_migrate_thread.h b/tools/pika_migrate/include/pika_migrate_thread.h new file mode 100644 index 0000000000..50a3658eca --- /dev/null +++ b/tools/pika_migrate/include/pika_migrate_thread.h @@ -0,0 +1,118 @@ +#ifndef PIKA_MIGRATE_THREAD_H_ +#define PIKA_MIGRATE_THREAD_H_ + +#include "include/pika_client_conn.h" +#include "include/pika_command.h" +#include "net/include/net_cli.h" +#include "net/include/net_thread.h" +#include "pika_client_conn.h" +#include "pika_db.h" +#include "storage/storage.h" +#include "storage/src/base_data_key_format.h" +#include "strings.h" + +void WriteDelKeyToBinlog(const std::string& key, const std::shared_ptr<DB>& db); + +class PikaMigrateThread; +class DB; +class PikaParseSendThread : public net::Thread { + public: + PikaParseSendThread(PikaMigrateThread* migrate_thread, const std::shared_ptr<DB>& db_); + ~PikaParseSendThread() override; + bool Init(const std::string& ip, int64_t port, int64_t timeout_ms, int64_t mgrtkeys_num); + void ExitThread(void); + + private: + int MigrateOneKey(net::NetCli* cli, const std::string& key, const char key_type, bool async); + void DelKeysAndWriteBinlog(std::deque<std::pair<const char, std::string>>& send_keys, const std::shared_ptr<DB>& db); + bool CheckMigrateRecv(int64_t need_receive_num); + void *ThreadMain() override; + + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + int32_t mgrtkeys_num_ = 0; + std::atomic<bool> should_exit_; + PikaMigrateThread *migrate_thread_ = nullptr; + net::NetCli *cli_ = nullptr; + pstd::Mutex working_mutex_; + std::shared_ptr<DB> db_; +}; + +class PikaMigrateThread : public net::Thread { + public: + PikaMigrateThread(); + ~PikaMigrateThread() override; + bool ReqMigrateBatch(const std::string& ip, int64_t port, int64_t time_out, int64_t keys_num, int64_t slot_id, + const std::shared_ptr<DB>& db); + int ReqMigrateOne(const std::string& key, const std::shared_ptr<DB>& db); + void GetMigrateStatus(std::string* ip, int64_t* port, int64_t* slot, bool* migrating, int64_t* moved, + int64_t* remained); + void CancelMigrate(void); + void IncWorkingThreadNum(void); + void DecWorkingThreadNum(void); + void OnTaskFailed(void); + void AddResponseNum(int32_t response_num); + bool IsMigrating(void) {return is_migrating_.load();} + time_t GetStartTime(void) {return start_time_;} + time_t GetEndTime(void) {return end_time_;} + std::string GetStartTimeStr(void) {return s_start_time_;} + + private: + void ResetThread(void); + void DestroyThread(bool is_self_exit); + void NotifyRequestMigrate(void); + bool IsMigrating(std::pair<const char, std::string>& kpair); + void ReadSlotKeys(const std::string& slotKey, int64_t need_read_num, int64_t& real_read_num, int32_t* finish); + bool CreateParseSendThreads(int32_t dispatch_num); + void DestroyParseSendThreads(void); + void *ThreadMain() override; + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + int64_t keys_num_ = 0; + time_t start_time_ = 0; + time_t end_time_ = 0; + std::string s_start_time_; + std::shared_ptr<DB> db_; + std::atomic<bool> is_migrating_; + std::atomic<bool> should_exit_; + std::atomic<bool> is_task_success_; + std::atomic<int32_t> send_num_; + std::atomic<int32_t> response_num_; + std::atomic<int64_t> moved_num_; + + bool request_migrate_ = false; + pstd::CondVar request_migrate_cond_; + std::mutex request_migrate_mutex_; + + int32_t workers_num_ = 0; + std::vector<PikaParseSendThread*> workers_; + + std::atomic<int32_t> working_thread_num_; + pstd::CondVar workers_cond_; + std::mutex workers_mutex_; + int64_t slot_id_ = 0; + std::deque<std::pair<const char, std::string>> mgrtone_queue_; + std::mutex mgrtone_queue_mutex_; + + int64_t cursor_ = 0; + std::deque<std::pair<const char, std::string>> mgrtkeys_queue_; + pstd::CondVar mgrtkeys_cond_; + std::mutex mgrtkeys_queue_mutex_; + + std::map<std::pair<const char, std::string>, std::string> mgrtkeys_map_; + std::mutex mgrtkeys_map_mutex_; + + std::mutex migrator_mutex_; + + friend class PikaParseSendThread; +}; + +#endif + +/* EOF */ \ No newline at end of file
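PikaMigrateThread exposes a small control surface: ReqMigrateBatch() starts (or re-arms) a slot migration, ReqMigrateOne() pushes a single key, GetMigrateStatus() reports progress, and CancelMigrate() aborts. A hedged usage sketch driving it from an admin command; the endpoint and numbers are invented, the calls mirror the declarations above:

#include <cstdint>
#include <memory>
#include <string>

void StartSlotMigrationSketch(PikaMigrateThread* migrator, const std::shared_ptr<DB>& db) {
  // dest ip/port, timeout (ms), keys per batch, slot id, source db
  bool accepted = migrator->ReqMigrateBatch("192.0.2.10", 9221, 60000, 500, 3, db);
  if (!accepted) return;  // another migration is already in flight
  std::string ip;
  int64_t port = 0, slot = 0, moved = 0, remained = 0;
  bool migrating = false;
  migrator->GetMigrateStatus(&ip, &port, &slot, &migrating, &moved, &remained);
  // surface moved/remained to the caller; CancelMigrate() aborts mid-flight.
}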
diff --git a/tools/pika_migrate/include/pika_monitor_thread.h b/tools/pika_migrate/include/pika_monitor_thread.h index f7900e2af7..27bfa24050 100644 --- a/tools/pika_migrate/include/pika_monitor_thread.h +++ b/tools/pika_migrate/include/pika_monitor_thread.h @@ -3,46 +3,45 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#ifndef PIKA_MONITOR_THREAD_H_ -#define PIKA_MONITOR_THREAD_H_ +#ifndef PIKA_MONITOR_THREAD_H_ +#define PIKA_MONITOR_THREAD_H_ -#include <vector> +#include <atomic> #include <deque> +#include <list> #include <queue> -#include <atomic> - -#include "pink/include/pink_thread.h" -#include "slash/include/slash_mutex.h" +#include "net/include/net_thread.h" +#include "pstd/include/pstd_mutex.h" #include "include/pika_define.h" #include "include/pika_client_conn.h" -class PikaMonitorThread : public pink::Thread { +class PikaMonitorThread : public net::Thread { public: PikaMonitorThread(); - virtual ~PikaMonitorThread(); + ~PikaMonitorThread() override; - void AddMonitorClient(std::shared_ptr<PikaClientConn> client_ptr); - void AddMonitorMessage(const std::string &monitor_message); - int32_t ThreadClientList(std::vector<ClientInfo>* client = NULL); + void AddMonitorClient(const std::shared_ptr<PikaClientConn>& client_ptr); + void AddMonitorMessage(const std::string& monitor_message); + int32_t ThreadClientList(std::vector<ClientInfo>* client = nullptr); bool ThreadClientKill(const std::string& ip_port = "all"); bool HasMonitorClients(); private: - void AddCronTask(MonitorCronTask task); + void AddCronTask(const MonitorCronTask& task); bool FindClient(const std::string& ip_port); - pink::WriteStatus SendMessage(int32_t fd, std::string& message); + net::WriteStatus SendMessage(int32_t fd, std::string& message); void RemoveMonitorClient(const std::string& ip_port); std::atomic<bool> has_monitor_clients_; - slash::Mutex monitor_mutex_protector_; - slash::CondVar monitor_cond_; + pstd::Mutex monitor_mutex_protector_; + pstd::CondVar monitor_cond_; std::list<ClientInfo> monitor_clients_; std::deque<std::string> monitor_messages_; std::queue<MonitorCronTask> cron_tasks_; - virtual void* ThreadMain(); + void* ThreadMain() override; void RemoveMonitorClient(int32_t client_fd); }; #endif diff --git a/tools/pika_migrate/include/pika_monotonic_time.h b/tools/pika_migrate/include/pika_monotonic_time.h new file mode 100644 index 0000000000..909fadfaec --- /dev/null +++ b/tools/pika_migrate/include/pika_monotonic_time.h @@ -0,0 +1,20 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_MONOTONIC_TIME_H +#define PIKA_MONOTONIC_TIME_H + +#include <cstdint> + +/* A counter in micro-seconds. The 'monotime' type is provided for variables + * holding a monotonic time. This will help distinguish & document that the + * variable is associated with the monotonic clock and should not be confused + * with other types of time.*/ +using monotime = uint64_t; + +// Get monotonic time in microseconds +monotime getMonotonicUs(); + +#endif // PIKA_MONOTONIC_TIME_H \ No newline at end of file
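pika_monotonic_time.h only declares getMonotonicUs(); one plausible POSIX implementation reads CLOCK_MONOTONIC, which is exactly what makes the counter safe against settimeofday()/NTP steps that would corrupt wall-clock-based timeouts (the actual pika_monotonic_time.cc may choose a different source, e.g. a TSC fast path):

#include <cstdint>
#include <ctime>

using monotime = uint64_t;

monotime getMonotonicUs() {
  timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);  // immune to wall-clock adjustments
  return static_cast<uint64_t>(ts.tv_sec) * 1000000ULL +
         static_cast<uint64_t>(ts.tv_nsec) / 1000;
}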
diff --git a/tools/pika_migrate/include/pika_partition.h b/tools/pika_migrate/include/pika_partition.h deleted file mode 100644 index 461955b85a..0000000000 --- a/tools/pika_migrate/include/pika_partition.h +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_PARTITION_H_ -#define PIKA_PARTITION_H_ - -#include "blackwidow/blackwidow.h" -#include "blackwidow/backupable.h" -#include "slash/include/scope_record_lock.h" - -#include "include/pika_binlog.h" - -class Cmd; - -/* - *Keyscan used - */ -struct KeyScanInfo { - time_t start_time; - std::string s_start_time; - int32_t duration; - std::vector<blackwidow::KeyInfo> key_infos; //the order is strings, hashes, lists, zsets, sets - bool key_scaning_; - KeyScanInfo() : - start_time(0), - s_start_time("1970-01-01 08:00:00"), - duration(-3), - key_infos({{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}), - key_scaning_(false) { - } -}; - - -struct BgSaveInfo { - bool bgsaving; - time_t start_time; - std::string s_start_time; - std::string path; - uint32_t filenum; - uint64_t offset; - BgSaveInfo() : bgsaving(false), filenum(0), offset(0) {} - void Clear() { - bgsaving = false; - path.clear(); - filenum = 0; - offset = 0; - } -}; - -class Partition : public std::enable_shared_from_this<Partition> { - public: - Partition(const std::string& table_name, - uint32_t partition_id, - const std::string& table_db_path, - const std::string& table_log_path); - virtual ~Partition(); - - std::string GetTableName() const; - uint32_t GetPartitionId() const; - std::string GetPartitionName() const; - std::shared_ptr<Binlog> logger() const; - std::shared_ptr<blackwidow::BlackWidow> db() const; - - void Compact(const blackwidow::DataType& type); - // needd to hold logger_->Lock() - Status WriteBinlog(const std::string& binlog); - - void DbRWLockWriter(); - void DbRWLockReader(); - void DbRWUnLock(); - - slash::lock::LockMgr* LockMgr(); - - void SetBinlogIoError(bool error); - bool IsBinlogIoError(); - bool GetBinlogOffset(BinlogOffset* const boffset); - bool SetBinlogOffset(const BinlogOffset& boffset); - - void PrepareRsync(); - bool TryUpdateMasterOffset(); - bool ChangeDb(const std::string& new_path); - - void Leave(); - void Close(); - void MoveToTrash(); - - // BgSave use; - bool IsBgSaving(); - void BgSavePartition(); - BgSaveInfo bgsave_info(); - - // FlushDB & FlushSubDB use - bool FlushDB(); - bool FlushSubDB(const std::string& db_name); - - // Purgelogs use - bool PurgeLogs(uint32_t to = 0, bool manual = false); - void ClearPurge(); - - // key scan info use - Status GetKeyNum(std::vector<blackwidow::KeyInfo>* key_info); - KeyScanInfo GetKeyScanInfo(); - - private: - std::string table_name_; - uint32_t partition_id_; - - std::string db_path_; - std::string log_path_; - std::string bgsave_sub_path_; - std::string dbsync_path_; - std::string partition_name_; - - bool opened_; - std::shared_ptr<Binlog> logger_; - std::atomic<bool> binlog_io_error_; - - pthread_rwlock_t db_rwlock_; - slash::lock::LockMgr* lock_mgr_; - std::shared_ptr<blackwidow::BlackWidow> db_; - - bool full_sync_; - - slash::Mutex key_info_protector_; - KeyScanInfo key_scan_info_; - - /* - * BgSave use - */ - static void DoBgSave(void* arg); - bool RunBgsaveEngine(); - bool InitBgsaveEnv(); - bool InitBgsaveEngine(); - void ClearBgsave(); - void FinishBgsave(); - BgSaveInfo bgsave_info_; - slash::Mutex bgsave_protector_; - blackwidow::BackupEngine* bgsave_engine_; - - /* - * Purgelogs use - */ - static void DoPurgeLogs(void* arg); - bool PurgeFiles(uint32_t to, bool manual); - bool GetBinlogFiles(std::map<uint32_t, std::string>& binlogs); - std::atomic<bool> purging_; - - // key scan info use - void InitKeyScan(); - - /* - * No allowed copy and copy assign - */ - Partition(const Partition&); - void operator=(const Partition&); - -}; - -struct PurgeArg { - std::shared_ptr<Partition> partition; - uint32_t to; - bool manual; - bool force; // Ignore the delete window -}; - - -#endif diff --git a/tools/pika_migrate/include/pika_pubsub.h b/tools/pika_migrate/include/pika_pubsub.h index 737dc752c0..f9f7d85a30 100644 --- a/tools/pika_migrate/include/pika_pubsub.h +++ b/tools/pika_migrate/include/pika_pubsub.h @@ -6,6 +6,7 @@ #ifndef PIKA_PUBSUB_H_ #define PIKA_PUBSUB_H_ +#include "acl.h" #include "pika_command.h" /* @@ -13,81 +14,94 @@ */ class PublishCmd : public Cmd { public: - PublishCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr) override; - virtual Cmd* Clone() override { - return new PublishCmd(*this); - } + PublishCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PublishCmd(*this); } + std::vector<std::string> current_key() const override { return {channel_}; } + private: std::string channel_; std::string msg_; - virtual void DoInitial() override; + void DoInitial() override; }; class SubscribeCmd : public Cmd { public: - SubscribeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr) override; - virtual Cmd* Clone() override { - return new SubscribeCmd(*this); - } + SubscribeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SubscribeCmd(*this); } + std::vector<std::string> current_key() const override { return channels_; } + private: - virtual void DoInitial() override; + std::vector<std::string> channels_; + void DoInitial() override; }; class UnSubscribeCmd : public Cmd { public: - UnSubscribeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr) override; - virtual Cmd* Clone() override { - return new UnSubscribeCmd(*this); - } + UnSubscribeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new UnSubscribeCmd(*this); } + std::vector<std::string> current_key() const override { return channels_; } + private: - virtual void DoInitial() override; + std::vector<std::string> channels_; + void DoInitial() override; }; class PUnSubscribeCmd : public Cmd { public: - PUnSubscribeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr<Partition> partition = nullptr) override; - virtual Cmd* Clone() override { - return new PUnSubscribeCmd(*this); - } + PUnSubscribeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PUnSubscribeCmd(*this); } + std::vector<std::string> current_key() const override { return {channels_}; } + private: - virtual void DoInitial() override; + std::vector<std::string> channels_; + void DoInitial() override; };
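Every rewritten constructor in this diff passes static_cast<uint32_t>(AclCategory::...) into Cmd, and BlockingBaseCmd ORs an extra category in, so the categories are evidently single-bit flags that compose. A minimal illustration; the enumerator values are assumptions, only the one-bit-per-category shape matters:

#include <cstdint>

enum class AclCategorySketch : uint32_t {
  KEYSPACE = 1u << 0,
  STRING   = 1u << 1,
  LIST     = 1u << 2,
  PUBSUB   = 1u << 3,
  BLOCKING = 1u << 4,
};

// BLPopCmd-style composition: a LIST command that is also BLOCKING.
constexpr uint32_t kBlpopCategoriesSketch =
    static_cast<uint32_t>(AclCategorySketch::LIST) |
    static_cast<uint32_t>(AclCategorySketch::BLOCKING);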
virtual Cmd* Clone() override { - return new PSubscribeCmd(*this); - } + PSubscribeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PUBSUB)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PSubscribeCmd(*this); } + std::vector current_key() const override { return {channels_}; } + + std::vector channels_; private: - virtual void DoInitial() override; + void DoInitial() override; }; class PubSubCmd : public Cmd { public: - PubSubCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new PubSubCmd(*this); - } + PubSubCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::PUBSUB)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PubSubCmd(*this); } + private: std::string subcommand_; - std::vector arguments_; - virtual void DoInitial() override; - virtual void Clear() { - arguments_.clear(); - } + std::vector arguments_; + void DoInitial() override; + void Clear() override { arguments_.clear(); } }; #endif // INCLUDE_PIKA_PUBSUB_H_ diff --git a/tools/pika_migrate/include/pika_repl_bgworker.h b/tools/pika_migrate/include/pika_repl_bgworker.h index e74f41e3a9..e548ab551d 100644 --- a/tools/pika_migrate/include/pika_repl_bgworker.h +++ b/tools/pika_migrate/include/pika_repl_bgworker.h @@ -8,36 +8,46 @@ #include #include +#include +#include "net/include/bg_thread.h" +#include "net/include/pb_conn.h" +#include "net/include/thread_pool.h" -#include "pink/include/pb_conn.h" -#include "pink/include/bg_thread.h" -#include "pink/include/thread_pool.h" +#include "pika_inner_message.pb.h" -#include "src/pika_inner_message.pb.h" - -#include "include/pika_command.h" #include "include/pika_binlog_transverter.h" +#include "include/pika_define.h" +#include "include/pika_command.h" class PikaReplBgWorker { public: explicit PikaReplBgWorker(int queue_size); - ~PikaReplBgWorker(); int StartThread(); int StopThread(); - void Schedule(pink::TaskFunc func, void* arg); - void QueueClear(); + int TaskQueueSize() { + int pri_size = 0; + int qu_size = 0; + bg_thread_.QueueSize(&pri_size, &qu_size); + return pri_size + qu_size; + } + void Schedule(net::TaskFunc func, void* arg); + void Schedule(net::TaskFunc func, void* arg, std::function& call_back); static void HandleBGWorkerWriteBinlog(void* arg); static void HandleBGWorkerWriteDB(void* arg); - + static void WriteDBInSyncWay(const std::shared_ptr& c_ptr); + void SetThreadName(const std::string& thread_name) { + bg_thread_.set_thread_name(thread_name); + } BinlogItem binlog_item_; - pink::RedisParser redis_parser_; + net::RedisParser redis_parser_; std::string ip_port_; - std::string table_name_; - uint32_t partition_id_; + std::string db_name_; private: - pink::BGThread bg_thread_; - static int HandleWriteBinlog(pink::RedisParser* parser, const pink::RedisCmdArgsType& argv); + net::BGThread bg_thread_; + static int HandleWriteBinlog(net::RedisParser* parser, const net::RedisCmdArgsType& argv); + static void ParseBinlogOffset(const InnerMessage::BinlogOffset& pb_offset, LogOffset* offset); + static void ParseAndSendPikaCommand(const std::shared_ptr& c_ptr); }; #endif // PIKA_REPL_BGWROKER_H_ diff --git 
a/tools/pika_migrate/include/pika_repl_client.h b/tools/pika_migrate/include/pika_repl_client.h index d786af489f..73fb897a62 100644 --- a/tools/pika_migrate/include/pika_repl_client.h +++ b/tools/pika_migrate/include/pika_repl_client.h @@ -6,65 +6,49 @@ #ifndef PIKA_REPL_CLIENT_H_ #define PIKA_REPL_CLIENT_H_ -#include #include +#include +#include -#include "pink/include/pink_conn.h" -#include "pink/include/client_thread.h" -#include "pink/include/thread_pool.h" -#include "slash/include/slash_status.h" - +#include "net/include/client_thread.h" +#include "net/include/net_conn.h" +#include "net/include/thread_pool.h" +#include "pstd/include/pstd_status.h" #include "include/pika_define.h" -#include "include/pika_partition.h" + #include "include/pika_binlog_reader.h" #include "include/pika_repl_bgworker.h" #include "include/pika_repl_client_thread.h" -#include "pink/include/thread_pool.h" -#include "src/pika_inner_message.pb.h" +#include "net/include/thread_pool.h" +#include "pika_inner_message.pb.h" -using slash::Status; struct ReplClientTaskArg { std::shared_ptr res; - std::shared_ptr conn; - ReplClientTaskArg(std::shared_ptr _res, - std::shared_ptr _conn) + std::shared_ptr conn; + ReplClientTaskArg(const std::shared_ptr& _res, const std::shared_ptr& _conn) : res(_res), conn(_conn) {} }; struct ReplClientWriteBinlogTaskArg { std::shared_ptr res; - std::shared_ptr conn; + std::shared_ptr conn; void* res_private_data; PikaReplBgWorker* worker; - ReplClientWriteBinlogTaskArg( - const std::shared_ptr _res, - std::shared_ptr _conn, - void* _res_private_data, - PikaReplBgWorker* _worker) : - res(_res), conn(_conn), - res_private_data(_res_private_data), worker(_worker) {} + ReplClientWriteBinlogTaskArg(const std::shared_ptr& _res, + const std::shared_ptr& _conn, + void* _res_private_data, PikaReplBgWorker* _worker) + : res(_res), conn(_conn), res_private_data(_res_private_data), worker(_worker) {} }; struct ReplClientWriteDBTaskArg { - PikaCmdArgsType* argv; - BinlogItem* binlog_item; - std::string table_name; - uint32_t partition_id; - ReplClientWriteDBTaskArg(PikaCmdArgsType* _argv, - BinlogItem* _binlog_item, - const std::string _table_name, - uint32_t _partition_id) - : argv(_argv), binlog_item(_binlog_item), - table_name(_table_name), partition_id(_partition_id) {} - ~ReplClientWriteDBTaskArg() { - delete argv; - delete binlog_item; - } + const std::shared_ptr cmd_ptr; + explicit ReplClientWriteDBTaskArg(std::shared_ptr _cmd_ptr) + : cmd_ptr(std::move(_cmd_ptr)) {} + ~ReplClientWriteDBTaskArg() = default; }; - class PikaReplClient { public: PikaReplClient(int cron_interval, int keepalive_timeout); @@ -73,54 +57,61 @@ class PikaReplClient { int Start(); int Stop(); - slash::Status Write(const std::string& ip, const int port, const std::string& msg); - slash::Status Close(const std::string& ip, const int port); - - void Schedule(pink::TaskFunc func, void* arg); - void ScheduleWriteBinlogTask(std::string table_partition, - const std::shared_ptr res, - std::shared_ptr conn, - void* req_private_data); - void ScheduleWriteDBTask(const std::string& dispatch_key, - PikaCmdArgsType* argv, BinlogItem* binlog_item, - const std::string& table_name, uint32_t partition_id); - - Status SendMetaSync(); - Status SendPartitionDBSync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& boffset, - const std::string& local_ip); - Status SendPartitionTrySync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t 
partition_id, - const BinlogOffset& boffset, - const std::string& local_ip); - Status SendPartitionBinlogSync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& ack_start, - const BinlogOffset& ack_end, - const std::string& local_ip, - bool is_frist_send); - Status SendRemoveSlaveNode(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const std::string& local_ip); - private: - size_t GetHashIndex(std::string key, bool upper_half); - void UpdateNextAvail() { - next_avail_ = (next_avail_ + 1) % bg_workers_.size(); + pstd::Status Write(const std::string& ip, int port, const std::string& msg); + pstd::Status Close(const std::string& ip, int port); + + void Schedule(net::TaskFunc func, void* arg); + void ScheduleByDBName(net::TaskFunc func, void* arg, const std::string& db_name); + void ScheduleWriteBinlogTask(const std::string& db_name, const std::shared_ptr<InnerMessage::InnerResponse>& res, + const std::shared_ptr<net::PbConn>& conn, void* res_private_data); + void ScheduleWriteDBTask(const std::shared_ptr<Cmd>& cmd_ptr, const std::string& db_name); + + pstd::Status SendMetaSync(); + pstd::Status SendDBSync(const std::string& ip, uint32_t port, const std::string& db_name, + const BinlogOffset& boffset, const std::string& local_ip); + pstd::Status SendTrySync(const std::string& ip, uint32_t port, const std::string& db_name, + const BinlogOffset& boffset, const std::string& local_ip); + pstd::Status SendBinlogSync(const std::string& ip, uint32_t port, const std::string& db_name, + const LogOffset& ack_start, const LogOffset& ack_end, + const std::string& local_ip, bool is_first_send); + pstd::Status SendRemoveSlaveNode(const std::string& ip, uint32_t port, const std::string& db_name, const std::string& local_ip); + + void IncrAsyncWriteDBTaskCount(const std::string& db_name, int32_t incr_step) { + int32_t db_index = db_name.back() - '0'; + assert(db_index >= 0 && db_index <= 7); + async_write_db_task_counts_[db_index].fetch_add(incr_step, std::memory_order::memory_order_seq_cst); } - PikaReplClientThread* client_thread_; - int next_avail_; + void DecrAsyncWriteDBTaskCount(const std::string& db_name, int32_t incr_step) { + int32_t db_index = db_name.back() - '0'; + assert(db_index >= 0 && db_index <= 7); + async_write_db_task_counts_[db_index].fetch_sub(incr_step, std::memory_order::memory_order_seq_cst); + } + + int32_t GetUnfinishedAsyncWriteDBTaskCount(const std::string& db_name) { + int32_t db_index = db_name.back() - '0'; + assert(db_index >= 0 && db_index <= 7); + return async_write_db_task_counts_[db_index].load(std::memory_order_seq_cst); + } + + private: + size_t GetBinlogWorkerIndexByDBName(const std::string &db_name); + size_t GetHashIndexByKey(const std::string& key); + void UpdateNextAvail() { next_avail_ = (next_avail_ + 1) % static_cast<int32_t>(write_binlog_workers_.size()); } + + std::unique_ptr<PikaReplClientThread> client_thread_; + int next_avail_ = 0; std::hash<std::string> str_hash; - std::vector<PikaReplBgWorker*> bg_workers_; + + // async_write_db_task_counts_ is used when consuming binlog, which indicates the number of async write-DB tasks that are + queued or being executed by WriteDBWorkers. If a flushdb binlog needs to be applied to the DB, it must wait + until this count drops to zero. 
See pika discussion #2807 for more details. + It is only used on the slave node when consuming binlogs. + std::atomic<int32_t> async_write_db_task_counts_[MAX_DB_NUM]; + // [NOTICE] write_db_workers_ must be declared after async_write_db_task_counts_ to ensure write_db_workers_ will be destroyed before async_write_db_task_counts_ + // when PikaReplClient is destructed, because some of the async tasks executed by write_db_workers_ manipulate async_write_db_task_counts_ + std::vector<std::unique_ptr<PikaReplBgWorker>> write_binlog_workers_; + std::vector<std::unique_ptr<PikaReplBgWorker>> write_db_workers_; }; #endif diff --git a/tools/pika_migrate/include/pika_repl_client_conn.h b/tools/pika_migrate/include/pika_repl_client_conn.h index 516507f2d5..bfd697dfa0 100644 --- a/tools/pika_migrate/include/pika_repl_client_conn.h +++ b/tools/pika_migrate/include/pika_repl_client_conn.h @@ -6,35 +6,34 @@ #ifndef PIKA_REPL_CLIENT_CONN_H_ #define PIKA_REPL_CLIENT_CONN_H_ -#include "pink/include/pb_conn.h" +#include "net/include/pb_conn.h" #include +#include #include "include/pika_conf.h" -#include "src/pika_inner_message.pb.h" +#include "pika_inner_message.pb.h" -class PikaReplClientConn: public pink::PbConn { +class SyncMasterDB; +class SyncSlaveDB; + +class PikaReplClientConn : public net::PbConn { public: - PikaReplClientConn(int fd, const std::string& ip_port, pink::Thread *thread, void* worker_specific_data, pink::PinkEpoll* epoll); - virtual ~PikaReplClientConn() = default; + PikaReplClientConn(int fd, const std::string& ip_port, net::Thread* thread, void* worker_specific_data, + net::NetMultiplexer* mpx); + ~PikaReplClientConn() override = default; static void HandleMetaSyncResponse(void* arg); static void HandleDBSyncResponse(void* arg); static void HandleTrySyncResponse(void* arg); static void HandleRemoveSlaveNodeResponse(void* arg); - static bool IsTableStructConsistent(const std::vector<TableStruct>& current_tables, - const std::vector<TableStruct>& expect_tables); + static bool IsDBStructConsistent(const std::vector<DBStruct>& current_dbs, + const std::vector<DBStruct>& expect_tables); int DealMessage() override; + private: - // dispatch binlog by its table_name + partition - void DispatchBinlogRes(const std::shared_ptr<InnerMessage::InnerResponse> response); - - struct ReplRespArg { - std::shared_ptr<InnerMessage::InnerResponse> resp; - std::shared_ptr<pink::PbConn> conn; - ReplRespArg(std::shared_ptr<InnerMessage::InnerResponse> _resp, std::shared_ptr<pink::PbConn> _conn) : resp(_resp), conn(_conn) { - } - }; + // dispatch binlog by its db_name + void DispatchBinlogRes(const std::shared_ptr<InnerMessage::InnerResponse>& response); }; #endif diff --git a/tools/pika_migrate/include/pika_repl_client_thread.h b/tools/pika_migrate/include/pika_repl_client_thread.h index c0ed6ab48b..fe8213b090 100644 --- a/tools/pika_migrate/include/pika_repl_client_thread.h +++ b/tools/pika_migrate/include/pika_repl_client_thread.h @@ -6,54 +6,40 @@ #ifndef PIKA_REPL_CLIENT_THREAD_H_ #define PIKA_REPL_CLIENT_THREAD_H_ -#include #include +#include #include "include/pika_repl_client_conn.h" -#include "pink/include/pink_conn.h" -#include "pink/include/client_thread.h" +#include "net/include/client_thread.h" +#include "net/include/net_conn.h" -class PikaReplClientThread : public pink::ClientThread { +class PikaReplClientThread : public net::ClientThread { public: PikaReplClientThread(int cron_interval, int keepalive_timeout); - virtual ~PikaReplClientThread() = default; - int Start(); + ~PikaReplClientThread() override = default; private: - class ReplClientConnFactory : public pink::ConnFactory { + class ReplClientConnFactory : public net::ConnFactory { public: - virtual std::shared_ptr<pink::PinkConn> NewPinkConn( - int connfd, - const std::string &ip_port, - pink::Thread 
*thread, - void* worker_specific_data, - pink::PinkEpoll* pink_epoll) const override { - return std::make_shared(connfd, ip_port, thread, worker_specific_data, pink_epoll); + std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, net::Thread* thread, + void* worker_specific_data, + net::NetMultiplexer* net) const override { + return std::static_pointer_cast( + std::make_shared(connfd, ip_port, thread, worker_specific_data, net)); } }; - class ReplClientHandle : public pink::ClientHandle { + class ReplClientHandle : public net::ClientHandle { public: - void CronHandle() const override { - } + void CronHandle() const override {} void FdTimeoutHandle(int fd, const std::string& ip_port) const override; void FdClosedHandle(int fd, const std::string& ip_port) const override; bool AccessHandle(std::string& ip) const override { - // ban 127.0.0.1 if you want to test this routine - // if (ip.find("127.0.0.2") != std::string::npos) { - // std::cout << "AccessHandle " << ip << std::endl; - // return false; - // } return true; } - int CreateWorkerSpecificData(void** data) const override { - return 0; - } - int DeleteWorkerSpecificData(void* data) const override { - return 0; - } - void DestConnectFailedHandle(std::string ip_port, std::string reason) const override { - } + int CreateWorkerSpecificData(void** data) const override { return 0; } + int DeleteWorkerSpecificData(void* data) const override { return 0; } + void DestConnectFailedHandle(const std::string& ip_port, const std::string& reason) const override {} }; ReplClientConnFactory conn_factory_; diff --git a/tools/pika_migrate/include/pika_repl_server.h b/tools/pika_migrate/include/pika_repl_server.h index 592052d92a..4a12f99cb9 100644 --- a/tools/pika_migrate/include/pika_repl_server.h +++ b/tools/pika_migrate/include/pika_repl_server.h @@ -6,8 +6,10 @@ #ifndef PIKA_REPL_SERVER_H_ #define PIKA_REPL_SERVER_H_ -#include "pink/include/thread_pool.h" +#include "net/include/thread_pool.h" +#include +#include #include #include "include/pika_command.h" @@ -16,9 +18,9 @@ struct ReplServerTaskArg { std::shared_ptr req; - std::shared_ptr conn; - ReplServerTaskArg(std::shared_ptr _req, std::shared_ptr _conn) - : req(_req), conn(_conn) {} + std::shared_ptr conn; + ReplServerTaskArg(std::shared_ptr _req, std::shared_ptr _conn) + : req(std::move(_req)), conn(std::move(_conn)) {} }; class PikaReplServer { @@ -29,19 +31,20 @@ class PikaReplServer { int Start(); int Stop(); - slash::Status SendSlaveBinlogChips(const std::string& ip, int port, const std::vector& tasks); - slash::Status Write(const std::string& ip, const int port, const std::string& msg); + pstd::Status SendSlaveBinlogChips(const std::string& ip, int port, const std::vector& tasks); + pstd::Status Write(const std::string& ip, int port, const std::string& msg); - void Schedule(pink::TaskFunc func, void* arg); + void BuildBinlogOffset(const LogOffset& offset, InnerMessage::BinlogOffset* boffset); + void BuildBinlogSyncResp(const std::vector& tasks, InnerMessage::InnerResponse* resp); + void Schedule(net::TaskFunc func, void* arg); void UpdateClientConnMap(const std::string& ip_port, int fd); void RemoveClientConn(int fd); void KillAllConns(); private: - pink::ThreadPool* server_tp_; - PikaReplServerThread* pika_repl_server_thread_; - - pthread_rwlock_t client_conn_rwlock_; + std::unique_ptr server_tp_ = nullptr; + std::unique_ptr pika_repl_server_thread_ = nullptr; + std::shared_mutex client_conn_rwlock_; std::map client_conn_map_; }; diff --git 
a/tools/pika_migrate/include/pika_repl_server_conn.h b/tools/pika_migrate/include/pika_repl_server_conn.h index d5757f0373..c96159e0fe 100644 --- a/tools/pika_migrate/include/pika_repl_server_conn.h +++ b/tools/pika_migrate/include/pika_repl_server_conn.h @@ -8,23 +8,35 @@ #include -#include "pink/include/pb_conn.h" -#include "pink/include/pink_thread.h" +#include "net/include/net_thread.h" +#include "net/include/pb_conn.h" -#include "src/pika_inner_message.pb.h" +#include "include/pika_define.h" +#include "pika_inner_message.pb.h" -class PikaReplServerConn: public pink::PbConn { +class SyncMasterDB; + +class PikaReplServerConn : public net::PbConn { public: - PikaReplServerConn(int fd, std::string ip_port, pink::Thread* thread, void* worker_specific_data, pink::PinkEpoll* epoll); - virtual ~PikaReplServerConn(); + PikaReplServerConn(int fd, const std::string& ip_port, net::Thread* thread, void* worker_specific_data, + net::NetMultiplexer* mpx); + ~PikaReplServerConn() override; static void HandleMetaSyncRequest(void* arg); static void HandleTrySyncRequest(void* arg); + + static bool TrySyncOffsetCheck(const std::shared_ptr& db, + const InnerMessage::InnerRequest::TrySync& try_sync_request, + InnerMessage::InnerResponse::TrySync* try_sync_response); + static bool TrySyncUpdateSlaveNode(const std::shared_ptr& db, + const InnerMessage::InnerRequest::TrySync& try_sync_request, + const std::shared_ptr& conn, + InnerMessage::InnerResponse::TrySync* try_sync_response); static void HandleDBSyncRequest(void* arg); static void HandleBinlogSyncRequest(void* arg); static void HandleRemoveSlaveNodeRequest(void* arg); - int DealMessage(); + int DealMessage() override; }; #endif // INCLUDE_PIKA_REPL_SERVER_CONN_H_ diff --git a/tools/pika_migrate/include/pika_repl_server_thread.h b/tools/pika_migrate/include/pika_repl_server_thread.h index f322a1df7c..c4e356839b 100644 --- a/tools/pika_migrate/include/pika_repl_server_thread.h +++ b/tools/pika_migrate/include/pika_repl_server_thread.h @@ -6,50 +6,41 @@ #ifndef PIKA_REPL_SERVER_THREAD_H_ #define PIKA_REPL_SERVER_THREAD_H_ -#include "pink/src/holy_thread.h" +#include "net/src/holy_thread.h" #include "include/pika_repl_server_conn.h" -class PikaReplServerThread : public pink::HolyThread { +class PikaReplServerThread : public net::HolyThread { public: PikaReplServerThread(const std::set& ips, int port, int cron_interval); - virtual ~PikaReplServerThread() = default; - + ~PikaReplServerThread() override = default; int ListenPort(); - // for ProcessBinlogData use - uint64_t GetnPlusSerial() { - return serial_++; - } - private: - class ReplServerConnFactory : public pink::ConnFactory { + class ReplServerConnFactory : public net::ConnFactory { public: - explicit ReplServerConnFactory(PikaReplServerThread* binlog_receiver) - : binlog_receiver_(binlog_receiver) { - } + explicit ReplServerConnFactory(PikaReplServerThread* binlog_receiver) : binlog_receiver_(binlog_receiver) {} - virtual std::shared_ptr NewPinkConn( - int connfd, - const std::string& ip_port, - pink::Thread* thread, - void* worker_specific_data, - pink::PinkEpoll* pink_epoll) const override { - return std::make_shared(connfd, ip_port, thread, binlog_receiver_, pink_epoll); + std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, net::Thread* thread, + void* worker_specific_data, + net::NetMultiplexer* net) const override { + return std::static_pointer_cast( + std::make_shared(connfd, ip_port, thread, binlog_receiver_, net)); } - private: - PikaReplServerThread* binlog_receiver_; + + 
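Earlier in this patch, the pthread_rwlock_t guarding PikaReplServer's client_conn_map_ becomes a std::shared_mutex. A self-contained sketch of the reader/writer pattern this enables, assuming C++17; ConnMap is illustrative and not part of the patch.

```cpp
#include <map>
#include <mutex>
#include <shared_mutex>
#include <string>

// Illustrative only: exclusive lock for writers, shared lock for readers.
class ConnMap {
 public:
  void Update(const std::string& ip_port, int fd) {
    std::lock_guard<std::shared_mutex> guard(rwlock_);  // exclusive
    map_[ip_port] = fd;
  }
  int Lookup(const std::string& ip_port) {
    std::shared_lock<std::shared_mutex> guard(rwlock_);  // shared, many readers
    auto it = map_.find(ip_port);
    return it == map_.end() ? -1 : it->second;
  }

 private:
  std::shared_mutex rwlock_;
  std::map<std::string, int> map_;
};
```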
private: + PikaReplServerThread* binlog_receiver_ = nullptr; }; - class ReplServerHandle : public pink::ServerHandle { + class ReplServerHandle : public net::ServerHandle { public: - virtual void FdClosedHandle(int fd, const std::string& ip_port) const override; + void FdClosedHandle(int fd, const std::string& ip_port) const override; }; ReplServerConnFactory conn_factory_; ReplServerHandle handle_; - int port_; - uint64_t serial_; + int port_ = 0; + uint64_t serial_ = 0; }; #endif diff --git a/tools/pika_migrate/include/pika_rm.h b/tools/pika_migrate/include/pika_rm.h index cb20a8b250..ec80c1ff58 100644 --- a/tools/pika_migrate/include/pika_rm.h +++ b/tools/pika_migrate/include/pika_rm.h @@ -6,354 +6,223 @@ #ifndef PIKA_RM_H_ #define PIKA_RM_H_ -#include #include -#include #include +#include +#include +#include #include -#include "slash/include/slash_status.h" +#include "pstd/include/pstd_status.h" #include "include/pika_binlog_reader.h" +#include "include/pika_consensus.h" #include "include/pika_repl_client.h" #include "include/pika_repl_server.h" +#include "include/pika_slave_node.h" +#include "include/pika_stable_log.h" +#include "include/rsync_client.h" #define kBinlogSendPacketNum 40 #define kBinlogSendBatchNum 100 // unit seconds -#define kSendKeepAliveTimeout (10 * 1000000) +#define kSendKeepAliveTimeout (2 * 1000000) #define kRecvKeepAliveTimeout (20 * 1000000) -using slash::Status; - -struct SyncWinItem { - BinlogOffset offset_; - bool acked_; - bool operator==(const SyncWinItem& other) const { - if (offset_.filenum == other.offset_.filenum && offset_.offset == other.offset_.offset) { - return true; - } - return false; - } - explicit SyncWinItem(const BinlogOffset& offset) : offset_(offset), acked_(false) { - } - SyncWinItem(uint32_t filenum, uint64_t offset) : offset_(filenum, offset), acked_(false) { - } - std::string ToString() const { - return offset_.ToString() + " acked: " + std::to_string(acked_); - } -}; - -class SyncWindow { - public: - SyncWindow() { - } - void Push(const SyncWinItem& item); - bool Update(const SyncWinItem& start_item, const SyncWinItem& end_item, BinlogOffset* acked_offset); - int Remainings(); - std::string ToStringStatus() const { - if (win_.empty()) { - return " Size: " + std::to_string(win_.size()) + "\r\n"; - } else { - std::string res; - res += " Size: " + std::to_string(win_.size()) + "\r\n"; - res += (" Begin_item: " + win_.begin()->ToString() + "\r\n"); - res += (" End_item: " + win_.rbegin()->ToString() + "\r\n"); - return res; - } - } - private: - // TODO(whoiami) ring buffer maybe - std::deque win_; -}; -// role master use -class SlaveNode : public RmNode { +class SyncDB { public: - SlaveNode(const std::string& ip, int port, const std::string& table_name, uint32_t partition_id, int session_id); - ~SlaveNode(); - void Lock() { - slave_mu.Lock(); - } - void Unlock() { - slave_mu.Unlock(); - } - SlaveState slave_state; + SyncDB(const std::string& db_name); + virtual ~SyncDB() = default; + DBInfo& SyncDBInfo() { return db_info_; } + std::string DBName(); - BinlogSyncState b_state; - SyncWindow sync_win; - BinlogOffset sent_offset; - BinlogOffset acked_offset; - - std::string ToStringStatus(); - - std::shared_ptr binlog_reader; - Status InitBinlogFileReader(const std::shared_ptr& binlog, const BinlogOffset& offset); - void ReleaseBinlogFileReader(); - - slash::Mutex slave_mu; -}; - -class SyncPartition { - public: - SyncPartition(const std::string& table_name, uint32_t partition_id); - virtual ~SyncPartition() = default; - - PartitionInfo& 
SyncPartitionInfo() { - return partition_info_; - } protected: - // std::shared_ptr binlog_; - PartitionInfo partition_info_; + DBInfo db_info_; }; -class SyncMasterPartition : public SyncPartition { +class SyncMasterDB : public SyncDB { public: - SyncMasterPartition(const std::string& table_name, uint32_t partition_id); - Status AddSlaveNode(const std::string& ip, int port, int session_id); - Status RemoveSlaveNode(const std::string& ip, int port); - - Status ActivateSlaveBinlogSync(const std::string& ip, int port, const std::shared_ptr binlog, const BinlogOffset& offset); - Status ActivateSlaveDbSync(const std::string& ip, int port); - - Status SyncBinlogToWq(const std::string& ip, int port); - Status UpdateSlaveBinlogAckInfo(const std::string& ip, int port, const BinlogOffset& start, const BinlogOffset& end); - Status GetSlaveSyncBinlogInfo(const std::string& ip, int port, BinlogOffset* sent_offset, BinlogOffset* acked_offset); - Status GetSlaveState(const std::string& ip, int port, SlaveState* const slave_state); - - Status SetLastSendTime(const std::string& ip, int port, uint64_t time); - Status GetLastSendTime(const std::string& ip, int port, uint64_t* time); - - Status SetLastRecvTime(const std::string& ip, int port, uint64_t time); - Status GetLastRecvTime(const std::string& ip, int port, uint64_t* time); - - Status GetSafetyPurgeBinlog(std::string* safety_purge); - bool BinlogCloudPurge(uint32_t index); - - Status WakeUpSlaveBinlogSync(); - Status CheckSyncTimeout(uint64_t now); - + SyncMasterDB(const std::string& db_name); + pstd::Status AddSlaveNode(const std::string& ip, int port, int session_id); + pstd::Status RemoveSlaveNode(const std::string& ip, int port); + pstd::Status ActivateSlaveBinlogSync(const std::string& ip, int port, const LogOffset& offset); + pstd::Status ActivateSlaveDbSync(const std::string& ip, int port); + pstd::Status SyncBinlogToWq(const std::string& ip, int port); + pstd::Status GetSlaveSyncBinlogInfo(const std::string& ip, int port, BinlogOffset* sent_offset, BinlogOffset* acked_offset); + pstd::Status GetSlaveState(const std::string& ip, int port, SlaveState* slave_state); + pstd::Status SetLastRecvTime(const std::string& ip, int port, uint64_t time); + pstd::Status GetSafetyPurgeBinlog(std::string* safety_purge); + pstd::Status WakeUpSlaveBinlogSync(); + pstd::Status CheckSyncTimeout(uint64_t now); + pstd::Status GetSlaveNodeSession(const std::string& ip, int port, int32_t* session); int GetNumberOfSlaveNode(); + bool BinlogCloudPurge(uint32_t index); bool CheckSlaveNodeExist(const std::string& ip, int port); - Status GetSlaveNodeSession(const std::string& ip, int port, int32_t* session); - void GetValidSlaveNames(std::vector* slavenames); - // display use - Status GetInfo(std::string* info); // debug use std::string ToStringStatus(); - int32_t GenSessionId(); - bool CheckSessionId(const std::string& ip, int port, - const std::string& table_name, - uint64_t partition_id, int session_id); + bool CheckSessionId(const std::string& ip, int port, const std::string& db_name, int session_id); + + // consensus use + pstd::Status ConsensusUpdateSlave(const std::string& ip, int port, const LogOffset& start, const LogOffset& end); + pstd::Status ConsensusProposeLog(const std::shared_ptr& cmd_ptr); + pstd::Status ConsensusProcessLeaderLog(const std::shared_ptr& cmd_ptr, const BinlogItem& attribute); + LogOffset ConsensusCommittedIndex(); + LogOffset ConsensusLastIndex(); + + std::shared_ptr StableLogger() { return coordinator_.StableLogger(); } + + 
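A small usage sketch for the SyncMasterDB API above: a slave's replication lag can be derived from GetSlaveSyncBinlogInfo, assuming BinlogOffset carries filenum/offset and a ToString() as in pika_define.h; LagString is a hypothetical helper, not code from this patch.

```cpp
#include <string>

// Hypothetical helper: report how far a slave trails the master binlog.
std::string LagString(SyncMasterDB* db, const std::string& ip, int port) {
  BinlogOffset sent;
  BinlogOffset acked;
  pstd::Status s = db->GetSlaveSyncBinlogInfo(ip, port, &sent, &acked);
  if (!s.ok()) {
    return "slave " + ip + ":" + std::to_string(port) + " not registered";
  }
  if (sent.filenum == acked.filenum) {
    // Same binlog file: the lag is a simple byte delta.
    return "lag=" + std::to_string(sent.offset - acked.offset) + " bytes";
  }
  return "sent=" + sent.ToString() + " acked=" + acked.ToString();
}
```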
std::shared_ptr Logger() { + if (!coordinator_.StableLogger()) { + return nullptr; + } + return coordinator_.StableLogger()->Logger(); + } private: - bool CheckReadBinlogFromCache(); - // inovker need to hold partition_mu_ - void CleanMasterNode(); - void CleanSlaveNode(); // invoker need to hold slave_mu_ - Status ReadCachedBinlogToWq(const std::shared_ptr& slave_ptr); - Status ReadBinlogFileToWq(const std::shared_ptr& slave_ptr); - // inovker need to hold partition_mu_ - Status GetSlaveNode(const std::string& ip, int port, std::shared_ptr* slave_node); - - slash::Mutex partition_mu_; - std::vector> slaves_; + pstd::Status ReadBinlogFileToWq(const std::shared_ptr& slave_ptr); - slash::Mutex session_mu_; - int32_t session_id_; + std::shared_ptr GetSlaveNode(const std::string& ip, int port); + std::unordered_map> GetAllSlaveNodes(); - // BinlogCacheWindow win_; + pstd::Mutex session_mu_; + int32_t session_id_ = 0; + ConsensusCoordinator coordinator_; }; -class SyncSlavePartition : public SyncPartition { +class SyncSlaveDB : public SyncDB { public: - SyncSlavePartition(const std::string& table_name, uint32_t partition_id); - + SyncSlaveDB(const std::string& db_name); void Activate(const RmNode& master, const ReplState& repl_state); void Deactivate(); - void SetLastRecvTime(uint64_t time); - uint64_t LastRecvTime(); - void SetReplState(const ReplState& repl_state); ReplState State(); - - Status CheckSyncTimeout(uint64_t now); + pstd::Status CheckSyncTimeout(uint64_t now); // For display - Status GetInfo(std::string* info); + pstd::Status GetInfo(std::string* info); // For debug std::string ToStringStatus(); - - const std::string& MasterIp() { - return m_info_.Ip(); - } - int MasterPort() { - return m_info_.Port(); - } - void SetMasterSessionId(int32_t session_id) { - m_info_.SetSessionId(session_id); - } - int32_t MasterSessionId() { - return m_info_.SessionId(); - } - void SetLocalIp(const std::string& local_ip) { - local_ip_ = local_ip; - } - std::string LocalIp() { - return local_ip_; - } + std::string LocalIp(); + int32_t MasterSessionId(); + const std::string& MasterIp(); + int MasterPort(); + void SetMasterSessionId(int32_t session_id); + void SetLocalIp(const std::string& local_ip); + void StopRsync(); + pstd::Status ActivateRsync(); + bool IsRsyncExited() { return rsync_cli_->IsExitedFromRunning(); } private: - slash::Mutex partition_mu_; + std::unique_ptr rsync_cli_; + int32_t rsync_init_retry_count_{0}; + pstd::Mutex db_mu_; RmNode m_info_; - ReplState repl_state_; + ReplState repl_state_{kNoConnect}; std::string local_ip_; }; -class BinlogReaderManager { - public: - ~BinlogReaderManager(); - Status FetchBinlogReader(const RmNode& rm_node, std::shared_ptr* reader); - Status ReleaseBinlogReader(const RmNode& rm_node); - private: - slash::Mutex reader_mu_; - std::unordered_map, hash_rm_node> occupied_; - std::vector> vacant_; -}; - class PikaReplicaManager { public: PikaReplicaManager(); - ~PikaReplicaManager(); - + ~PikaReplicaManager() = default; + friend Cmd; void Start(); void Stop(); - - Status AddSyncPartitionSanityCheck(const std::set& p_infos); - Status AddSyncPartition(const std::set& p_infos); - Status RemoveSyncPartitionSanityCheck(const std::set& p_infos); - Status RemoveSyncPartition(const std::set& p_infos); - Status SelectLocalIp(const std::string& remote_ip, - const int remote_port, - std::string* const local_ip); - Status ActivateSyncSlavePartition(const RmNode& node, const ReplState& repl_state); - Status UpdateSyncSlavePartitionSessionId(const PartitionInfo& p_info, 
int32_t session_id); - Status DeactivateSyncSlavePartition(const PartitionInfo& p_info); - Status SetSlaveReplState(const PartitionInfo& p_info, const ReplState& repl_state); - Status GetSlaveReplState(const PartitionInfo& p_info, ReplState* repl_state); + bool CheckMasterSyncFinished(); + pstd::Status ActivateSyncSlaveDB(const RmNode& node, const ReplState& repl_state); // For Pika Repl Client Thread - Status SendMetaSyncRequest(); - Status SendRemoveSlaveNodeRequest(const std::string& table, uint32_t partition_id); - Status SendPartitionTrySyncRequest(const std::string& table_name, size_t partition_id); - Status SendPartitionDBSyncRequest(const std::string& table_name, size_t partition_id); - Status SendPartitionBinlogSyncAckRequest(const std::string& table, uint32_t partition_id, - const BinlogOffset& ack_start, const BinlogOffset& ack_end, - bool is_first_send = false); - Status CloseReplClientConn(const std::string& ip, int32_t port); + pstd::Status SendMetaSyncRequest(); + pstd::Status SendRemoveSlaveNodeRequest(const std::string& table); + pstd::Status SendTrySyncRequest(const std::string& db_name); + pstd::Status SendDBSyncRequest(const std::string& db_name); + pstd::Status SendBinlogSyncAckRequest(const std::string& table, const LogOffset& ack_start, + const LogOffset& ack_end, bool is_first_send = false); + pstd::Status CloseReplClientConn(const std::string& ip, int32_t port); // For Pika Repl Server Thread - Status SendSlaveBinlogChipsRequest(const std::string& ip, int port, const std::vector& tasks); - - // For SyncMasterPartition - std::shared_ptr GetSyncMasterPartitionByName(const PartitionInfo& p_info); - Status GetSafetyPurgeBinlogFromSMP(const std::string& table_name, - uint32_t partition_id, std::string* safety_purge); - bool BinlogCloudPurgeFromSMP(const std::string& table_name, - uint32_t partition_id, uint32_t index); - - // For SyncSlavePartition - std::shared_ptr GetSyncSlavePartitionByName(const PartitionInfo& p_info); + pstd::Status SendSlaveBinlogChipsRequest(const std::string& ip, int port, const std::vector& tasks); + // For SyncMasterDB + std::shared_ptr GetSyncMasterDBByName(const DBInfo& p_info); + // For SyncSlaveDB + std::shared_ptr GetSyncSlaveDBByName(const DBInfo& p_info); - Status RunSyncSlavePartitionStateMachine(); + pstd::Status RunSyncSlaveDBStateMachine(); - Status SetMasterLastRecvTime(const RmNode& slave, uint64_t time); - Status SetSlaveLastRecvTime(const RmNode& slave, uint64_t time); + pstd::Status CheckSyncTimeout(uint64_t now); - Status CheckSyncTimeout(uint64_t now); - - // To check partition info + // To check db info // For pkcluster info command - Status GetPartitionInfo( - const std::string& table, uint32_t partition_id, std::string* info); - - void FindCompleteReplica(std::vector* replica); + static bool CheckSlaveDBState(const std::string& ip, int port); void FindCommonMaster(std::string* master); - - Status CheckPartitionRole( - const std::string& table, uint32_t partition_id, int* role); - void RmStatus(std::string* debug_info); - - // following funcs invoked by master partition only - - Status AddPartitionSlave(const RmNode& slave); - Status RemovePartitionSlave(const RmNode& slave); - bool CheckPartitionSlaveExist(const RmNode& slave); - Status GetPartitionSlaveSession(const RmNode& slave, int32_t* session); - - Status LostConnection(const std::string& ip, int port); - - Status ActivateBinlogSync(const RmNode& slave, const BinlogOffset& offset); - Status ActivateDbSync(const RmNode& slave); + pstd::Status CheckDBRole(const 
std::string& table, int* role); + pstd::Status LostConnection(const std::string& ip, int port); + pstd::Status DeactivateSyncSlaveDB(const std::string& ip, int port); // Update binlog win and try to send next binlog - Status UpdateSyncBinlogStatus(const RmNode& slave, const BinlogOffset& offset_start, const BinlogOffset& offset_end); - Status GetSyncBinlogStatus(const RmNode& slave, BinlogOffset* sent_boffset, BinlogOffset* acked_boffset); - Status GetSyncMasterPartitionSlaveState(const RmNode& slave, SlaveState* const slave_state); - - Status WakeUpBinlogSync(); - - // Session Id - int32_t GenPartitionSessionId(const std::string& table_name, uint32_t partition_id); - int32_t GetSlavePartitionSessionId(const std::string& table_name, uint32_t partition_id); - bool CheckSlavePartitionSessionId(const std::string& table_name, uint32_t partition_id, - int session_id); - bool CheckMasterPartitionSessionId(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int session_id); + pstd::Status UpdateSyncBinlogStatus(const RmNode& slave, const LogOffset& offset_start, const LogOffset& offset_end); + pstd::Status WakeUpBinlogSync(); // write_queue related - void ProduceWriteQueue(const std::string& ip, int port, const std::vector& tasks); - int ConsumeWriteQueue(); + void ProduceWriteQueue(const std::string& ip, int port, std::string db_name, const std::vector& tasks); + void DropItemInOneWriteQueue(const std::string& ip, int port, const std::string& db_name); void DropItemInWriteQueue(const std::string& ip, int port); + int ConsumeWriteQueue(); // Schedule Task - void ScheduleReplServerBGTask(pink::TaskFunc func, void* arg); - void ScheduleReplClientBGTask(pink::TaskFunc func, void* arg); - void ScheduleWriteBinlogTask(const std::string& table_partition, - const std::shared_ptr res, - std::shared_ptr conn, void* res_private_data); - void ScheduleWriteDBTask(const std::string& dispatch_key, - PikaCmdArgsType* argv, BinlogItem* binlog_item, - const std::string& table_name, uint32_t partition_id); - + void ScheduleReplServerBGTask(net::TaskFunc func, void* arg); + void ScheduleReplClientBGTask(net::TaskFunc func, void* arg); + void ScheduleWriteBinlogTask(const std::string& db_name, + const std::shared_ptr& res, + const std::shared_ptr& conn, void* res_private_data); + void ScheduleWriteDBTask(const std::shared_ptr& cmd_ptr, const std::string& db_name); + void ScheduleReplClientBGTaskByDBName(net::TaskFunc , void* arg, const std::string &db_name); void ReplServerRemoveClientConn(int fd); void ReplServerUpdateClientConnMap(const std::string& ip_port, int fd); - BinlogReaderManager binlog_reader_mgr; + std::shared_mutex& GetDBLock() { return dbs_rw_; } + + void DBLock() { + dbs_rw_.lock(); + } + void DBUnlock() { + dbs_rw_.unlock(); + } + + std::unordered_map, hash_db_info>& GetSyncMasterDBs() { + return sync_master_dbs_; + } + std::unordered_map, hash_db_info>& GetSyncSlaveDBs() { + return sync_slave_dbs_; + } + + int32_t GetUnfinishedAsyncWriteDBTaskCount(const std::string& db_name) { + return pika_repl_client_->GetUnfinishedAsyncWriteDBTaskCount(db_name); + } private: - void InitPartition(); + void InitDB(); + pstd::Status SelectLocalIp(const std::string& remote_ip, int remote_port, std::string* local_ip); - pthread_rwlock_t partitions_rw_; - std::unordered_map, hash_partition_info> sync_master_partitions_; - std::unordered_map, hash_partition_info> sync_slave_partitions_; + std::shared_mutex dbs_rw_; + std::unordered_map, hash_db_info> sync_master_dbs_; + 
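One practical note on GetUnfinishedAsyncWriteDBTaskCount above: the counters are indexed by the trailing digit of the DB name ("db0" through "db7", hence db_name.back() - '0'), and a FLUSHDB consumed from the binlog must wait for the matching counter to drain before it can safely apply. A hypothetical polling helper, purely illustrative:

```cpp
#include <chrono>
#include <string>
#include <thread>

// Hypothetical: block until all queued async write-DB tasks for db_name have
// finished, so a FLUSHDB from the binlog cannot race with earlier writes.
void WaitUntilWriteDBIdle(PikaReplicaManager* rm, const std::string& db_name) {
  while (rm->GetUnfinishedAsyncWriteDBTaskCount(db_name) > 0) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
}
```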
std::unordered_map, hash_db_info> sync_slave_dbs_; - slash::Mutex write_queue_mu_; - // every host owns a queue - std::unordered_map> write_queues_; // ip+port, queue + pstd::Mutex write_queue_mu_; - PikaReplClient* pika_repl_client_; - PikaReplServer* pika_repl_server_; - int last_meta_sync_timestamp_; + // every host owns a queue, the key is "ip + port" + std::unordered_map>> write_queues_; + std::unique_ptr pika_repl_client_; + std::unique_ptr pika_repl_server_; }; #endif // PIKA_RM_H diff --git a/tools/pika_migrate/include/pika_rsync_service.h b/tools/pika_migrate/include/pika_rsync_service.h index f728f52b57..ccd4605a15 100644 --- a/tools/pika_migrate/include/pika_rsync_service.h +++ b/tools/pika_migrate/include/pika_rsync_service.h @@ -3,15 +3,14 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#ifndef PIKA_RSYNC_SERVICE_H_ -#define PIKA_RSYNC_SERVICE_H_ +#ifndef PIKA_RSYNC_SERVICE_H_ +#define PIKA_RSYNC_SERVICE_H_ -#include "iostream" +#include class PikaRsyncService { public: - PikaRsyncService(const std::string& raw_path, - const int port); + PikaRsyncService(const std::string& raw_path, int port); ~PikaRsyncService(); int StartRsync(); bool CheckRsyncAlive(); @@ -22,7 +21,7 @@ class PikaRsyncService { std::string raw_path_; std::string rsync_path_; std::string pid_path_; - int port_; + int port_ = 0; }; #endif diff --git a/tools/pika_migrate/include/pika_sender.h b/tools/pika_migrate/include/pika_sender.h index 1cdb38f34b..172c65b24c 100644 --- a/tools/pika_migrate/include/pika_sender.h +++ b/tools/pika_migrate/include/pika_sender.h @@ -7,11 +7,11 @@ #include #include -#include "pink/include/bg_thread.h" -#include "pink/include/pink_cli.h" -#include "pink/include/redis_cli.h" +#include "net/include/bg_thread.h" +#include "net/include/net_cli.h" +#include "net/include/redis_cli.h" -class PikaSender : public pink::Thread { +class PikaSender : public net::Thread { public: PikaSender(std::string ip, int64_t port, std::string password); virtual ~PikaSender(); @@ -25,9 +25,11 @@ class PikaSender : public pink::Thread { void ConnectRedis(); private: - pink::PinkCli *cli_; - slash::CondVar signal_; - slash::Mutex keys_mutex_; + net::NetCli *cli_; + pstd::CondVar wsignal_; + pstd::CondVar rsignal_; + std::mutex signal_mutex; + std::mutex keys_queue_mutex_; std::queue keys_queue_; std::string ip_; int port_; diff --git a/tools/pika_migrate/include/pika_server.h b/tools/pika_migrate/include/pika_server.h index 49085088b3..8418a15a85 100644 --- a/tools/pika_migrate/include/pika_server.h +++ b/tools/pika_migrate/include/pika_server.h @@ -6,102 +6,74 @@ #ifndef PIKA_SERVER_H_ #define PIKA_SERVER_H_ -#include -#include +#include -#include "slash/include/slash_mutex.h" -#include "slash/include/slash_status.h" -#include "slash/include/slash_string.h" -#include "pink/include/bg_thread.h" -#include "pink/include/thread_pool.h" -#include "pink/include/pink_pubsub.h" -#include "blackwidow/blackwidow.h" -#include "blackwidow/backupable.h" +#if defined(__APPLE__) || defined(__FreeBSD__) +# include +# include +#else +# include +#endif -#include "include/redis_sender.h" -#include "include/pika_conf.h" -#include "include/pika_table.h" +#include +#include + +#include "src/cache/include/config.h" +#include "net/include/bg_thread.h" +#include "net/include/net_pubsub.h" +#include "net/include/thread_pool.h" +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/pstd_status.h" +#include 
"pstd/include/pstd_string.h" +#include "storage/backupable.h" +#include "storage/storage.h" + +#include "acl.h" +#include "include/pika_auxiliary_thread.h" #include "include/pika_binlog.h" +#include "include/pika_cache.h" +#include "include/pika_client_processor.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" +#include "include/pika_conf.h" +#include "include/pika_db.h" #include "include/pika_define.h" -#include "include/pika_monitor_thread.h" -#include "include/pika_rsync_service.h" #include "include/pika_dispatch_thread.h" +#include "include/pika_instant.h" +#include "include/pika_migrate_thread.h" #include "include/pika_repl_client.h" #include "include/pika_repl_server.h" -#include "include/pika_auxiliary_thread.h" +#include "include/pika_rsync_service.h" +#include "include/pika_slot_command.h" +#include "include/pika_statistic.h" +#include "include/pika_transaction.h" +#include "include/redis_sender.h" +#include "include/rsync_server.h" -using slash::Status; -using slash::Slice; - -struct StatisticData { - StatisticData() - : accumulative_connections(0), - thread_querynum(0), - last_thread_querynum(0), - last_sec_thread_querynum(0), - last_time_us(0) { - CmdTable* cmds = new CmdTable(); - cmds->reserve(300); - InitCmdTable(cmds); - CmdTable::const_iterator it = cmds->begin(); - for (; it != cmds->end(); ++it) { - std::string tmp = it->first; - exec_count_table[slash::StringToUpper(tmp)].store(0); - } - DestoryCmdTable(cmds); - delete cmds; - } - - std::atomic accumulative_connections; - std::unordered_map> exec_count_table; - std::atomic thread_querynum; - std::atomic last_thread_querynum; - std::atomic last_sec_thread_querynum; - std::atomic last_time_us; -}; -/* -static std::set MultiKvCommands {kCmdNameDel, - kCmdNameMget, kCmdNameKeys, kCmdNameMset, - kCmdNameMsetnx, kCmdNameExists, kCmdNameScan, - kCmdNameScanx, kCmdNamePKScanRange, kCmdNamePKRScanRange, - kCmdNameRPopLPush, kCmdNameZUnionstore, kCmdNameZInterstore, - kCmdNameSUnion, kCmdNameSUnionstore, kCmdNameSInter, - kCmdNameSInterstore, kCmdNameSDiff, kCmdNameSDiffstore, - kCmdNameSMove, kCmdNameBitOp, kCmdNamePfAdd, - kCmdNamePfCount, kCmdNamePfMerge, kCmdNameGeoAdd, - kCmdNameGeoPos, kCmdNameGeoDist, kCmdNameGeoHash, - kCmdNameGeoRadius, kCmdNameGeoRadiusByMember}; -*/ - -static std::set ShardingModeNotSupportCommands { - kCmdNameMsetnx, kCmdNameScan, kCmdNameKeys, - kCmdNameScanx, kCmdNamePKScanRange, kCmdNamePKRScanRange, - kCmdNameRPopLPush, kCmdNameZUnionstore, kCmdNameZInterstore, - kCmdNameSUnion, kCmdNameSUnionstore, kCmdNameSInter, - kCmdNameSInterstore, kCmdNameSDiff, kCmdNameSDiffstore, - kCmdNameSMove, kCmdNameBitOp, kCmdNamePfAdd, - kCmdNamePfCount, kCmdNamePfMerge, kCmdNameGeoAdd, - kCmdNameGeoPos, kCmdNameGeoDist, kCmdNameGeoHash, - kCmdNameGeoRadius, kCmdNameGeoRadiusByMember, kCmdNamePKPatternMatchDel}; - - -extern PikaConf *g_pika_conf; +extern std::unique_ptr g_pika_conf; enum TaskType { kCompactAll, - kCompactStrings, - kCompactHashes, - kCompactSets, - kCompactZSets, - kCompactList, kResetReplState, kPurgeLog, kStartKeyScan, kStopKeyScan, kBgSave, + kCompactRangeAll, + kCompactOldestOrBestDeleteRatioSst, +}; + +struct TaskArg { + TaskType type; + std::vector argv; + TaskArg(TaskType t) : type(t) {} + TaskArg(TaskType t, const std::vector& a) : type(t), argv(a) {} }; -class PikaServer { +void DoBgslotscleanup(void* arg); +void DoBgslotsreload(void* arg); + +class PikaServer : public pstd::noncopyable { public: PikaServer(); ~PikaServer(); @@ -110,7 +82,6 @@ class 
PikaServer { * Server init info */ bool ServerInit(); - void Start(); void Exit(); @@ -120,60 +91,73 @@ class PikaServer { std::string master_ip(); int master_port(); int role(); - bool readonly(const std::string& table, const std::string& key); + bool leader_protected_mode(); + void CheckLeaderProtectedMode(); + bool readonly(const std::string& table); int repl_state(); std::string repl_state_str(); bool force_full_sync(); void SetForceFullSync(bool v); void SetDispatchQueueLimit(int queue_limit); - blackwidow::BlackwidowOptions bw_options(); + void SetSlowCmdThreadPoolFlag(bool flag); + storage::StorageOptions storage_options(); + std::unique_ptr& pika_dispatch_thread() { + return pika_dispatch_thread_; + } /* - * Table use + * DB use */ - void InitTableStruct(); - std::shared_ptr GetTable(const std::string& table_name); - std::set GetTablePartitionIds(const std::string& table_name); + void InitDBStruct(); bool IsBgSaving(); bool IsKeyScaning(); bool IsCompacting(); - bool IsTableExist(const std::string& table_name); - bool IsTablePartitionExist(const std::string& table_name, uint32_t partition_id); - bool IsCommandSupport(const std::string& command); - bool IsTableBinlogIoError(const std::string& table_name); - Status DoSameThingSpecificTable(const TaskType& type, const std::set& tables = {}); - - /* - * Partition use - */ - void PreparePartitionTrySync(); - void PartitionSetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys); - void PartitionSetSmallCompactionThreshold(uint32_t small_compaction_threshold); - bool GetTablePartitionBinlogOffset(const std::string& table_name, - uint32_t partition_id, - BinlogOffset* const boffset); - std::shared_ptr GetPartitionByDbName(const std::string& db_name); - std::shared_ptr GetTablePartitionById( - const std::string& table_name, - uint32_t partition_id); - std::shared_ptr GetTablePartitionByKey( - const std::string& table_name, - const std::string& key); - Status DoSameThingEveryPartition(const TaskType& type); + bool IsDBExist(const std::string& db_name); + bool IsDBBinlogIoError(const std::string& db_name); + std::shared_ptr GetDB(const std::string& db_name); + std::set GetAllDBName(); + pstd::Status DoSameThingSpecificDB(const std::set& dbs, const TaskArg& arg); + std::shared_mutex& GetDBLock() { + return dbs_rw_; + } + void DBLockShared() { + dbs_rw_.lock_shared(); + } + void DBLock() { + dbs_rw_.lock(); + } + void DBUnlock() { + dbs_rw_.unlock(); + } + void DBUnlockShared() { + dbs_rw_.unlock_shared(); + } + + /* + * DB use + */ + void PrepareDBTrySync(); + void DBSetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys); + void DBSetSmallCompactionThreshold(uint32_t small_compaction_threshold); + void DBSetSmallCompactionDurationThreshold(uint32_t small_compaction_duration_threshold); + bool GetDBBinlogOffset(const std::string& db_name, BinlogOffset* boffset); + pstd::Status DoSameThingEveryDB(const TaskType& type); /* * Master use */ void BecomeMaster(); - void DeleteSlave(int fd); //conn fd + void DeleteSlave(int fd); // conn fd int32_t CountSyncSlaves(); int32_t GetSlaveListString(std::string& slave_list_str); - int32_t GetShardingSlaveListString(std::string& slave_list_str); - bool TryAddSlave(const std::string& ip, int64_t port, int fd, - const std::vector& table_structs); - slash::Mutex slave_mutex_; // protect slaves_; + bool TryAddSlave(const std::string& ip, int64_t port, int fd, const std::vector& table_structs); + pstd::Mutex slave_mutex_; // protect slaves_; std::vector slaves_; + /** + * Sotsmgrt use + */ + 
std::unique_ptr pika_migrate_; /* * Slave use @@ -189,24 +173,32 @@ class PikaServer { void FinishMetaSync(); bool MetaSyncDone(); void ResetMetaSyncStatus(); - bool AllPartitionConnectSuccess(); - bool LoopPartitionStateMachine(); - void SetLoopPartitionStateMachine(bool need_loop); + int GetMetaSyncTimestamp(); + void UpdateMetaSyncTimestamp(); + void UpdateMetaSyncTimestampWithoutLock(); + bool IsFirstMetaSync(); + void SetFirstMetaSync(bool v); /* - * ThreadPool Process Task + * PikaClientProcessor Process Task */ - void Schedule(pink::TaskFunc func, void* arg); + void ScheduleClientPool(net::TaskFunc func, void* arg, bool is_slow_cmd, bool is_admin_cmd); + + // for info debug + size_t ClientProcessorThreadPoolCurQueueSize(); + size_t ClientProcessorThreadPoolMaxQueueSize(); + size_t SlowCmdThreadPoolCurQueueSize(); + size_t SlowCmdThreadPoolMaxQueueSize(); /* * BGSave used */ - void BGSaveTaskSchedule(pink::TaskFunc func, void* arg); + void BGSaveTaskSchedule(net::TaskFunc func, void* arg); /* * PurgeLog used */ - void PurgelogsTaskSchedule(pink::TaskFunc func, void* arg); + void PurgelogsTaskSchedule(net::TaskFunc func, void* arg); /* * Flushall & Flushdb used @@ -217,83 +209,104 @@ class PikaServer { /* * DBSync used */ - void DBSync(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id); - void TryDBSync(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int32_t top); - void DbSyncSendFile(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id); - std::string DbSyncTaskIndex(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id); + pstd::Status GetDumpUUID(const std::string& db_name, std::string* snapshot_uuid); + pstd::Status GetDumpMeta(const std::string& db_name, std::vector* files, std::string* snapshot_uuid); + void TryDBSync(const std::string& ip, int port, const std::string& db_name, int32_t top); /* * Keyscan used */ - void KeyScanTaskSchedule(pink::TaskFunc func, void* arg); + void KeyScanTaskSchedule(net::TaskFunc func, void* arg); /* * Client used */ void ClientKillAll(); - int ClientKill(const std::string &ip_port); - int64_t ClientList(std::vector *clients = nullptr); + int ClientKill(const std::string& ip_port); + int64_t ClientList(std::vector* clients = nullptr); + void ClientKillPubSub(); + void ClientKillAllNormal(); /* * Monitor used */ - bool HasMonitorClients(); - void AddMonitorMessage(const std::string &monitor_message); - void AddMonitorClient(std::shared_ptr client_ptr); + bool HasMonitorClients() const; + bool ClientIsMonitor(const std::shared_ptr& client_ptr) const; + void AddMonitorMessage(const std::string& monitor_message); + void AddMonitorClient(const std::shared_ptr& client_ptr); /* * Slowlog used */ void SlowlogTrim(); void SlowlogReset(); - uint32_t SlowlogLen(); void SlowlogObtain(int64_t number, std::vector* slowlogs); - void SlowlogPushEntry(const PikaCmdArgsType& argv, int32_t time, int64_t duration); + void SlowlogPushEntry(const std::vector& argv, int64_t time, int64_t duration); + uint32_t SlowlogLen(); + uint64_t SlowlogCount(); /* * Statistic used */ - void ResetStat(); uint64_t ServerQueryNum(); uint64_t ServerCurrentQps(); uint64_t accumulative_connections(); + long long ServerKeyspaceHits(); + long long ServerKeyspaceMisses(); + void ResetStat(); void incr_accumulative_connections(); + void incr_server_keyspace_hits(); + void incr_server_keyspace_misses(); void ResetLastSecQuerynum(); - void 
UpdateQueryNumAndExecCountTable(const std::string& command); - std::unordered_map ServerExecCountTable(); + void UpdateQueryNumAndExecCountDB(const std::string& db_name, const std::string& command, bool is_write); + std::unordered_map ServerExecCountDB(); + std::unordered_map ServerAllDBStat(); + + /* + * Disk usage statistic + */ + uint64_t GetDBSize() const { + return disk_statistic_.db_size_.load(); + } + uint64_t GetLogSize() const { + return disk_statistic_.log_size_.load(); + } + + /* + * Network Statistic used + */ + size_t NetInputBytes(); + size_t NetOutputBytes(); + size_t NetReplInputBytes(); + size_t NetReplOutputBytes(); + float InstantaneousInputKbps(); + float InstantaneousOutputKbps(); + float InstantaneousInputReplKbps(); + float InstantaneousOutputReplKbps(); /* * Slave to Master communication used */ int SendToPeer(); void SignalAuxiliary(); - Status TriggerSendBinlogSync(); + pstd::Status TriggerSendBinlogSync(); /* * PubSub used */ int PubSubNumPat(); int Publish(const std::string& channel, const std::string& msg); - int UnSubscribe(std::shared_ptr conn, - const std::vector& channels, - const bool pattern, + void EnablePublish(int fd); + int UnSubscribe(const std::shared_ptr& conn, const std::vector& channels, bool pattern, std::vector>* result); - void Subscribe(std::shared_ptr conn, - const std::vector& channels, - const bool pattern, + void Subscribe(const std::shared_ptr& conn, const std::vector& channels, bool pattern, std::vector>* result); - void PubSubChannels(const std::string& pattern, - std::vector* result); - void PubSubNumSub(const std::vector& channels, - std::vector>* result); + void PubSubChannels(const std::string& pattern, std::vector* result); + void PubSubNumSub(const std::vector& channels, std::vector>* result); + int ClientPubSubChannelSize(const std::shared_ptr& conn); + int ClientPubSubChannelPatternSize(const std::shared_ptr& conn); + + pstd::Status GetCmdRouting(std::vector& redis_cmds, std::vector* dst, bool* all_local); /* * migrate used @@ -301,125 +314,361 @@ class PikaServer { int SendRedisCommand(const std::string& command, const std::string& key); void RetransmitData(const std::string& path); + // info debug use + void ServerStatus(std::string* info); + + /* + * Async migrate used + */ + int SlotsMigrateOne(const std::string& key, const std::shared_ptr &db); + bool SlotsMigrateBatch(const std::string &ip, int64_t port, int64_t time_out, int64_t slots, int64_t keys_num, const std::shared_ptr& db); + void GetSlotsMgrtSenderStatus(std::string *ip, int64_t* port, int64_t *slot, bool *migrating, int64_t *moved, int64_t *remained); + bool SlotsMigrateAsyncCancel(); + std::shared_mutex bgslots_protector_; + + /* + * BGSlotsReload used + */ + struct BGSlotsReload { + bool reloading = false; + time_t start_time = 0; + time_t end_time = 0; + std::string s_start_time; + int64_t cursor = 0; + std::string pattern = "*"; + int64_t count = 100; + std::shared_ptr db; + BGSlotsReload() = default; + void Clear() { + reloading = false; + pattern = "*"; + count = 100; + cursor = 0; + } + }; + + BGSlotsReload bgslots_reload_; + + BGSlotsReload bgslots_reload() { + std::lock_guard ml(bgslots_protector_); + return bgslots_reload_; + } + bool GetSlotsreloading() { + std::lock_guard ml(bgslots_protector_); + return bgslots_reload_.reloading; + } + void SetSlotsreloading(bool reloading) { + std::lock_guard ml(bgslots_protector_); + bgslots_reload_.reloading = reloading; + } + void SetSlotsreloadingCursor(int64_t cursor) { + std::lock_guard 
ml(bgslots_protector_); + bgslots_reload_.cursor = cursor; + } + int64_t GetSlotsreloadingCursor() { + std::lock_guard ml(bgslots_protector_); + return bgslots_reload_.cursor; + } + + void SetSlotsreloadingEndTime() { + std::lock_guard ml(bgslots_protector_); + bgslots_reload_.end_time = time(nullptr); + } + void Bgslotsreload(const std::shared_ptr& db); + + // Revoke the authorization of the specified account, when handle Cmd deleteUser + void AllClientUnAuth(const std::set& users); + + // Determine whether the user's conn can continue to subscribe to the channel + void CheckPubsubClientKill(const std::string& userName, const std::vector& allChannel); + + /* + * BGSlotsCleanup used + */ + struct BGSlotsCleanup { + bool cleaningup = false; + time_t start_time = 0; + time_t end_time = 0; + std::string s_start_time; + int64_t cursor = 0; + std::string pattern = "*"; + int64_t count = 100; + std::shared_ptr db; + storage::DataType type_; + std::vector cleanup_slots; + BGSlotsCleanup() = default; + void Clear() { + cleaningup = false; + pattern = "*"; + count = 100; + cursor = 0; + } + }; + + /* + * BGSlotsCleanup use + */ + BGSlotsCleanup bgslots_cleanup_; + net::BGThread bgslots_cleanup_thread_; + + BGSlotsCleanup bgslots_cleanup() { + std::lock_guard ml(bgslots_protector_); + return bgslots_cleanup_; + } + bool GetSlotscleaningup() { + std::lock_guard ml(bgslots_protector_); + return bgslots_cleanup_.cleaningup; + } + void SetSlotscleaningup(bool cleaningup) { + std::lock_guard ml(bgslots_protector_); + bgslots_cleanup_.cleaningup = cleaningup; + } + void SetSlotscleaningupCursor(int64_t cursor) { + std::lock_guard ml(bgslots_protector_); + bgslots_cleanup_.cursor = cursor; + } + void SetCleanupSlots(std::vector cleanup_slots) { + std::lock_guard ml(bgslots_protector_); + bgslots_cleanup_.cleanup_slots.swap(cleanup_slots); + } + std::vector GetCleanupSlots() { + std::lock_guard ml(bgslots_protector_); + return bgslots_cleanup_.cleanup_slots; + } + + void Bgslotscleanup(std::vector cleanup_slots, const std::shared_ptr& db); + void StopBgslotscleanup() { + std::lock_guard ml(bgslots_protector_); + bgslots_cleanup_.cleaningup = false; + std::vector cleanup_slots; + bgslots_cleanup_.cleanup_slots.swap(cleanup_slots); + } + + /* + * StorageOptions used + */ + storage::Status RewriteStorageOptions(const storage::OptionType& option_type, + const std::unordered_map& options); + + /* + * Instantaneous Metric used + */ + std::unique_ptr instant_; + + /* + * Diskrecovery used + */ + std::map> GetDB() { + return dbs_; + } + + /* + * acl init + */ + pstd::Status InitAcl() { return acl_->Initialization(); } + + std::unique_ptr<::Acl>& Acl() { return acl_; } friend class Cmd; friend class InfoCmd; - friend class PkClusterAddSlotsCmd; - friend class PkClusterDelSlotsCmd; friend class PikaReplClientConn; friend class PkClusterInfoCmd; + struct BGCacheTaskArg { + BGCacheTaskArg() : conf(nullptr), reenable_cache(false) {} + int task_type; + std::shared_ptr db; + uint32_t cache_num; + cache::CacheConfig cache_cfg; + std::unique_ptr conf; + bool reenable_cache; + }; + + /* + * Cache used + */ + static void DoCacheBGTask(void* arg); + void ResetCacheAsync(uint32_t cache_num, std::shared_ptr db, cache::CacheConfig *cache_cfg = nullptr); + void ClearCacheDbAsync(std::shared_ptr db); + void ClearCacheDbAsyncV2(std::shared_ptr db); + void ResetCacheConfig(std::shared_ptr db); + void ClearHitRatio(std::shared_ptr db); + void OnCacheStartPosChanged(int zset_cache_start_direction, std::shared_ptr db); + void 
UpdateCacheInfo(void); + void ResetDisplayCacheInfo(int status, std::shared_ptr db); + void CacheConfigInit(cache::CacheConfig &cache_cfg); + void ProcessCronTask(); + double HitRatio(); + + /* + * disable compact + */ + void DisableCompact(); + + /* + * lastsave used + */ + int64_t GetLastSave() const {return lastsave_;} + void UpdateLastSave(int64_t lastsave) {lastsave_ = lastsave;} + void InitStatistic(CmdTable *inited_cmd_table) { + // we insert all cmd name to statistic_.server_stat.exec_count_db, + // then when we can call PikaServer::UpdateQueryNumAndExecCountDB(const std::string&, const std::string&, bool) in parallel without lock + // although exec_count_db(unordered_map) is not thread-safe, but we won't trigger any insert or erase operation toward exec_count_db(unordered_map) during the running of pika + auto &exec_stat_map = statistic_.server_stat.exec_count_db; + for (auto& it : *inited_cmd_table) { + std::string cmd_name = it.first; //value copy is needed + pstd::StringToUpper(cmd_name); //cmd_name now is all uppercase + exec_stat_map.insert(std::make_pair(cmd_name, 0)); + } + } private: /* * TimingTask use */ void DoTimingTask(); void AutoCompactRange(); - void AutoPurge(); + void AutoBinlogPurge(); + void AutoServerlogPurge(); void AutoDeleteExpiredDump(); - void AutoKeepAliveRSync(); + void AutoUpdateNetworkMetric(); + void PrintThreadPoolQueueStatus(); + void StatDiskUsage(); + int64_t GetLastSaveTime(const std::string& dump_dir); std::string host_; - int port_; - time_t start_time_s_; + int port_ = 0; + time_t start_time_s_ = 0; - blackwidow::BlackwidowOptions bw_options_; - void InitBlackwidowOptions(); + std::shared_mutex storage_options_rw_; + storage::StorageOptions storage_options_; + void InitStorageOptions(); std::atomic exit_; + std::timed_mutex exit_mutex_; /* - * Table used + * DB used */ - std::atomic slot_state_; - pthread_rwlock_t tables_rw_; - std::map> tables_; + std::shared_mutex dbs_rw_; + std::map> dbs_; /* * CronTask used */ - bool have_scheduled_crontask_; + bool have_scheduled_crontask_ = false; struct timeval last_check_compact_time_; /* - * Communicate with the client used + * ResumeDB used */ - int worker_num_; - pink::ThreadPool* pika_thread_pool_; - PikaDispatchThread* pika_dispatch_thread_; + struct timeval last_check_resume_time_; + /* + * Communicate with the client used + */ + int worker_num_ = 0; + std::unique_ptr pika_client_processor_; + std::unique_ptr pika_slow_cmd_thread_pool_; + std::unique_ptr pika_admin_cmd_thread_pool_; + std::unique_ptr pika_dispatch_thread_ = nullptr; /* * Slave used */ std::string master_ip_; - int master_port_; - int repl_state_; - int role_; - bool loop_partition_state_machine_; - bool force_full_sync_; - pthread_rwlock_t state_protector_; //protect below, use for master-slave mode + int master_port_ = 0; + int repl_state_ = PIKA_REPL_NO_CONNECT; + int role_ = PIKA_ROLE_SINGLE; + int last_meta_sync_timestamp_ = 0; + bool first_meta_sync_ = false; + bool force_full_sync_ = false; + bool leader_protected_mode_ = false; // reject request after master slave sync done + std::shared_mutex state_protector_; // protect below, use for master-slave mode /* * Bgsave used */ - pink::BGThread bgsave_thread_; + net::BGThread bgsave_thread_; /* * Purgelogs use */ - pink::BGThread purge_thread_; - - /* - * DBSync used - */ - slash::Mutex db_sync_protector_; - std::unordered_set db_sync_slaves_; + net::BGThread purge_thread_; /* * Keyscan used */ - pink::BGThread key_scan_thread_; + net::BGThread key_scan_thread_; /* * Monitor 
used */ - PikaMonitorThread* pika_monitor_thread_; + mutable pstd::Mutex monitor_mutex_protector_; + std::set, std::owner_less>> pika_monitor_clients_; /* * Rsync used */ - PikaRsyncService* pika_rsync_service_; + std::unique_ptr pika_rsync_service_; + std::unique_ptr rsync_server_; /* * Pubsub used */ - pink::PubSubThread* pika_pubsub_thread_; + std::unique_ptr pika_pubsub_thread_; /* * Communication used */ - PikaAuxiliaryThread* pika_auxiliary_thread_; + std::unique_ptr pika_auxiliary_thread_; /* - * + * migrate to redis used + */ + std::vector> redis_senders_; + + /* + * Async slotsMgrt use */ - std::vector redis_senders_; + std::unique_ptr pika_migrate_thread_; /* * Slowlog used */ - uint64_t slowlog_entry_id_; - pthread_rwlock_t slowlog_protector_; + uint64_t slowlog_entry_id_ = 0; + uint64_t slowlog_counter_ = 0; + std::shared_mutex slowlog_protector_; std::list slowlog_list_; /* * Statistic used */ - StatisticData statistic_data_; + Statistic statistic_; - PikaServer(PikaServer &ps); - void operator =(const PikaServer &ps); + DiskStatistic disk_statistic_; + + net::BGThread common_bg_thread_; + + /* + * Cache used + */ + std::shared_mutex mu_; + std::shared_mutex cache_info_rwlock_; + + /* + * lastsave used + */ + int64_t lastsave_ = 0; + + /* + * acl + */ + std::unique_ptr<::Acl> acl_ = nullptr; + + /* + * fast and slow thread pools + */ + bool slow_cmd_thread_pool_flag_; }; #endif diff --git a/tools/pika_migrate/include/pika_set.h b/tools/pika_migrate/include/pika_set.h index fe00850751..c4b8eb2031 100644 --- a/tools/pika_migrate/include/pika_set.h +++ b/tools/pika_migrate/include/pika_set.h @@ -6,258 +6,363 @@ #ifndef PIKA_SET_H_ #define PIKA_SET_H_ +#include "include/acl.h" #include "include/pika_command.h" -#include "include/pika_partition.h" +#include "pika_kv.h" /* * set */ class SAddCmd : public Cmd { public: - SAddCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + SAddCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SAddCmd(*this); + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SAddCmd(*this); } + + private: + std::string key_; + std::vector members_; + rocksdb::Status s_; + void DoInitial() override; +}; + +class SRemCmd : public Cmd { + public: + SRemCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SRemCmd(*this); } + + private: + void DoInitial() override; + private: std::string key_; std::vector members_; - virtual void DoInitial() override; + rocksdb::Status s_; + int32_t deleted_ = 0; }; class SPopCmd : public Cmd { public: - SPopCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + SPopCmd(const std::string& name, int arity, uint32_t 
flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) { + srem_cmd_ = std::make_shared(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet); + } + SPopCmd(const SPopCmd& other) + : Cmd(other), key_(other.key_), members_(other.members_), count_(other.count_), s_(other.s_) { + srem_cmd_ = std::make_shared(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet); + } + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SPopCmd(*this); - } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SPopCmd(*this); } + void DoBinlog() override; + + private: + void DoInitial() override; + private: std::string key_; - virtual void DoInitial() override; + std::vector members_; + // used for write binlog + std::shared_ptr srem_cmd_; + int64_t count_ = 1; + rocksdb::Status s_; }; class SCardCmd : public Cmd { public: - SCardCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + SCardCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SCardCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SCardCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; class SMembersCmd : public Cmd { public: - SMembersCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + SMembersCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SMembersCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SMembersCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; class SScanCmd : public Cmd { public: - SScanCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual std::vector current_key() const { + SScanCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)), pattern_("*") {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SScanCmd(*this); - } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return 
new SScanCmd(*this); } + private: - std::string key_, pattern_; - int64_t cursor_, count_; - virtual void DoInitial() override; - virtual void Clear() { + std::string key_, pattern_ = "*"; + int64_t cursor_ = 0; + int64_t count_ = 10; + void DoInitial() override; + void Clear() override { pattern_ = "*"; count_ = 10; } }; -class SRemCmd : public Cmd { +class SUnionCmd : public Cmd { public: - SRemCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SRemCmd(*this); - } + SUnionCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SUnionCmd(*this); } + private: - std::string key_; - std::vector members_; - virtual void DoInitial() override; + std::vector keys_; + void DoInitial() override; }; -class SUnionCmd : public Cmd { +class SetOperationCmd : public Cmd { public: - SUnionCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SUnionCmd(*this); + SetOperationCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) { + sadd_cmd_ = std::make_shared(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet); + del_cmd_ = std::make_unique(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); } - private: + SetOperationCmd(const SetOperationCmd& other) + : Cmd(other), dest_key_(other.dest_key_), value_to_dest_(other.value_to_dest_) { + sadd_cmd_ = std::make_shared(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet); + del_cmd_ = std::make_unique(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); + } + + std::vector current_key() const override { return {dest_key_}; } + void DoBinlog() override; + + protected: + std::string dest_key_; std::vector keys_; - virtual void DoInitial() override; + // used for write binlog + std::shared_ptr sadd_cmd_; + std::shared_ptr del_cmd_; + std::vector value_to_dest_; }; -class SUnionstoreCmd : public Cmd { +class SUnionstoreCmd : public SetOperationCmd { public: - SUnionstoreCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SUnionstoreCmd(*this); - } + SUnionstoreCmd(const std::string& name, int arity, uint32_t flag) : SetOperationCmd(name, arity, flag) {} + // current_key() is override in base class : SetOperationCmd + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SUnionstoreCmd(*this); } + private: - std::string dest_key_; - std::vector keys_; - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; }; class SInterCmd : public Cmd { public: - SInterCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SInterCmd(*this); - } 
+ SInterCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SInterCmd(*this); } + private: std::vector keys_; - virtual void DoInitial() override; + void DoInitial() override; }; -class SInterstoreCmd : public Cmd { +class SInterstoreCmd : public SetOperationCmd { public: - SInterstoreCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SInterstoreCmd(*this); - } + SInterstoreCmd(const std::string& name, int arity, uint32_t flag) : SetOperationCmd(name, arity, flag) {} + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SInterstoreCmd(*this); } + private: - std::string dest_key_; - std::vector keys_; - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; }; class SIsmemberCmd : public Cmd { public: - SIsmemberCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + SIsmemberCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SIsmemberCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SIsmemberCmd(*this); } + private: - std::string key_, member_; - virtual void DoInitial() override; + std::string key_; + std::string member_; + rocksdb::Status s_; + void DoInitial() override; }; class SDiffCmd : public Cmd { public: - SDiffCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SDiffCmd(*this); - } + SDiffCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SDiffCmd(*this); } + private: std::vector keys_; - virtual void DoInitial() override; + void DoInitial() override; }; -class SDiffstoreCmd : public Cmd { +class SDiffstoreCmd : public SetOperationCmd { public: - SDiffstoreCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SDiffstoreCmd(*this); - } + SDiffstoreCmd(const std::string& name, int arity, uint32_t flag) : SetOperationCmd(name, arity, flag) {} + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SDiffstoreCmd(*this); } + private: - std::string dest_key_; - std::vector keys_; - virtual void DoInitial() override; + rocksdb::Status s_; + 
void DoInitial() override; }; class SMoveCmd : public Cmd { public: - SMoveCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SMoveCmd(*this); + SMoveCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) { + srem_cmd_ = std::make_shared(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet); + sadd_cmd_ = std::make_shared(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet); } + SMoveCmd(const SMoveCmd& other) + : Cmd(other), + src_key_(other.src_key_), + dest_key_(other.dest_key_), + member_(other.member_), + move_success_(other.move_success_) { + srem_cmd_ = std::make_shared(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet); + sadd_cmd_ = std::make_shared(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet); + } + std::vector current_key() const override { return {src_key_, dest_key_}; } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SMoveCmd(*this); } + void DoBinlog() override; + private: std::string src_key_, dest_key_, member_; - virtual void DoInitial() override; + void DoInitial() override; + // used for write binlog + std::shared_ptr srem_cmd_; + std::shared_ptr sadd_cmd_; + int32_t move_success_{0}; }; class SRandmemberCmd : public Cmd { public: - SRandmemberCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), count_(1) {} - virtual std::vector current_key() const { + SRandmemberCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SRandmemberCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SRandmemberCmd(*this); } + private: std::string key_; - int64_t count_; - bool reply_arr; - virtual void DoInitial() override; - virtual void Clear() { + int64_t count_ = 1; + bool reply_arr = false; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { count_ = 1; reply_arr = false; } diff --git a/tools/pika_migrate/include/pika_slave_node.h b/tools/pika_migrate/include/pika_slave_node.h new file mode 100644 index 0000000000..e37325b521 --- /dev/null +++ b/tools/pika_migrate/include/pika_slave_node.h @@ -0,0 +1,82 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
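Worth calling out in the set commands above: SPopCmd, SMoveCmd, and SetOperationCmd each construct helper commands (srem_cmd_, sadd_cmd_, del_cmd_) that exist only to rewrite the binlog. SPOP removes random members, so replaying a literal SPOP could diverge on replicas; logging the equivalent SREM of the members actually removed keeps replicas identical. A minimal standalone sketch of the rewrite idea, with illustrative names rather than Pika's real API:

```cpp
// Sketch only: shows why SPopCmd carries a nested srem_cmd_. SPOP picks
// random members, so the master logs a deterministic SREM of whatever it
// actually removed instead of the SPOP itself.
#include <iostream>
#include <string>
#include <vector>

// Build the argv that would be written to the binlog in place of SPOP.
std::vector<std::string> RewriteSPopAsSRem(const std::string& key,
                                           const std::vector<std::string>& popped) {
  std::vector<std::string> argv = {"srem", key};
  argv.insert(argv.end(), popped.begin(), popped.end());
  return argv;
}

int main() {
  // Suppose SPOP myset 2 happened to remove "a" and "c" on the master.
  for (const auto& token : RewriteSPopAsSRem("myset", {"a", "c"})) {
    std::cout << token << ' ';
  }
  std::cout << '\n';  // prints: srem myset a c
}
```

SMoveCmd applies the same idea with an SREM on the source key plus an SADD on the destination key, gated on move_success_, and the store commands (SUNIONSTORE, SINTERSTORE, SDIFFSTORE) log a DEL of the destination followed by an SADD of the computed result.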
+ +#ifndef PIKA_SLAVE_NODE_H_ +#define PIKA_SLAVE_NODE_H_ + +#include <deque> +#include <memory> + +#include "include/pika_binlog_reader.h" +#include "include/pika_define.h" + +struct SyncWinItem { + LogOffset offset_; + std::size_t binlog_size_ = 0; + bool acked_ = false; + bool operator==(const SyncWinItem& other) const { + return offset_.b_offset.filenum == other.offset_.b_offset.filenum && + offset_.b_offset.offset == other.offset_.b_offset.offset; + } + explicit SyncWinItem(const LogOffset& offset, std::size_t binlog_size = 0) + : offset_(offset), binlog_size_(binlog_size) {} + std::string ToString() const { + return offset_.ToString() + " binlog size: " + std::to_string(binlog_size_) + " acked: " + std::to_string(static_cast<int>(acked_)); + } +}; + +class SyncWindow { + public: + SyncWindow() = default; + void Push(const SyncWinItem& item); + bool Update(const SyncWinItem& start_item, const SyncWinItem& end_item, LogOffset* acked_offset); + int Remaining(); + std::string ToStringStatus() const { + if (win_.empty()) { + return " Size: " + std::to_string(win_.size()) + "\r\n"; + } else { + std::string res; + res += " Size: " + std::to_string(win_.size()) + "\r\n"; + res += (" Begin_item: " + win_.begin()->ToString() + "\r\n"); + res += (" End_item: " + win_.rbegin()->ToString() + "\r\n"); + return res; + } + } + std::size_t GetTotalBinlogSize() { return total_size_; } + void Reset() { + win_.clear(); + total_size_ = 0; + } + + private: + // TODO(whoiami) ring buffer maybe + std::deque<SyncWinItem> win_; + std::size_t total_size_ = 0; +}; + +// role master use +class SlaveNode : public RmNode { + public: + SlaveNode(const std::string& ip, int port, const std::string& db_name, int session_id); + ~SlaveNode() override; + void Lock() { slave_mu.lock(); } + void Unlock() { slave_mu.unlock(); } + SlaveState slave_state{kSlaveNotSync}; + + BinlogSyncState b_state{kNotSync}; + SyncWindow sync_win; + LogOffset sent_offset; + LogOffset acked_offset; + + std::string ToStringStatus(); + + std::shared_ptr<PikaBinlogReader> binlog_reader; + pstd::Status InitBinlogFileReader(const std::shared_ptr<Binlog>& binlog, const BinlogOffset& offset); + pstd::Status Update(const LogOffset& start, const LogOffset& end, LogOffset* updated_offset); + + pstd::Mutex slave_mu; +}; + +#endif // PIKA_SLAVE_NODE_H_ diff --git a/tools/pika_migrate/include/pika_slaveping_thread.h b/tools/pika_migrate/include/pika_slaveping_thread.h index bc8e6a7ef9..a79200782e 100644 --- a/tools/pika_migrate/include/pika_slaveping_thread.h +++ b/tools/pika_migrate/include/pika_slaveping_thread.h @@ -8,17 +8,16 @@ #include -#include "slash/include/slash_status.h" -#include "pink/include/pink_cli.h" -#include "pink/include/pink_thread.h" +#include "net/include/net_cli.h" +#include "net/include/net_thread.h" +#include "pstd/include/pstd_status.h" -using slash::Status; +using pstd::Status; -class PikaSlavepingThread : public pink::Thread { +class PikaSlavepingThread : public net::Thread { public: - PikaSlavepingThread(int64_t sid) - : sid_(sid), is_first_send_(true) { - cli_ = pink::NewPbCli(); + PikaSlavepingThread(int64_t sid) : sid_(sid), is_first_send_(true) { + cli_ = net::NewPbCli(); + cli_->set_connect_timeout(1500); set_thread_name("SlavePingThread"); }; @@ -32,12 +31,10 @@ class PikaSlavepingThread : public pink::Thread { Status RecvProc(); private: - int64_t sid_; - bool is_first_send_; - - int sockfd_; - pink::PinkCli *cli_; + int64_t sid_ = 0; + bool is_first_send_ = true; + int sockfd_ = -1; + net::NetCli* cli_ = nullptr; virtual void* ThreadMain(); };
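SyncWindow in pika_slave_node.h above is the master-side sliding window for binlog replication: Push records each binlog entry sent to a slave, and Update marks the acked range and advances the acked offset past the fully acked prefix at the front of the window. A compact model of those assumed semantics, with plain integers standing in for LogOffset (a sketch, not the real SlaveNode code):

```cpp
// Minimal sliding-window model: the master pushes one item per binlog entry
// it sends; when the slave acks a contiguous prefix, the window pops it and
// the acked offset advances to the last popped item.
#include <cstdint>
#include <deque>
#include <iostream>

struct Item {
  uint64_t offset;   // stands in for LogOffset
  bool acked = false;
};

class Window {
 public:
  void Push(const Item& item) { win_.push_back(item); }
  // Mark [start, end] acked; returns true and sets *acked_offset when the
  // front of the window becomes a fully acked prefix that can be dropped.
  bool Update(uint64_t start, uint64_t end, uint64_t* acked_offset) {
    for (auto& it : win_) {
      if (it.offset >= start && it.offset <= end) it.acked = true;
    }
    bool advanced = false;
    while (!win_.empty() && win_.front().acked) {
      *acked_offset = win_.front().offset;
      win_.pop_front();
      advanced = true;
    }
    return advanced;
  }

 private:
  std::deque<Item> win_;
};

int main() {
  Window w;
  for (uint64_t i = 1; i <= 3; ++i) w.Push({i});
  uint64_t acked = 0;
  w.Update(1, 2, &acked);
  std::cout << acked << '\n';  // 2: entries 1..2 are confirmed on the slave
}
```
diff --git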
a/tools/pika_migrate/include/pika_slot.h b/tools/pika_migrate/include/pika_slot.h deleted file mode 100644 index 052f87269b..0000000000 --- a/tools/pika_migrate/include/pika_slot.h +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_SLOT_H_ -#define PIKA_SLOT_H_ - -#include "include/pika_command.h" - -class SlotsInfoCmd : public Cmd { - public: - SlotsInfoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsInfoCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsHashKeyCmd : public Cmd { - public: - SlotsHashKeyCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsHashKeyCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtSlotAsyncCmd : public Cmd { - public: - SlotsMgrtSlotAsyncCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtSlotAsyncCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtTagSlotAsyncCmd : public Cmd { - public: - SlotsMgrtTagSlotAsyncCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), dest_port_(0), slot_num_(-1) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtTagSlotAsyncCmd(*this); - } - private: - virtual void DoInitial() override; - std::string dest_ip_; - int64_t dest_port_; - int64_t slot_num_; - virtual void Clear() { - dest_ip_.clear(); - dest_port_ = 0; - slot_num_ = -1; - } -}; - -class SlotsScanCmd : public Cmd { - public: - SlotsScanCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsScanCmd(*this); - } - private: - int64_t cursor_; - uint32_t slotnum_; - std::string pattern_; - int64_t count_; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - count_ = 10; - } -}; - -class SlotsDelCmd : public Cmd { - public: - SlotsDelCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsDelCmd(*this); - } - private: - std::vector slots_; - virtual void DoInitial() override; - virtual void Clear() { - slots_.clear(); - } -}; - -class SlotsMgrtExecWrapperCmd : public Cmd { - public: - SlotsMgrtExecWrapperCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtExecWrapperCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; - virtual void Clear() { - key_.clear(); - } -}; - -class SlotsMgrtAsyncStatusCmd : public Cmd { - public: - SlotsMgrtAsyncStatusCmd(const std::string& name, int 
arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtAsyncStatusCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtAsyncCancelCmd : public Cmd { - public: - SlotsMgrtAsyncCancelCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtAsyncCancelCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtSlotCmd : public Cmd { - public: - SlotsMgrtSlotCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtSlotCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtTagSlotCmd : public Cmd { - public: - SlotsMgrtTagSlotCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtTagSlotCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtOneCmd : public Cmd { - public: - SlotsMgrtOneCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtOneCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtTagOneCmd : public Cmd { - public: - SlotsMgrtTagOneCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtTagOneCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -#endif // PIKA_SLOT_H_ diff --git a/tools/pika_migrate/include/pika_slot_command.h b/tools/pika_migrate/include/pika_slot_command.h new file mode 100644 index 0000000000..53937d6172 --- /dev/null +++ b/tools/pika_migrate/include/pika_slot_command.h @@ -0,0 +1,273 @@ +#ifndef PIKA_SLOT_COMMAND_H_ +#define PIKA_SLOT_COMMAND_H_ + +#include "include/pika_client_conn.h" +#include "include/pika_command.h" +#include "net/include/net_cli.h" +#include "net/include/net_thread.h" +#include "storage/storage.h" +#include "storage/src/base_data_key_format.h" +#include "strings.h" + +const std::string SlotKeyPrefix = "_internal:slotkey:4migrate:"; +const std::string SlotTagPrefix = "_internal:slottag:4migrate:"; + +const size_t MaxKeySendSize = 10 * 1024; + +int GetKeyType(const std::string& key, std::string &key_type, const std::shared_ptr& db); +void AddSlotKey(const std::string& type, const std::string& key, const std::shared_ptr& db); +void RemSlotKey(const std::string& key, const std::shared_ptr& db); +int DeleteKey(const std::string& key, const char key_type, const std::shared_ptr& db); +void RemSlotKeyByType(const std::string& type, const std::string& key, const std::shared_ptr& db); +std::string GetSlotKey(uint32_t slot); +std::string GetSlotsTagKey(uint32_t crc); + +class PikaMigrate { + public: + PikaMigrate(); + virtual ~PikaMigrate(); + + int MigrateKey(const std::string& host, const int port, int timeout, const std::string& key, const char type, + std::string& detail, const std::shared_ptr& db); + void CleanMigrateClient(); + + void Lock() { + mutex_.lock(); + } + int 
Trylock() { + return mutex_.try_lock(); + } + void Unlock() { + mutex_.unlock(); + } + net::NetCli* GetMigrateClient(const std::string& host, const int port, int timeout); + + private: + std::map migrate_clients_; + pstd::Mutex mutex_; + void KillMigrateClient(net::NetCli* migrate_cli); + void KillAllMigrateClient(); + int64_t TTLByType(const char key_type, const std::string& key, const std::shared_ptr& db); + int MigrateSend(net::NetCli* migrate_cli, const std::string& key, const char type, std::string& detail, + const std::shared_ptr& db); + bool MigrateRecv(net::NetCli* migrate_cli, int need_receive, std::string& detail); + int ParseKey(const std::string& key, const char type, std::string& wbuf_str, const std::shared_ptr& db); + int ParseKKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + int ParseZKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + int ParseSKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + int ParseHKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + int ParseLKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + int ParseMKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + bool SetTTL(const std::string& key, std::string& wbuf_str, int64_t ttl); +}; + +class SlotsMgrtTagSlotCmd : public Cmd { + public: + SlotsMgrtTagSlotCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtTagSlotCmd(*this); } + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + int64_t slot_id_ = 0; + std::basic_string, std::allocator> key_; + void DoInitial() override; +}; + +class SlotsMgrtTagSlotAsyncCmd : public Cmd { + public: + SlotsMgrtTagSlotAsyncCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag){} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtTagSlotAsyncCmd(*this); } + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + int64_t max_bulks_ = 0; + int64_t max_bytes_ = 0; + int64_t slot_id_ = 0; + int64_t keys_num_ = 0; + void DoInitial() override; +}; + +class SlotsMgrtTagOneCmd : public Cmd { + public: + SlotsMgrtTagOneCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtTagOneCmd(*this); } + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + std::string key_; + int64_t slot_id_ = 0; + char key_type_ = '\0'; + void DoInitial() override; + int KeyTypeCheck(const std::shared_ptr& db); +}; + +class SlotsMgrtAsyncStatusCmd : public Cmd { + public: + SlotsMgrtAsyncStatusCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtAsyncStatusCmd(*this); } + + private: + void DoInitial() override; +}; + +class SlotsInfoCmd : public Cmd { + public: + SlotsInfoCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + 
void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsInfoCmd(*this); } + + private: + void DoInitial() override; + + int64_t begin_ = 0; + int64_t end_ = 1024; +}; + +class SlotsMgrtAsyncCancelCmd : public Cmd { + public: + SlotsMgrtAsyncCancelCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtAsyncCancelCmd(*this); } + + private: + void DoInitial() override; +}; + +class SlotsDelCmd : public Cmd { + public: + SlotsDelCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsDelCmd(*this); } + + private: + std::vector slots_; + void DoInitial() override; +}; + +class SlotsHashKeyCmd : public Cmd { + public: + SlotsHashKeyCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsHashKeyCmd(*this); } + + private: + std::vector keys_; + void DoInitial() override; +}; + +class SlotsScanCmd : public Cmd { + public: + SlotsScanCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsScanCmd(*this); } + + private: + std::string key_; + std::string pattern_ = "*"; + int64_t cursor_ = 0; + int64_t count_ = 10; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + count_ = 10; + } +}; + +/* * +* SLOTSMGRT-EXEC-WRAPPER $hashkey $command [$arg1 ...] +* SLOTSMGRT-EXEC-WRAPPER $hashkey $command [$key1 $arg1 ...] +* SLOTSMGRT-EXEC-WRAPPER $hashkey $command [$key1 $arg1 ...] [$key2 $arg2 ...] 
+* */ +class SlotsMgrtExecWrapperCmd : public Cmd { + public: + SlotsMgrtExecWrapperCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtExecWrapperCmd(*this); } + + private: + std::string key_; + std::vector args; + void DoInitial() override; +}; + + +class SlotsReloadCmd : public Cmd { + public: + SlotsReloadCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsReloadCmd(*this); } + + private: + void DoInitial() override; +}; + +class SlotsReloadOffCmd : public Cmd { + public: + SlotsReloadOffCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsReloadOffCmd(*this); } + + private: + void DoInitial() override; +}; + +class SlotsCleanupCmd : public Cmd { + public: + SlotsCleanupCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsCleanupCmd(*this); } + std::vector cleanup_slots_; + + private: + void DoInitial() override; +}; + +class SlotsCleanupOffCmd : public Cmd { + public: + SlotsCleanupOffCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsCleanupOffCmd(*this); } + + private: + void DoInitial() override; +}; + +#endif diff --git a/tools/pika_migrate/include/pika_stable_log.h b/tools/pika_migrate/include/pika_stable_log.h new file mode 100644 index 0000000000..300e0d0fc5 --- /dev/null +++ b/tools/pika_migrate/include/pika_stable_log.h @@ -0,0 +1,63 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
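The SLOTS* commands above all route keys by slot. GetSlotsTagKey taking a crc argument points at Codis-style routing, where the slot is a CRC32 of the key (or of the {tag} inside the key) modulo the slot count, and SlotsInfoCmd's default 0..1024 range suggests 1024 slots. Both the exact CRC variant and the constant in this sketch are assumptions for illustration:

```cpp
// Hedged sketch of the slot-routing rule the SLOTS* commands operate on.
// The CRC-32 (IEEE) variant and kSlotNum = 1024 are assumptions here.
#include <cstdint>
#include <iostream>
#include <string>

uint32_t Crc32(const std::string& s) {  // bitwise CRC-32, no lookup table
  uint32_t crc = 0xFFFFFFFFu;
  for (unsigned char c : s) {
    crc ^= c;
    for (int i = 0; i < 8; ++i) crc = (crc >> 1) ^ (0xEDB88320u & (-(crc & 1u)));
  }
  return ~crc;
}

int main() {
  const uint32_t kSlotNum = 1024;  // assumed default slot count
  std::string key = "user:42";
  std::cout << "key " << key << " -> slot " << Crc32(key) % kSlotNum << '\n';
}
```

Keys that share a tag hash to the same slot, which is what lets SLOTSMGRT-EXEC-WRAPPER run multi-key commands safely during migration.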
+ +#ifndef PIKA_STABLE_LOG_H_ +#define PIKA_STABLE_LOG_H_ + +#include <map> +#include <memory> + +#include "include/pika_binlog.h" + +class StableLog : public std::enable_shared_from_this<StableLog> { + public: + StableLog(std::string table_name, std::string log_path); + ~StableLog(); + std::shared_ptr<Binlog> Logger() { return stable_logger_; } + void Leave(); + void SetFirstOffset(const LogOffset& offset) { + std::lock_guard l(offset_rwlock_); + first_offset_ = offset; + } + LogOffset first_offset() { + std::shared_lock l(offset_rwlock_); + return first_offset_; + } + // Need to hold binlog lock + pstd::Status TruncateTo(const LogOffset& offset); + + // Purgelogs use + bool PurgeStableLogs(uint32_t to = 0, bool manual = false); + void ClearPurge(); + bool GetBinlogFiles(std::map<uint32_t, std::string>* binlogs); + pstd::Status PurgeFileAfter(uint32_t filenum); + + private: + void Close(); + void RemoveStableLogDir(); + void UpdateFirstOffset(uint32_t filenum); + /* + * Purgelogs use + */ + static void DoPurgeStableLogs(void* arg); + bool PurgeFiles(uint32_t to, bool manual); + std::atomic<bool> purging_; + + std::string db_name_; + std::string log_path_; + std::shared_ptr<Binlog> stable_logger_; + + std::shared_mutex offset_rwlock_; + LogOffset first_offset_; +}; + +struct PurgeStableLogArg { + std::shared_ptr<StableLog> logger; + uint32_t to = 0; + bool manual = false; + bool force = false; // Ignore the delete window +}; + +#endif // PIKA_STABLE_LOG_H_ diff --git a/tools/pika_migrate/include/pika_statistic.h b/tools/pika_migrate/include/pika_statistic.h new file mode 100644 index 0000000000..9ea824ca13 --- /dev/null +++ b/tools/pika_migrate/include/pika_statistic.h @@ -0,0 +1,67 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory.
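PurgeStableLogArg above carries a force flag documented as ignoring the delete window: purge requests normally refuse to delete recent binlogs even when they fall below the requested file number. A speculative sketch of that decision, with the retention window modeled as a minimum number of files to keep (keep_min and the write2file naming are assumptions, not StableLog's actual rule):

```cpp
// Illustrative purge decision: given on-disk binlogs keyed by file number,
// purge everything up to `to` except a retained tail, unless force is set.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

std::vector<std::string> FilesToPurge(const std::map<uint32_t, std::string>& binlogs,
                                      uint32_t to, bool force, size_t keep_min = 10) {
  std::vector<std::string> victims;
  size_t remaining = binlogs.size();
  for (const auto& [filenum, name] : binlogs) {
    if (filenum > to) break;                     // never purge past the target
    if (!force && remaining <= keep_min) break;  // honor the delete window
    victims.push_back(name);
    --remaining;
  }
  return victims;
}

int main() {
  std::map<uint32_t, std::string> logs;
  for (uint32_t i = 0; i < 12; ++i) logs[i] = "write2file" + std::to_string(i);
  for (const auto& f : FilesToPurge(logs, 11, /*force=*/false)) std::cout << f << '\n';
  // keeps the newest 10 files; only write2file0 and write2file1 are purged
}
```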
+ +#ifndef PIKA_STATISTIC_H_ +#define PIKA_STATISTIC_H_ + +#include <atomic> +#include <shared_mutex> +#include <string> +#include <unordered_map> + +class QpsStatistic { + public: + QpsStatistic(); + QpsStatistic(const QpsStatistic& other); + ~QpsStatistic() = default; + void IncreaseQueryNum(bool is_write); + void ResetLastSecQuerynum(); + + std::atomic<uint64_t> querynum; + std::atomic<uint64_t> write_querynum; + + std::atomic<uint64_t> last_querynum; + std::atomic<uint64_t> last_write_querynum; + + std::atomic<uint64_t> last_sec_querynum; + std::atomic<uint64_t> last_sec_write_querynum; + + std::atomic<uint64_t> last_time_us; +}; + +struct ServerStatistic { + ServerStatistic() = default; + ~ServerStatistic() = default; + + std::atomic<uint64_t> accumulative_connections; + std::unordered_map<std::string, std::atomic<uint64_t>> exec_count_db; + std::atomic<uint64_t> keyspace_hits; + std::atomic<uint64_t> keyspace_misses; + QpsStatistic qps; +}; + +struct Statistic { + Statistic(); + + QpsStatistic DBStat(const std::string& db_name); + std::unordered_map<std::string, QpsStatistic> AllDBStat(); + + void UpdateDBQps(const std::string& db_name, const std::string& command, bool is_write); + void ResetDBLastSecQuerynum(); + + // statistic shows accumulated data of all tables + ServerStatistic server_stat; + + // statistic shows accumulated data of every single table + std::shared_mutex db_stat_rw; + std::unordered_map<std::string, QpsStatistic> db_stat; +}; + +struct DiskStatistic { + std::atomic<uint64_t> db_size_ = 0; + std::atomic<uint64_t> log_size_ = 0; +}; + +#endif // PIKA_STATISTIC_H_ diff --git a/tools/pika_migrate/include/pika_stream.h b/tools/pika_migrate/include/pika_stream.h new file mode 100644 index 0000000000..bf61a96c6b --- /dev/null +++ b/tools/pika_migrate/include/pika_stream.h @@ -0,0 +1,163 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_STREAM_H_ +#define PIKA_STREAM_H_ + +#include "include/acl.h" +#include "include/pika_command.h" +#include "storage/src/redis_streams.h" +#include "storage/storage.h" + +/* + * stream + */ + +inline void ParseAddOrTrimArgsOrReply(CmdRes& res, const PikaCmdArgsType& argv, storage::StreamAddTrimArgs& args, + int* idpos, bool is_xadd); + +inline void ParseReadOrReadGroupArgsOrReply(CmdRes& res, const PikaCmdArgsType& argv, + storage::StreamReadGroupReadArgs& args, bool is_xreadgroup); + +// @field_values is the result of ScanStream. +// field is the serialized message id, +// value is the serialized message.
+inline void AppendMessagesToRes(CmdRes& res, std::vector& field_values, const DB* db); + +class XAddCmd : public Cmd { + public: + XAddCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + std::vector current_key() const override { return {key_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XAddCmd(*this); } + + private: + std::string key_; + storage::StreamAddTrimArgs args_; + int field_pos_{0}; + + void DoInitial() override; +}; + +class XDelCmd : public Cmd { + public: + XDelCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + std::vector current_key() const override { return {key_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XDelCmd(*this); } + + private: + std::string key_; + std::vector ids_; + + void DoInitial() override; + void Clear() override { ids_.clear(); } +}; + +class XReadCmd : public Cmd { + public: + XReadCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XReadCmd(*this); } + + private: + storage::StreamReadGroupReadArgs args_; + + void DoInitial() override; + void Clear() override { + args_.unparsed_ids.clear(); + args_.keys.clear(); + } +}; + +class XRangeCmd : public Cmd { + public: + XRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XRangeCmd(*this); } + + protected: + std::string key_; + storage::StreamScanArgs args_; + + void DoInitial() override; +}; + +class XRevrangeCmd : public XRangeCmd { + public: + XRevrangeCmd(const std::string& name, int arity, uint32_t flag) : XRangeCmd(name, arity, flag){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XRevrangeCmd(*this); } +}; + +class XLenCmd : public Cmd { + public: + XLenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XLenCmd(*this); } + + private: + std::string key_; + + void DoInitial() override; +}; + +class XTrimCmd : public Cmd { + public: + XTrimCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag){}; + std::vector current_key() const override { return {key_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XTrimCmd(*this); } + + private: + std::string key_; + storage::StreamAddTrimArgs args_; + + void DoInitial() override; +}; + +class XInfoCmd : public Cmd { + public: + XInfoCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XInfoCmd(*this); } + + private: + 
std::string key_; + std::string cgroupname_; + std::string consumername_; + std::string subcmd_; + uint64_t count_{0}; + bool is_full_{false}; + + void DoInitial() override; + void StreamInfo(std::shared_ptr& db); + void GroupsInfo(std::shared_ptr& db); + void ConsumersInfo(std::shared_ptr& db); +}; + +#endif // PIKA_STREAM_H_ diff --git a/tools/pika_migrate/include/pika_table.h b/tools/pika_migrate/include/pika_table.h deleted file mode 100644 index adf6b62b6c..0000000000 --- a/tools/pika_migrate/include/pika_table.h +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_TABLE_H_ -#define PIKA_TABLE_H_ - -#include "blackwidow/blackwidow.h" - -#include "include/pika_command.h" -#include "include/pika_partition.h" - -class Table : public std::enable_shared_from_this
{ - public: - Table(const std::string& table_name, - uint32_t partition_num, - const std::string& db_path, - const std::string& log_path); - virtual ~Table(); - - friend class Cmd; - friend class InfoCmd; - friend class PkClusterInfoCmd; - friend class PikaServer; - - std::string GetTableName(); - void BgSaveTable(); - void CompactTable(const blackwidow::DataType& type); - bool FlushPartitionDB(); - bool FlushPartitionSubDB(const std::string& db_name); - bool IsBinlogIoError(); - uint32_t PartitionNum(); - - // Dynamic change partition - Status AddPartitions(const std::set& partition_ids); - Status RemovePartitions(const std::set& partition_ids); - - // KeyScan use; - void KeyScan(); - bool IsKeyScaning(); - void RunKeyScan(); - void StopKeyScan(); - void ScanDatabase(const blackwidow::DataType& type); - KeyScanInfo GetKeyScanInfo(); - Status GetPartitionsKeyScanInfo(std::map* infos); - - // Compact use; - void Compact(const blackwidow::DataType& type); - - void LeaveAllPartition(); - std::set GetPartitionIds(); - std::shared_ptr GetPartitionById(uint32_t partition_id); - std::shared_ptr GetPartitionByKey(const std::string& key); - - private: - std::string table_name_; - uint32_t partition_num_; - std::string db_path_; - std::string log_path_; - - // lock order - // partitions_rw_ > key_scan_protector_ - - pthread_rwlock_t partitions_rw_; - std::map> partitions_; - - /* - * KeyScan use - */ - static void DoKeyScan(void *arg); - void InitKeyScan(); - slash::Mutex key_scan_protector_; - KeyScanInfo key_scan_info_; - - /* - * No allowed copy and copy assign - */ - Table(const Table&); - void operator=(const Table&); -}; - -struct BgTaskArg { - std::shared_ptr
table; - std::shared_ptr partition; -}; - - -#endif diff --git a/tools/pika_migrate/include/pika_transaction.h b/tools/pika_migrate/include/pika_transaction.h new file mode 100644 index 0000000000..f772ef4e90 --- /dev/null +++ b/tools/pika_migrate/include/pika_transaction.h @@ -0,0 +1,107 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_TRANSACTION_H_ +#define PIKA_TRANSACTION_H_ + +#include "acl.h" +#include "include/pika_command.h" +#include "net/include/redis_conn.h" +#include "pika_db.h" +#include "storage/storage.h" + +class MultiCmd : public Cmd { + public: + MultiCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + void Do() override; + Cmd* Clone() override { return new MultiCmd(*this); } + void Split(const HintKeys& hint_keys) override {} + void Merge() override {} + + private: + void DoInitial() override; +}; + +class ExecCmd : public Cmd { + public: + ExecCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + void Do() override; + Cmd* Clone() override { return new ExecCmd(*this); } + void Split(const HintKeys& hint_keys) override {} + void Merge() override {} + std::vector current_key() const override { return {}; } + void Execute() override; + private: + struct CmdInfo { + public: + CmdInfo(std::shared_ptr cmd, std::shared_ptr db, + std::shared_ptr sync_db) : cmd_(cmd), db_(db), sync_db_(sync_db) {} + std::shared_ptr cmd_; + std::shared_ptr db_; + std::shared_ptr sync_db_; + }; + void DoInitial() override; + void Lock(); + void Unlock(); + bool IsTxnFailedAndSetState(); + void SetCmdsVec(); + void ServeToBLrPopWithKeys(); + std::unordered_set> lock_db_{}; + std::unordered_map, std::vector> lock_db_keys_{}; + std::unordered_set> r_lock_dbs_ {}; + bool is_lock_rm_dbs_{false}; // g_pika_rm->dbs_rw_; + std::vector cmds_; + std::vector list_cmd_; + std::vector keys_; +}; + +class DiscardCmd : public Cmd { + public: + DiscardCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + void Do() override; + Cmd* Clone() override { return new DiscardCmd(*this); } + void Split(const HintKeys& hint_keys) override {} + void Merge() override {} + + private: + void DoInitial() override; +}; + +class WatchCmd : public Cmd { + public: + WatchCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + + void Do() override; + void Split(const HintKeys& hint_keys) override {} + Cmd* Clone() override { return new WatchCmd(*this); } + void Merge() override {} + std::vector current_key() const override { return keys_; } + void Execute() override; + + private: + void DoInitial() override; + std::vector keys_; + std::vector db_keys_; // cause the keys watched may cross different dbs, so add dbname as keys prefix +}; + +class UnwatchCmd : public Cmd { + public: + UnwatchCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + + void Do() override; + Cmd* Clone() override { return new UnwatchCmd(*this); } + void Split(const HintKeys& hint_keys) override {} + void Merge() override {} + + private: + void 
DoInitial() override; +}; + +#endif // PIKA_TRANSACTION_H_ diff --git a/tools/pika_migrate/include/pika_version.h b/tools/pika_migrate/include/pika_version.h index c0c6a2b617..9c0b2a1732 100644 --- a/tools/pika_migrate/include/pika_version.h +++ b/tools/pika_migrate/include/pika_version.h @@ -6,8 +6,8 @@ #ifndef INCLUDE_PIKA_VERSION_H_ #define INCLUDE_PIKA_VERSION_H_ -#define PIKA_MAJOR 3 -#define PIKA_MINOR 2 -#define PIKA_PATCH 7 +#define PIKA_MAJOR 4 +#define PIKA_MINOR 0 +#define PIKA_PATCH 0 #endif // INCLUDE_PIKA_VERSION_H_ diff --git a/tools/pika_migrate/include/pika_zset.h b/tools/pika_migrate/include/pika_zset.h index d4ab4ca6ea..a74ee026fc 100644 --- a/tools/pika_migrate/include/pika_zset.h +++ b/tools/pika_migrate/include/pika_zset.h @@ -6,69 +6,78 @@ #ifndef PIKA_ZSET_H_ #define PIKA_ZSET_H_ -#include "blackwidow/blackwidow.h" - +#include "storage/storage.h" +#include "include/acl.h" #include "include/pika_command.h" -#include "include/pika_partition.h" +#include "pika_kv.h" /* * zset */ class ZAddCmd : public Cmd { public: - ZAddCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZAddCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZAddCmd(*this); - } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZAddCmd(*this); } + private: std::string key_; - std::vector score_members; - virtual void DoInitial() override; + std::vector score_members; + rocksdb::Status s_; + void DoInitial() override; }; class ZCardCmd : public Cmd { public: - ZCardCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZCardCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZCardCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZCardCmd(*this); } + private: std::string key_; - virtual void DoInitial() override; + void DoInitial() override; }; class ZScanCmd : public Cmd { public: - ZScanCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual std::vector current_key() const { + ZScanCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)), pattern_("*") {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZScanCmd(*this); - } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ZScanCmd(*this); } + private: - std::string 
key_, pattern_; - int64_t cursor_, count_; - virtual void DoInitial() override; - virtual void Clear() { + std::string key_, pattern_ = "*"; + int64_t cursor_ = 0, count_ = 10; + void DoInitial() override; + void Clear() override { pattern_ = "*"; count_ = 10; } @@ -76,82 +85,104 @@ class ZScanCmd : public Cmd { class ZIncrbyCmd : public Cmd { public: - ZIncrbyCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZIncrbyCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZIncrbyCmd(*this); - } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZIncrbyCmd(*this); } + double Score() { return score_; } + private: std::string key_, member_; - double by_; - virtual void DoInitial() override; + double by_ = .0f; + double score_ = .0f; + void DoInitial() override; }; class ZsetRangeParentCmd : public Cmd { public: - ZsetRangeParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), is_ws_(false) {} + ZsetRangeParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + protected: std::string key_; - int64_t start_, stop_; - bool is_ws_; - virtual void DoInitial() override; - virtual void Clear() { - is_ws_ = false; - } + int64_t start_ = 0; + int64_t stop_ = -1; + bool is_ws_ = false; + void DoInitial() override; + void Clear() override { is_ws_ = false; } }; class ZRangeCmd : public ZsetRangeParentCmd { public: - ZRangeCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangeParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZRangeCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangeParentCmd(name, arity, flag) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRangeCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRangeCmd(*this); } + private: - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; class ZRevrangeCmd : public ZsetRangeParentCmd { public: - ZRevrangeCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangeParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZRevrangeCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangeParentCmd(name, arity, flag) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRevrangeCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new 
ZRevrangeCmd(*this); } + private: - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; class ZsetRangebyscoreParentCmd : public Cmd { public: - ZsetRangebyscoreParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true), with_scores_(false), offset_(0), count_(-1) {} + ZsetRangebyscoreParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + + double MinScore() { return min_score_; } + double MaxScore() { return max_score_; } + bool LeftClose() { return left_close_; } + bool RightClose() { return right_close_; } + int64_t Offset() { return offset_; } + int64_t Count() { return count_; } + protected: std::string key_; - double min_score_, max_score_; - bool left_close_, right_close_, with_scores_; - int64_t offset_, count_; - virtual void DoInitial() override; - virtual void Clear() { + std::string min_, max_; + double min_score_ = 0, max_score_ = 0; + bool left_close_ = true, right_close_ = true, with_scores_ = false; + int64_t offset_ = 0, count_ = -1; + void DoInitial() override; + void Clear() override { left_close_ = right_close_ = true; with_scores_ = false; offset_ = 0; @@ -161,57 +192,76 @@ class ZsetRangebyscoreParentCmd : public Cmd { class ZRangebyscoreCmd : public ZsetRangebyscoreParentCmd { public: - ZRangebyscoreCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangebyscoreParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZRangebyscoreCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangebyscoreParentCmd(name, arity, flag) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRangebyscoreCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRangebyscoreCmd(*this); } + private: - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; class ZRevrangebyscoreCmd : public ZsetRangebyscoreParentCmd { public: - ZRevrangebyscoreCmd(const std::string& name, int arity, uint16_t flag) + ZRevrangebyscoreCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangebyscoreParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRevrangebyscoreCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRevrangebyscoreCmd(*this); } + private: - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; class ZCountCmd : public Cmd { public: - ZCountCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true) {} - virtual std::vector current_key() const { + ZCountCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector 
current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZCountCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZCountCmd(*this); } + double MinScore() { return min_score_; } + double MaxScore() { return max_score_; } + bool LeftClose() { return left_close_; } + bool RightClose() { return right_close_; } + private: std::string key_; - double min_score_, max_score_; - bool left_close_, right_close_; - virtual void DoInitial() override; - virtual void Clear() { + std::string min_ , max_; + double min_score_ = 0, max_score_ = 0; + bool left_close_ = true, right_close_ = true; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { left_close_ = true; right_close_ = true; } @@ -219,135 +269,180 @@ class ZCountCmd : public Cmd { class ZRemCmd : public Cmd { public: - ZRemCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZRemCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRemCmd(*this); - } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRemCmd(*this); } + private: std::string key_; std::vector members_; - virtual void DoInitial() override; + int32_t deleted_ = 0; + rocksdb::Status s_; + void DoInitial() override; }; class ZsetUIstoreParentCmd : public Cmd { public: - ZsetUIstoreParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), aggregate_(blackwidow::SUM) {} + ZsetUIstoreParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) { + zadd_cmd_ = std::make_unique(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsZset); + } + ZsetUIstoreParentCmd(const ZsetUIstoreParentCmd& other) + : Cmd(other), + dest_key_(other.dest_key_), + num_keys_(other.num_keys_), + aggregate_(other.aggregate_), + keys_(other.keys_), + weights_(other.weights_) { + zadd_cmd_ = std::make_unique(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsZset); + } + + std::vector current_key() const override { return {dest_key_}; } + protected: std::string dest_key_; - int64_t num_keys_; - blackwidow::AGGREGATE aggregate_; + int64_t num_keys_ = 0; + storage::AGGREGATE aggregate_{storage::SUM}; std::vector keys_; std::vector weights_; - virtual void DoInitial() override; - virtual void Clear() { - aggregate_ = blackwidow::SUM; - } + void DoInitial() override; + void Clear() override { aggregate_ = storage::SUM; } + // used for write binlog + std::shared_ptr zadd_cmd_; }; class ZUnionstoreCmd : public ZsetUIstoreParentCmd { public: - ZUnionstoreCmd(const std::string& name, int arity, uint16_t flag) - : ZsetUIstoreParentCmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZUnionstoreCmd(*this); - } + ZUnionstoreCmd(const 
std::string& name, int arity, uint32_t flag) : ZsetUIstoreParentCmd(name, arity, flag) {} + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZUnionstoreCmd(*this); } + private: - virtual void DoInitial() override; + void DoInitial() override; + // used for write binlog + std::map value_to_dest_; + rocksdb::Status s_; + void DoBinlog() override; }; class ZInterstoreCmd : public ZsetUIstoreParentCmd { public: - ZInterstoreCmd(const std::string& name, int arity, uint16_t flag) - : ZsetUIstoreParentCmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZInterstoreCmd(*this); - } + ZInterstoreCmd(const std::string& name, int arity, uint32_t flag) : ZsetUIstoreParentCmd(name, arity, flag) {} + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZInterstoreCmd(*this); } + void DoBinlog() override; + private: - virtual void DoInitial() override; + void DoInitial() override; + rocksdb::Status s_; + // used for write binlog + std::vector value_to_dest_; }; class ZsetRankParentCmd : public Cmd { public: - ZsetRankParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} + ZsetRankParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + protected: std::string key_, member_; - virtual void DoInitial() override; + void DoInitial() override; }; class ZRankCmd : public ZsetRankParentCmd { public: - ZRankCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRankParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZRankCmd(const std::string& name, int arity, uint32_t flag) : ZsetRankParentCmd(name, arity, flag) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRankCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRankCmd(*this); } + private: - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; class ZRevrankCmd : public ZsetRankParentCmd { public: - ZRevrankCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRankParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZRevrankCmd(const std::string& name, int arity, uint32_t flag) : ZsetRankParentCmd(name, arity, flag) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRevrankCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRevrankCmd(*this); } + private: - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; class ZScoreCmd : public ZsetRankParentCmd { public: 
- ZScoreCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRankParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZScoreCmd(const std::string& name, int arity, uint32_t flag) : ZsetRankParentCmd(name, arity, flag) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZScoreCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZScoreCmd(*this); } + private: std::string key_, member_; - virtual void DoInitial() override; + rocksdb::Status s_; + void DoInitial() override; }; - class ZsetRangebylexParentCmd : public Cmd { public: - ZsetRangebylexParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true), offset_(0), count_(-1) {} + ZsetRangebylexParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + protected: std::string key_, min_member_, max_member_; - bool left_close_, right_close_; - int64_t offset_, count_; - virtual void DoInitial() override; - virtual void Clear() { + std::string min_, max_; + bool left_close_ = true, right_close_ = true; + int64_t offset_ = 0, count_ = -1; + void DoInitial() override; + void Clear() override { left_close_ = right_close_ = true; offset_ = 0; count_ = -1; @@ -356,161 +451,184 @@ class ZsetRangebylexParentCmd : public Cmd { class ZRangebylexCmd : public ZsetRangebylexParentCmd { public: - ZRangebylexCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangebylexParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZRangebylexCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangebylexParentCmd(name, arity, flag) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRangebylexCmd(*this); - } - private: - virtual void DoInitial() override; + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRangebylexCmd(*this); } + + private: + rocksdb::Status s_; + void DoInitial() override; }; class ZRevrangebylexCmd : public ZsetRangebylexParentCmd { public: - ZRevrangebylexCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangebylexParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZRevrangebylexCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangebylexParentCmd(name, arity, flag) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRevrangebylexCmd(*this); - } - private: - virtual void DoInitial() override; + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRevrangebylexCmd(*this); } + + private: + void 
DoInitial() override; + rocksdb::Status s_; }; class ZLexcountCmd : public Cmd { public: - ZLexcountCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true) {} - virtual std::vector current_key() const { + ZLexcountCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZLexcountCmd(*this); - } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZLexcountCmd(*this); } + private: std::string key_, min_member_, max_member_; - bool left_close_, right_close_; - virtual void DoInitial() override; - virtual void Clear() { - left_close_ = right_close_ = true; - } + std::string min_, max_; + bool left_close_ = true, right_close_ = true; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { left_close_ = right_close_ = true; } }; class ZRemrangebyrankCmd : public Cmd { public: - ZRemrangebyrankCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZRemrangebyrankCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRemrangebyrankCmd(*this); - } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRemrangebyrankCmd(*this); } + private: - std::string key_; - int64_t start_rank_, stop_rank_; - virtual void DoInitial() override; + std::string key_, min_, max_; + int64_t start_rank_ = 0, stop_rank_ = -1; + int32_t ele_deleted_; + rocksdb::Status s_; + void DoInitial() override; }; class ZRemrangebyscoreCmd : public Cmd { public: - ZRemrangebyscoreCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true) {} - virtual std::vector current_key() const { + ZRemrangebyscoreCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRemrangebyscoreCmd(*this); - } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRemrangebyscoreCmd(*this); } + private: - std::string key_; - double min_score_, max_score_; - bool left_close_, right_close_; - virtual void DoInitial() override; - virtual void Clear() { - left_close_ = right_close_ = true; - } + std::string key_, min_, max_; + double min_score_ = 0, max_score_ = 0; + bool left_close_ = true, right_close_ = true; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { 
left_close_ = right_close_ = true; } }; class ZRemrangebylexCmd : public Cmd { public: - ZRemrangebylexCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true) {} - virtual std::vector current_key() const { + ZRemrangebylexCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); return res; } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRemrangebylexCmd(*this); - } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRemrangebylexCmd(*this); } + private: - std::string key_; + std::string key_, min_, max_; std::string min_member_, max_member_; - bool left_close_, right_close_; - virtual void DoInitial() override; - virtual void Clear() { - left_close_ = right_close_ = true; - } + bool left_close_ = true, right_close_ = true; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { left_close_ = right_close_ = true; } }; class ZPopmaxCmd : public Cmd { public: - ZPopmaxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZPopmaxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.emplace_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZPopmaxCmd(*this); + return res; } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ZPopmaxCmd(*this); } + private: - virtual void DoInitial() override; + void DoInitial() override; std::string key_; - int64_t count_; + int64_t count_ = 0; }; class ZPopminCmd : public Cmd { public: - ZPopminCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { + ZPopminCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { std::vector res; res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZPopminCmd(*this); + return res; } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ZPopminCmd(*this); } + private: - virtual void DoInitial() override; + void DoInitial() override; std::string key_; - int64_t count_; + int64_t count_ = 0; }; #endif diff --git a/tools/pika_migrate/include/redis_sender.h b/tools/pika_migrate/include/redis_sender.h index aa905e3d68..0b84c5f0f1 100644 --- a/tools/pika_migrate/include/redis_sender.h +++ b/tools/pika_migrate/include/redis_sender.h @@ -7,11 +7,11 @@ #include #include -#include "pink/include/bg_thread.h" -#include "pink/include/pink_cli.h" -#include "pink/include/redis_cli.h" +#include "pika_repl_bgworker.h" +#include "net/include/net_cli.h" +#include "net/include/redis_cli.h" -class RedisSender : public pink::Thread { +class RedisSender : public net::Thread { public: 
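+  // Accepts serialized commands into an internal queue and drains the
+  // queue to the target Redis from this background thread.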
RedisSender(int id, std::string ip, int64_t port, std::string password); virtual ~RedisSender(); @@ -25,13 +25,18 @@ class RedisSender : public pink::Thread { private: int SendCommand(std::string &command); void ConnectRedis(); + size_t commandQueueSize() { + std::lock_guard l(keys_mutex_); + return commands_queue_.size(); + } private: int id_; - pink::PinkCli *cli_; - slash::CondVar rsignal_; - slash::CondVar wsignal_; - slash::Mutex commands_mutex_; + std::shared_ptr cli_; + pstd::CondVar rsignal_; + pstd::CondVar wsignal_; + pstd::Mutex signal_mutex_; + pstd::Mutex keys_mutex_; std::queue commands_queue_; std::string ip_; int port_; diff --git a/tools/pika_migrate/include/rsync_client.h b/tools/pika_migrate/include/rsync_client.h new file mode 100644 index 0000000000..657407218f --- /dev/null +++ b/tools/pika_migrate/include/rsync_client.h @@ -0,0 +1,247 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef RSYNC_CLIENT_H_ +#define RSYNC_CLIENT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "net/include/bg_thread.h" +#include "net/include/net_cli.h" +#include "pstd/include/env.h" +#include "pstd/include/pstd_status.h" +#include "pstd/include/pstd_hash.h" +#include "pstd/include/pstd_string.h" +#include "pstd/include/pstd_status.h" +#include "include/pika_define.h" +#include "include/rsync_client_thread.h" +#include "include/throttle.h" +#include "rsync_service.pb.h" + +extern std::unique_ptr g_pika_conf; + +const std::string kDumpMetaFileName = "DUMP_META_DATA"; +const std::string kUuidPrefix = "snapshot-uuid:"; +const size_t kInvalidOffset = 0xFFFFFFFF; + +namespace rsync { + +class RsyncWriter; +class Session; +class WaitObject; +class WaitObjectManager; + +using pstd::Status; + +using ResponseSPtr = std::shared_ptr; +class RsyncClient : public net::Thread { + public: + enum State { + IDLE, + RUNNING, + STOP, + }; + RsyncClient(const std::string& dir, const std::string& db_name); + void* ThreadMain() override; + void Copy(const std::set& file_set, int index); + bool Init(); + int GetParallelNum(); + Status Start(); + Status Stop(); + bool IsRunning() { + return state_.load() == RUNNING; + } + bool IsExitedFromRunning() { + return state_.load() == STOP && all_worker_exited_.load(); + } + bool IsStop() { + return state_.load() == STOP; + } + bool IsIdle() { return state_.load() == IDLE;} + void OnReceive(RsyncService::RsyncResponse* resp); +private: + bool ComparisonUpdate(); + Status CopyRemoteFile(const std::string& filename, int index); + Status PullRemoteMeta(std::string* snapshot_uuid, std::set* file_set); + Status LoadLocalMeta(std::string* snapshot_uuid, std::map* file_map); + std::string GetLocalMetaFilePath(); + Status FlushMetaTable(); + Status CleanUpExpiredFiles(bool need_reset_path, const std::set& files); + Status UpdateLocalMeta(const std::string& snapshot_uuid, const std::set& expired_files, + std::map* localFileMap); + void HandleRsyncMetaResponse(RsyncService::RsyncResponse* response); + +private: + typedef std::unique_ptr NetThreadUPtr; + std::map meta_table_; + std::set file_set_; + std::string snapshot_uuid_; + std::string dir_; + std::string db_name_; + + NetThreadUPtr client_thread_; + std::vector work_threads_; + std::atomic finished_work_cnt_ = 0; + 
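+  // Client lifecycle: state_ moves IDLE -> RUNNING -> STOP;
+  // IsExitedFromRunning() reports true only once every parallel
+  // worker has exited as well (all_worker_exited_).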
+ std::atomic state_; + std::atomic error_stopped_{false}; + std::atomic all_worker_exited_{true}; + int max_retries_ = 10; + std::unique_ptr wo_mgr_; + std::condition_variable cond_; + std::mutex mu_; + + + std::string master_ip_; + int master_port_; + int parallel_num_; +}; + +class RsyncWriter { + public: + RsyncWriter(const std::string& filepath) { + filepath_ = filepath; + fd_ = open(filepath.c_str(), O_RDWR | O_APPEND | O_CREAT, 0644); + } + ~RsyncWriter() {} + Status Write(uint64_t offset, size_t n, const char* data) { + const char* ptr = data; + size_t left = n; + Status s; + while (left != 0) { + ssize_t done = write(fd_, ptr, left); + if (done < 0) { + if (errno == EINTR) { + continue; + } + LOG(WARNING) << "pwrite failed, filename: " << filepath_ << "errno: " << strerror(errno) << "n: " << n; + return Status::IOError(filepath_, "pwrite failed"); + } + left -= done; + ptr += done; + offset += done; + } + return Status::OK(); + } + Status Close() { + close(fd_); + return Status::OK(); + } + Status Fsync() { + fsync(fd_); + return Status::OK(); + } + + private: + std::string filepath_; + int fd_ = -1; +}; + +class WaitObject { + public: + WaitObject() : filename_(""), type_(RsyncService::kRsyncMeta), offset_(0), resp_(nullptr) {} + ~WaitObject() {} + + void Reset(const std::string& filename, RsyncService::Type t, size_t offset) { + std::lock_guard guard(mu_); + resp_.reset(); + filename_ = filename; + type_ = t; + offset_ = offset; + } + + pstd::Status Wait(ResponseSPtr& resp) { + auto timeout = g_pika_conf->rsync_timeout_ms(); + std::unique_lock lock(mu_); + auto cv_s = cond_.wait_for(lock, std::chrono::milliseconds(timeout), [this] { + return resp_.get() != nullptr; + }); + if (!cv_s) { + std::string timout_info("timeout during(in ms) is "); + timout_info.append(std::to_string(timeout)); + return pstd::Status::Timeout("rsync timeout", timout_info); + } + resp = resp_; + return pstd::Status::OK(); + } + + void WakeUp(RsyncService::RsyncResponse* resp) { + std::unique_lock lock(mu_); + resp_.reset(resp); + offset_ = kInvalidOffset; + cond_.notify_all(); + } + + std::string Filename() {return filename_;} + RsyncService::Type Type() {return type_;} + size_t Offset() {return offset_;} + private: + std::string filename_; + RsyncService::Type type_; + size_t offset_ = kInvalidOffset; + ResponseSPtr resp_ = nullptr; + std::condition_variable cond_; + std::mutex mu_; +}; + +class WaitObjectManager { + public: + WaitObjectManager() { + wo_vec_.resize(kMaxRsyncParallelNum); + for (int i = 0; i < kMaxRsyncParallelNum; i++) { + wo_vec_[i] = new WaitObject(); + } + } + ~WaitObjectManager() { + for (int i = 0; i < wo_vec_.size(); i++) { + delete wo_vec_[i]; + wo_vec_[i] = nullptr; + } + } + + WaitObject* UpdateWaitObject(int worker_index, const std::string& filename, + RsyncService::Type type, size_t offset) { + std::lock_guard guard(mu_); + wo_vec_[worker_index]->Reset(filename, type, offset); + return wo_vec_[worker_index]; + } + + void WakeUp(RsyncService::RsyncResponse* resp) { + std::lock_guard guard(mu_); + int index = resp->reader_index(); + if (wo_vec_[index] == nullptr || resp->type() != wo_vec_[index]->Type()) { + delete resp; + return; + } + if (resp->code() != RsyncService::kOk) { + LOG(WARNING) << "rsync response error"; + wo_vec_[index]->WakeUp(resp); + return; + } + + if (resp->type() == RsyncService::kRsyncFile && + ((resp->file_resp().filename() != wo_vec_[index]->Filename()) || + (resp->file_resp().offset() != wo_vec_[index]->Offset()))) { + delete resp; + return; + } + 
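+    // Mismatched kRsyncFile responses (wrong filename or offset, i.e. stale
+    // replies from an earlier request) were discarded above; whatever is left
+    // belongs to the in-flight request, so hand it to the waiter.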
wo_vec_[index]->WakeUp(resp); + } + private: + std::vector wo_vec_; + std::mutex mu_; +}; + +} // end namespace rsync +#endif diff --git a/tools/pika_migrate/include/rsync_client_thread.h b/tools/pika_migrate/include/rsync_client_thread.h new file mode 100644 index 0000000000..19bebcb56d --- /dev/null +++ b/tools/pika_migrate/include/rsync_client_thread.h @@ -0,0 +1,55 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef RSYNC_CLIENT_THREAD_H_ +#define RSYNC_CLIENT_THREAD_H_ + +#include "net/include/client_thread.h" +#include "net/include/net_conn.h" +#include "net/include/pb_conn.h" +#include "rsync_service.pb.h" + +using namespace pstd; +using namespace net; + +namespace rsync { + +class RsyncClientConn : public PbConn { + public: + RsyncClientConn(int fd, const std::string& ip_port, + net::Thread* thread, void* cb_handler, + NetMultiplexer* mpx); + ~RsyncClientConn() override; + int DealMessage() override; + + private: + void* cb_handler_ = nullptr; +}; + +class RsyncClientConnFactory : public ConnFactory { + public: + RsyncClientConnFactory(void* scheduler) : cb_handler_(scheduler) {} + std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, + net::Thread* thread, void* cb_handler, + net::NetMultiplexer* net) const override { + return std::static_pointer_cast( + std::make_shared(connfd, ip_port, thread, cb_handler_, net)); + } + private: + void* cb_handler_ = nullptr; +}; + +class RsyncClientThread : public ClientThread { + public: + RsyncClientThread(int cron_interval, int keepalive_timeout, void* scheduler); + ~RsyncClientThread() override; + private: + RsyncClientConnFactory conn_factory_; + ClientHandle handle_; +}; + +} //end namespace rsync +#endif + diff --git a/tools/pika_migrate/include/rsync_server.h b/tools/pika_migrate/include/rsync_server.h new file mode 100644 index 0000000000..560585f3c8 --- /dev/null +++ b/tools/pika_migrate/include/rsync_server.h @@ -0,0 +1,187 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
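The client above and its network thread rendezvous through WaitObject: a worker records which file/offset it is waiting for, parks on a condition variable with a timeout, and OnReceive() hands the matching response over via WakeUp(). A minimal standalone sketch of that pattern (simplified types, not the actual Pika classes):

```cpp
// Single-shot rendezvous between one waiter and one producer thread,
// mirroring WaitObject::Wait()/WakeUp() above.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <thread>

struct Response { std::string payload; };

class Waiter {
 public:
  // Returns false on timeout, true once a response has been delivered.
  bool Wait(std::shared_ptr<Response>& out, std::chrono::milliseconds timeout) {
    std::unique_lock<std::mutex> lock(mu_);
    if (!cond_.wait_for(lock, timeout, [this] { return resp_ != nullptr; })) {
      return false;  // timed out; the caller may retry or give up
    }
    out = resp_;
    return true;
  }
  void WakeUp(std::shared_ptr<Response> resp) {
    std::lock_guard<std::mutex> lock(mu_);
    resp_ = std::move(resp);  // publish under the lock, then notify
    cond_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable cond_;
  std::shared_ptr<Response> resp_;
};

int main() {
  Waiter w;
  std::thread receiver([&w] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    w.WakeUp(std::make_shared<Response>(Response{"file chunk"}));
  });
  std::shared_ptr<Response> resp;
  if (w.Wait(resp, std::chrono::milliseconds(1000))) {
    std::cout << "got: " << resp->payload << "\n";
  } else {
    std::cout << "timeout\n";
  }
  receiver.join();
  return 0;
}
```

Publishing the response and notifying while still holding the mutex keeps the predicate check race-free, which is the same reason WaitObject::WakeUp() above takes its lock before calling notify_all().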
+ +#ifndef RSYNC_SERVER_H_ +#define RSYNC_SERVER_H_ + +#include +#include +#include +#include + +#include "net/include/net_conn.h" +#include "net/include/net_thread.h" +#include "net/include/pb_conn.h" +#include "net/include/server_thread.h" +#include "net/include/thread_pool.h" +#include "net/src/holy_thread.h" +#include "net/src/net_multiplexer.h" +#include "pstd/include/env.h" +#include "pstd_hash.h" +#include "rsync_service.pb.h" + +namespace rsync { +class RsyncServerConn; +struct RsyncServerTaskArg { + std::shared_ptr req; + std::shared_ptr conn; + RsyncServerTaskArg(std::shared_ptr _req, std::shared_ptr _conn) + : req(std::move(_req)), conn(std::move(_conn)) {} +}; +class RsyncReader; +class RsyncServerThread; + +class RsyncServer { + public: + RsyncServer(const std::set& ips, const int port); + ~RsyncServer(); + void Schedule(net::TaskFunc func, void* arg); + int Start(); + int Stop(); + private: + std::unique_ptr work_thread_; + std::unique_ptr rsync_server_thread_; +}; + +class RsyncServerConn : public net::PbConn { + public: + RsyncServerConn(int connfd, const std::string& ip_port, + net::Thread* thread, void* worker_specific_data, + net::NetMultiplexer* mpx); + virtual ~RsyncServerConn() override; + int DealMessage() override; + static void HandleMetaRsyncRequest(void* arg); + static void HandleFileRsyncRequest(void* arg); + private: + std::vector > readers_; + std::mutex mu_; + void* data_ = nullptr; +}; + +class RsyncServerThread : public net::HolyThread { + public: + RsyncServerThread(const std::set& ips, int port, int cron_internal, RsyncServer* arg); + ~RsyncServerThread(); + + private: + class RsyncServerConnFactory : public net::ConnFactory { + public: + explicit RsyncServerConnFactory(RsyncServer* sched) : scheduler_(sched) {} + + std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, + net::Thread* thread, void* worker_specific_data, + net::NetMultiplexer* net) const override { + return std::static_pointer_cast( + std::make_shared(connfd, ip_port, thread, scheduler_, net)); + } + private: + RsyncServer* scheduler_ = nullptr; + }; + class RsyncServerHandle : public net::ServerHandle { + public: + void FdClosedHandle(int fd, const std::string& ip_port) const override; + void FdTimeoutHandle(int fd, const std::string& ip_port) const override; + bool AccessHandle(int fd, std::string& ip) const override; + void CronHandle() const override; + }; + private: + RsyncServerConnFactory conn_factory_; + RsyncServerHandle handle_; +}; + +class RsyncReader { + public: + RsyncReader() { + block_data_ = new char[kBlockSize]; + } + ~RsyncReader() { + if (!filepath_.empty()) { + Reset(); + } + delete []block_data_; + } + pstd::Status Read(const std::string filepath, const size_t offset, + const size_t count, char* data, size_t* bytes_read, + std::string* checksum, bool* is_eof) { + std::lock_guard guard(mu_); + pstd::Status s = readAhead(filepath, offset); + if (!s.ok()) { + return s; + } + size_t offset_in_block = offset % kBlockSize; + size_t copy_count = count > (end_offset_ - offset) ? 
end_offset_ - offset : count; + memcpy(data, block_data_ + offset_in_block, copy_count); + *bytes_read = copy_count; + *is_eof = (offset + copy_count == total_size_); + return pstd::Status::OK(); + } + +private: + pstd::Status readAhead(const std::string filepath, const size_t offset) { + if (filepath == filepath_ && offset >= start_offset_ && offset < end_offset_) { + return pstd::Status::OK(); + } + if (filepath != filepath_) { + Reset(); + fd_ = open(filepath.c_str(), O_RDONLY); + if (fd_ < 0) { + LOG(ERROR) << "open file [" << filepath << "] failed! error: " << strerror(errno); + return pstd::Status::IOError("open file [" + filepath + "] failed! error: " + strerror(errno)); + } + filepath_ = filepath; + struct stat buf; + stat(filepath.c_str(), &buf); + total_size_ = buf.st_size; + } + start_offset_ = (offset / kBlockSize) * kBlockSize; + + size_t read_offset = start_offset_; + size_t read_count = kBlockSize > (total_size_ - read_offset) ? (total_size_ - read_offset) : kBlockSize; + ssize_t bytesin = 0; + char* ptr = block_data_; + while ((bytesin = pread(fd_, ptr, read_count, read_offset)) > 0) { + read_count -= bytesin; + read_offset += bytesin; + ptr += bytesin; + if (read_count <= 0) { + break; + } + } + if (bytesin < 0) { + LOG(ERROR) << "unable to read from " << filepath << ". error: " << strerror(errno); + Reset(); + return pstd::Status::IOError("unable to read from " + filepath + ". error: " + strerror(errno)); + } + end_offset_ = start_offset_ + (ptr - block_data_); + return pstd::Status::OK(); + } + void Reset() { + total_size_ = -1; + start_offset_ = 0xFFFFFFFF; + end_offset_ = 0xFFFFFFFF; + memset(block_data_, 0, kBlockSize); + md5_.reset(new pstd::MD5()); + filepath_ = ""; + close(fd_); + fd_ = -1; + } + + private: + std::mutex mu_; + const size_t kBlockSize = 16 << 20; + + char* block_data_; + size_t start_offset_ = -1; + size_t end_offset_ = -1; + size_t total_size_ = -1; + + int fd_ = -1; + std::string filepath_; + std::unique_ptr md5_; +}; + +} //end namespace rsync +#endif + diff --git a/tools/pika_migrate/include/throttle.h b/tools/pika_migrate/include/throttle.h new file mode 100644 index 0000000000..73184d6c29 --- /dev/null +++ b/tools/pika_migrate/include/throttle.h @@ -0,0 +1,45 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
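RsyncReader::readAhead() above keeps a one-block cache: it aligns the requested offset down to a block boundary (16 MiB in the class above), then calls pread() in a loop until the block is full or EOF is hit, retrying short and interrupted reads. A standalone sketch of that read loop, with a hypothetical helper name and a 4 KiB block for illustration:

```cpp
#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>
#include <vector>

// Read the kBlock-aligned window containing `offset` into `buf`.
// Returns the number of bytes read, or -1 on error.
ssize_t ReadAlignedBlock(int fd, size_t offset, size_t kBlock, std::vector<char>& buf) {
  buf.resize(kBlock);
  size_t start = (offset / kBlock) * kBlock;  // align down to a block boundary
  size_t done = 0;
  while (done < kBlock) {
    ssize_t n = pread(fd, buf.data() + done, kBlock - done, static_cast<off_t>(start + done));
    if (n < 0) {
      if (errno == EINTR) continue;  // retry interrupted reads
      return -1;
    }
    if (n == 0) break;  // EOF before the block filled up
    done += static_cast<size_t>(n);
  }
  buf.resize(done);
  return static_cast<ssize_t>(done);
}

int main(int argc, char** argv) {
  if (argc < 2) {
    std::fprintf(stderr, "usage: %s <file>\n", argv[0]);
    return 1;
  }
  int fd = open(argv[1], O_RDONLY);
  if (fd < 0) {
    std::perror("open");
    return 1;
  }
  std::vector<char> buf;
  ssize_t n = ReadAlignedBlock(fd, 5000, 4096, buf);  // window holding offset 5000
  std::printf("read %zd bytes\n", n);
  close(fd);
  return 0;
}
```

Serving subsequent byte-range requests out of the cached block is what lets the server answer many small, sequential file reads without touching the disk each time.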
+
+#ifndef THROTTLE_H_
+#define THROTTLE_H_
+
+#include <atomic>
+#include "pstd/include/pstd_mutex.h"
+#include "pika_conf.h"
+
+extern std::unique_ptr<PikaConf> g_pika_conf;
+
+namespace rsync {
+class Throttle {
+ public:
+  Throttle() {}
+  Throttle(size_t throttle_throughput_bytes, size_t check_cycle);
+  ~Throttle();
+
+  void ResetThrottleThroughputBytes(size_t new_throughput_bytes_per_s) {
+    throttle_throughput_bytes_.store(new_throughput_bytes_per_s);
+  };
+  size_t ThrottledByThroughput(size_t bytes);
+  void ReturnUnusedThroughput(size_t acquired, size_t consumed, size_t elapse_time_us);
+  static Throttle& GetInstance() {
+    static Throttle instance(g_pika_conf->throttle_bytes_per_second(), 10);
+    return instance;
+  }
+ private:
+  std::atomic<size_t> throttle_throughput_bytes_ = 100 * 1024 * 1024;
+  std::atomic<size_t> last_throughput_check_time_us_;
+  std::atomic<size_t> cur_throughput_bytes_;
+  // check cycles of throughput per second
+  size_t check_cycle_ = 10;
+  pstd::Mutex keys_mutex_;
+  size_t caculate_check_time_us_(int64_t current_time_us, int64_t check_cycle) {
+    size_t base_aligning_time_us = 1000 * 1000 / check_cycle;
+    return current_time_us / base_aligning_time_us * base_aligning_time_us;
+  }
+};
+} // end namespace rsync
+#endif
+
+
diff --git a/tools/pika_migrate/pika-migrate.md b/tools/pika_migrate/pika-migrate.md
new file mode 100644
index 0000000000..9ec245c44f
--- /dev/null
+++ b/tools/pika_migrate/pika-migrate.md
@@ -0,0 +1,43 @@
+## Pika 4.0 to Redis migration tool
+
+### Supported version:
+Pika 4.0, standalone mode with a single DB only
+
+### Features
+Online migration of data from Pika to Pika or Redis (full and incremental sync supported)
+
+### Background:
+The pika\_to\_redis tool previously provided by the Pika project only supports migrating data from Pika's DB to Pika or Redis offline and cannot sync incrementally. This tool is in effect a special Pika: after it becomes a slave of the source, it forwards the data received from the master to Redis, and since it also supports incremental sync, it achieves hot migration.
+
+### How hot migration works
+1. pika-port fetches the master's current full DB data via a dbsync request, together with the binlog position that data corresponds to
+2. After receiving the full DB data, it scans the DB and forwards the data in it to Redis
+3. Using the binlog position obtained earlier, it requests incremental sync from the master; during incremental sync, binlog entries received from the master are reassembled into Redis commands and forwarded to Redis
+
+### New configuration options
+```cpp
+###################
+## Migrate Settings
+###################
+
+target-redis-host : 127.0.0.1
+target-redis-port : 6379
+target-redis-pwd : abc
+
+sync-batch-num : 100
+redis-sender-num : 10
+```
+
+### Steps
+1. Writing the full data set to Redis may take long enough that the master's original binlog position has been purged by the time it is needed. First run `config set expire-logs-nums 10000` on the master so that it retains 10000 binlog files (binlog files consume disk space; choose a count that fits your situation), ensuring the corresponding binlog files still exist when this tool later requests incremental sync.
+2. Set the tool's `target-redis-host, target-redis-port, target-redis-pwd, sync-batch-num, redis-sender-num` options (`sync-batch-num`: once the tool has received the master's full data, it packs `sync-batch-num` entries per batch sent to Redis to improve forwarding throughput; the tool also runs `redis-sender-num` forwarding threads, and commands are assigned to threads by key hash, so multi-threaded sending cannot scramble data for the same key)
+3. Start the tool with `pika -c pika.conf` and check the log for error messages
+4. Run `slaveof ip port force` against the tool to request sync from the master, and watch for error messages
+5. Once the master-slave relationship is confirmed established (at this point pika-port is already forwarding data to the target Redis), run `info Replication` on the master to check the replication lag (you can write a sentinel key on the master and check whether it can be read from Redis right away, to judge whether the data is essentially synced)
+
+### Caveats
+1. Pika allows different data structures to use the same key, but Redis does not; when the same key exists in several structures, the first structure migrated to Redis wins and the other same-key structures are lost
+2. The tool only supports hot migration of Pika in standalone mode with a single DB; for cluster mode or multi-DB setups it reports an error and exits.
+3. To avoid the tool triggering repeated full syncs (after the master's binlogs have been purged) and writing dirty data to Redis, it protects itself: a second full-sync trigger makes it report an error and exit.
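One note on step 3 above: reassembling a binlog entry into a Redis command ultimately means writing a RESP array of bulk strings to the target. A hypothetical illustration of that encoding (not necessarily the tool's actual code path):

```cpp
#include <iostream>
#include <string>
#include <vector>

// Encode argv as a RESP array of bulk strings -- the wire format Redis
// expects for every command it receives.
std::string ToRespArray(const std::vector<std::string>& argv) {
  std::string out = "*" + std::to_string(argv.size()) + "\r\n";
  for (const auto& arg : argv) {
    out += "$" + std::to_string(arg.size()) + "\r\n" + arg + "\r\n";
  }
  return out;
}

int main() {
  // e.g. a SET recovered from the master's binlog
  std::cout << ToRespArray({"SET", "mykey", "myvalue"});
  return 0;
}
```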
+ + diff --git a/tools/pika_migrate/pikatests.sh b/tools/pika_migrate/pikatests.sh deleted file mode 100755 index bf17cf73e5..0000000000 --- a/tools/pika_migrate/pikatests.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -rm -rf ./log -rm -rf .db -cp output/bin/pika src/redis-server -cp output/conf/pika.conf tests/assets/default.conf - -tclsh tests/test_helper.tcl --clients 1 --single unit/$1 -rm src/redis-server -rm -rf ./log -rm -rf ./db diff --git a/tools/pika_migrate/protogen.cmake b/tools/pika_migrate/protogen.cmake new file mode 100644 index 0000000000..895a15b175 --- /dev/null +++ b/tools/pika_migrate/protogen.cmake @@ -0,0 +1,41 @@ +function(CUSTOM_PROTOBUF_GENERATE_CPP SRCS HDRS) + if (NOT ARGN) + message(SEND_ERROR "Error: CUSTOM_PROTOBUF_GENERATE_CPP() called without any proto files") + return() + endif () + + # Create an include path for each file specified + foreach (FIL ${ARGN}) + get_filename_component(ABS_FIL ${FIL} ABSOLUTE) + get_filename_component(ABS_PATH ${ABS_FIL} PATH) + list(FIND _protobuf_include_path ${ABS_PATH} _contains_already) + if (${_contains_already} EQUAL -1) + list(APPEND _protobuf_include_path -I ${ABS_PATH}) + endif () + endforeach () + + set(${SRCS}) + set(${HDRS}) + foreach (FIL ${ARGN}) + get_filename_component(ABS_FIL ${FIL} ABSOLUTE) + get_filename_component(FIL_WE ${FIL} NAME_WE) + + list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc") + list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h") + + execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}) + + add_custom_command( + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc" + "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h" + COMMAND ${PROTOBUF_PROTOC} + ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} ${_protobuf_include_path} ${ABS_FIL} + DEPENDS ${ABS_FIL} + COMMENT "Running C++ protocol buffer compiler on ${FIL}" + VERBATIM) + endforeach () + + set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE) + set(${SRCS} ${${SRCS}} PARENT_SCOPE) + set(${HDRS} ${${HDRS}} PARENT_SCOPE) +endfunction() \ No newline at end of file diff --git a/tools/pika_migrate/src/acl.cc b/tools/pika_migrate/src/acl.cc new file mode 100644 index 0000000000..dad50f73e6 --- /dev/null +++ b/tools/pika_migrate/src/acl.cc @@ -0,0 +1,1418 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#include +#include +#include +#include + +#include "include/acl.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" +#include "include/pika_server.h" +#include "pstd_defer.h" +#include "pstd_hash.h" + +extern PikaServer* g_pika_server; + +extern std::unique_ptr g_pika_cmd_table_manager; + +// class User +User::User(std::string name) : name_(std::move(name)) { + selectors_.emplace_back(std::make_shared(static_cast(AclSelectorFlag::ROOT))); +} + +User::User(const User& user) : name_(user.Name()) { + flags_ = user.flags_.load(); + passwords_ = user.passwords_; + aclString_ = user.aclString_; + for (const auto& item : user.selectors_) { + selectors_.emplace_back(std::make_shared(*item)); + } +} + +std::string User::Name() const { return name_; } + +void User::CleanAclString() { aclString_.clear(); } + +void User::AddPassword(const std::string& password) { passwords_.insert(password); } + +void User::RemovePassword(const std::string& password) { passwords_.erase(password); } + +void User::CleanPassword() { passwords_.clear(); } + +void User::AddSelector(const std::shared_ptr& selector) { selectors_.push_back(selector); } + +pstd::Status User::SetUser(const std::vector& rules) { + std::unique_lock wl(mutex_); + + for (const auto& rule : rules) { + auto status = SetUser(rule); + if (!status.ok()) { + LOG(ERROR) << "SetUser rule:" << rule << status.ToString(); + return status; + } + } + + return pstd::Status::OK(); +} + +pstd::Status User::SetUser(const std::string& op) { + CleanAclString(); + if (op.empty()) { + return pstd::Status::OK(); + } + if (!strcasecmp(op.data(), "on")) { + AddFlags(static_cast(AclUserFlag::ENABLED)); + DecFlags(static_cast(AclUserFlag::DISABLED)); + } else if (!strcasecmp(op.data(), "off")) { + AddFlags(static_cast(AclUserFlag::DISABLED)); + DecFlags(static_cast(AclUserFlag::ENABLED)); + } else if (!strcasecmp(op.data(), "nopass")) { + AddFlags(static_cast(AclUserFlag::NO_PASS)); + CleanPassword(); + } else if (!strcasecmp(op.data(), "resetpass")) { + DecFlags(static_cast(AclUserFlag::NO_PASS)); + CleanPassword(); + } else if (op[0] == '>' || op[0] == '#') { + std::string newpass; + if (op[0] == '>') { + newpass = pstd::sha256(op.data() + 1); + } else { + if (!pstd::isSha256(op.data() + 1)) { + return pstd::Status::Error("password not sha256"); + } + newpass = op.data() + 1; + } + AddPassword(newpass); + DecFlags(static_cast(AclUserFlag::NO_PASS)); + } else if (op[0] == '<' || op[0] == '!') { + std::string delpass; + if (op[0] == '<') { + delpass = pstd::sha256(op.data() + 1); + } else { + if (!pstd::isSha256(op.data() + 1)) { + return pstd::Status::Error("password not sha256"); + } + delpass = op.data() + 1; + } + // passwords_.erase(delpass); + RemovePassword(delpass); + } else if (op[0] == '(' && op[op.size() - 1] == ')') { + auto status = CreateSelectorFromOpSet(op); + if (!status.ok()) { + return status; + } + } else if (!strcasecmp(op.data(), "clearselectors")) { + selectors_.clear(); + return pstd::Status::OK(); + } else if (!strcasecmp(op.data(), "reset")) { + auto status = SetUser("resetpass"); + if (!status.ok()) { + return status; + } + status = SetUser("resetkeys"); + if (!status.ok()) { + return status; + } + status = SetUser("resetchannels"); + if (!status.ok()) { + return status; + } + if (g_pika_conf->acl_pubsub_default() & static_cast(AclSelectorFlag::ALL_CHANNELS)) { + status = SetUser("allchannels"); + if (!status.ok()) { + return status; + } + } + status = SetUser("off"); + if (!status.ok()) { + return status; + } + 
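+    // a reset ends by revoking every command permission (-@all)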
status = SetUser("-@all"); + if (!status.ok()) { + return status; + } + } else { + auto root = GetRootSelector(); + if (!root) { // does not appear under normal circumstances + LOG(ERROR) << "set user:" << Name() << " not find root selector"; + return pstd::Status::Error("set user error,See pika log for details"); + } + auto status = root->SetSelector(op); + if (!status.ok()) { + return status; + } + } + + return pstd::Status::OK(); +} + +pstd::Status User::CreateSelectorFromOpSet(const std::string& opSet) { + auto selector = std::make_shared(); + auto status = selector->SetSelectorFromOpSet(opSet); + if (!status.ok()) { + return status; + } + AddSelector(selector); + return status; +} + +std::shared_ptr User::GetRootSelector() { + for (const auto& item : selectors_) { + if (item->HasFlags(static_cast(AclSelectorFlag::ROOT))) { + return item; + } + } + return nullptr; +} + +void User::DescribeUser(std::string* str) { + std::unique_lock wl(mutex_); + + if (!aclString_.empty()) { + str->append(aclString_); + return; + } + + // flag + for (const auto& item : Acl::UserFlags) { + if (HasFlags(item.second)) { + aclString_ += " "; + aclString_ += item.first; + } + } + + // password + for (const auto& item : passwords_) { + aclString_ += " #" + item; + } + + // selector + std::string selectorStr; + for (const auto& item : selectors_) { + selectorStr.clear(); + item->ACLDescribeSelector(&selectorStr); + + if (item->HasFlags(static_cast(AclSelectorFlag::ROOT))) { + aclString_ += selectorStr; + } else { + aclString_ += fmt::format(" ({})", selectorStr.data() + 1); + } + } + + str->append(aclString_); +} + +bool User::MatchPassword(const std::string& password) { + std::shared_lock l(mutex_); + return passwords_.find(password) != passwords_.end(); +} + +void User::GetUserDescribe(CmdRes* res) { + std::shared_lock l(mutex_); + + res->AppendArrayLen(12); + + res->AppendString("flags"); + std::vector vector; + for (const auto& item : Acl::UserFlags) { + if (HasFlags(item.second)) { + vector.emplace_back(item.first); + } + } + res->AppendStringVector(vector); + + vector.clear(); + res->AppendString("passwords"); + for (const auto& item : passwords_) { + vector.emplace_back(item); + } + res->AppendStringVector(vector); + + size_t i = 0; + for (const auto& selector : selectors_) { + vector.clear(); + if (i == 0) { // root selector + selector->ACLDescribeSelector(vector); + for (const auto& item : vector) { + res->AppendString(item); + } + + res->AppendString("selectors"); + if (selectors_.size() == 1) { + res->AppendArrayLen(0); + } + ++i; + continue; + } + if (i == 1) { + res->AppendArrayLen(static_cast(selectors_.size()) - 1); + } + selector->ACLDescribeSelector(vector); + res->AppendStringVector(vector); + ++i; + } +} + +AclDeniedCmd User::CheckUserPermission(std::shared_ptr& cmd, const PikaCmdArgsType& argv, int8_t& subCmdIndex, + std::string* errKey) { + std::shared_lock l(mutex_); + + subCmdIndex = -1; + if (cmd->HasSubCommand()) { + subCmdIndex = cmd->SubCmdIndex(argv[1]); + if (subCmdIndex < 0) { + return AclDeniedCmd::NO_SUB_CMD; + } + } + auto keys = cmd->current_key(); + AclDeniedCmd res = AclDeniedCmd::OK; + for (const auto& selector : selectors_) { + res = selector->CheckCanExecCmd(cmd, subCmdIndex, keys, errKey); + if (res == AclDeniedCmd::OK) { + return AclDeniedCmd::OK; + } + } + return res; +} + +std::vector User::AllChannelKey() { + std::vector result; + for (const auto& selector : selectors_) { + for (const auto& item : selector->channels_) { + result.emplace_back(item); + } + } + return 
result; +} +// class User end + +// class Acl +pstd::Status Acl::Initialization() { + AddUser(CreateDefaultUser()); + UpdateDefaultUserPassword(g_pika_conf->requirepass()); + + auto status = LoadUsersAtStartup(); + auto u = GetUser(DefaultLimitUser); + bool limit_exist = true; + if (nullptr == u) { + AddUser(CreatedUser(DefaultLimitUser)); + limit_exist = false; + } + InitLimitUser(g_pika_conf->GetUserBlackList(), limit_exist); + + if (!status.ok()) { + return status; + } + return status; +} + +std::shared_ptr Acl::GetUser(const std::string& userName) { + auto u = users_.find(userName); + if (u == users_.end()) { + return nullptr; + } + return u->second; +} + +std::shared_ptr Acl::GetUserLock(const std::string& userName) { + std::shared_lock rl(mutex_); + auto u = users_.find(userName); + if (u == users_.end()) { + return nullptr; + } + return u->second; +} + +void Acl::AddUser(const std::shared_ptr& user) { users_[user->Name()] = user; } + +void Acl::AddUserLock(const std::shared_ptr& user) { + std::unique_lock wl(mutex_); + users_[user->Name()] = user; +} + +pstd::Status Acl::LoadUsersAtStartup() { + if (!g_pika_conf->users().empty() && !g_pika_conf->acl_file().empty()) { + return pstd::Status::NotSupported("Only one configuration file and acl file can be used", ""); + } + + if (g_pika_conf->users().empty()) { + return LoadUserFromFile(g_pika_conf->acl_file()); + } else { + return LoadUserConfigured(g_pika_conf->users()); + } +} + +pstd::Status Acl::LoadUserConfigured(std::vector& users) { + std::vector userRules; + for (const auto& item : users) { + userRules.clear(); + pstd::StringSplit(item, ' ', userRules); + if (userRules.size() < 2) { + return pstd::Status::Error("acl from configuration file read rules error"); + } + auto user = GetUser(userRules[0]); + if (user) { + if (user->Name() != DefaultUser) { // only `default` users are allowed to repeat + return pstd::Status::Error("acl user: " + user->Name() + " is repeated"); + } else { + user->SetUser("reset"); + } + } else { + user = CreatedUser(userRules[0]); + } + std::vector aclArgc; + auto subRule = std::vector(userRules.begin() + 1, userRules.end()); + ACLMergeSelectorArguments(subRule, &aclArgc); + + for (const auto& rule : aclArgc) { + auto status = user->SetUser(rule); + if (!status.ok()) { + LOG(ERROR) << "load user from configured file error," << status.ToString(); + return status; + } + } + AddUser(user); + } + + return pstd::Status().OK(); +} + +pstd::Status Acl::LoadUserFromFile(std::set* toUnAuthUsers) { + std::unique_lock wl(mutex_); + + for (const auto& item : users_) { + if (item.first != DefaultUser) { + toUnAuthUsers->insert(item.first); + } + } + + auto status = LoadUserFromFile(g_pika_conf->acl_file()); + if (!status.ok()) { + return status; + } + + return status; +} + +pstd::Status Acl::LoadUserFromFile(const std::string& fileName) { + if (fileName.empty()) { + return pstd::Status::OK(); + } + + std::map> users; + std::vector rules; + + bool hasDefaultUser = false; + + std::ifstream ruleFile(fileName); + if (!ruleFile) { + return pstd::Status::IOError(fmt::format("open file {} fail"), fileName); + } + + DEFER { ruleFile.close(); }; + + int lineNum = 0; + std::string lineContent; + while (std::getline(ruleFile, lineContent)) { + ++lineNum; + if (lineContent.empty()) { + continue; + } + + lineContent = pstd::StringTrim(lineContent, "\r\n "); + rules.clear(); + pstd::StringSplit(lineContent, ' ', rules); + if (rules.empty()) { + continue; + } + + if (rules[0] != "user" || rules.size() < 2) { + LOG(ERROR) << 
fmt::format("load user from acl file,line:{} '{}' illegal", lineNum, lineContent); + return pstd::Status::Error(fmt::format("line:{} '{}' illegal", lineNum, lineContent)); + } + + auto user = users.find(rules[1]); + if (user != users.end()) { + // if user is exists, exit + auto err = fmt::format("Duplicate user '{}' found on line {}.", rules[1], lineNum); + LOG(ERROR) << err; + return pstd::Status::Error(err); + } + + std::vector aclArgc; + auto subRule = std::vector(rules.begin() + 2, rules.end()); + ACLMergeSelectorArguments(subRule, &aclArgc); + + auto u = CreatedUser(rules[1]); + for (const auto& item : aclArgc) { + auto status = u->SetUser(item); + if (!status.ok()) { + LOG(ERROR) << "load user from acl file error," << status.ToString(); + return status; + } + } + if (rules[1] == DefaultUser) { + hasDefaultUser = true; + } + users[rules[1]] = u; + } + + if (!hasDefaultUser) { + users[DefaultUser] = GetUser(DefaultUser); + } + + users_ = std::move(users); + + return pstd::Status().OK(); +} + +void Acl::UpdateDefaultUserPassword(const std::string& pass) { + std::unique_lock wl(mutex_); + auto u = GetUser(DefaultUser); + u->SetUser("resetpass"); + if (pass.empty()) { + u->SetUser("nopass"); + } else { + if (g_pika_conf->userpass().empty()) { + u->SetUser("nopass"); + } else { + u->SetUser(">" + pass); + } + } +} + +void Acl::InitLimitUser(const std::string& bl, bool limit_exist) { + auto pass = g_pika_conf->userpass(); + std::vector blacklist; + pstd::StringSplit(bl, ',', blacklist); + std::unique_lock wl(mutex_); + auto u = GetUser(DefaultLimitUser); + if (limit_exist) { + if (!bl.empty()) { + for (auto& cmd : blacklist) { + cmd = pstd::StringTrim(cmd, " "); + u->SetUser("-" + cmd); + } + u->SetUser("on"); + } + if (!pass.empty()) { + u->SetUser(">" + pass); + } + } else { + if (pass.empty()) { + u->SetUser("nopass"); + } else { + u->SetUser(">" + pass); + } + u->SetUser("on"); + u->SetUser("+@all"); + u->SetUser("~*"); + u->SetUser("&*"); + + for (auto& cmd : blacklist) { + cmd = pstd::StringTrim(cmd, " "); + u->SetUser("-" + cmd); + } + } +} +// bool Acl::CheckUserCanExec(const std::shared_ptr& cmd, const PikaCmdArgsType& argv) { cmd->name(); } + +std::shared_ptr Acl::CreateDefaultUser() { + auto defaultUser = std::make_shared(DefaultUser); + defaultUser->SetUser("+@all"); + defaultUser->SetUser("~*"); + defaultUser->SetUser("&*"); + defaultUser->SetUser("on"); + defaultUser->SetUser("nopass"); + return defaultUser; +} + +std::shared_ptr Acl::CreatedUser(const std::string& name) { return std::make_shared(name); } + +pstd::Status Acl::SetUser(const std::string& userName, std::vector& op) { + auto user = GetUserLock(userName); + + std::shared_ptr tempUser = nullptr; + bool add = false; + if (!user) { // if the user not exist, create new user + user = CreatedUser(userName); + add = true; + } else { + tempUser = std::make_shared(*user); + } + + std::vector aclArgc; + ACLMergeSelectorArguments(op, &aclArgc); + + auto status = user->SetUser(aclArgc); + if (!status.ok()) { + return status; + } + + if (add) { + AddUserLock(user); + } else { + KillPubsubClientsIfNeeded(tempUser, user); + } + return pstd::Status::OK(); +} + +void Acl::KillPubsubClientsIfNeeded(const std::shared_ptr& origin, const std::shared_ptr& newUser) { + std::shared_lock l(mutex_); + bool match = true; + for (const auto& newUserSelector : newUser->selectors_) { + if (newUserSelector->HasFlags(static_cast(AclSelectorFlag::ALL_CHANNELS))) { // new user has all channels + return; + } + } + auto newChKey = 
newUser->AllChannelKey(); + + for (const auto& selector : origin->selectors_) { + if (selector->HasFlags(static_cast(AclSelectorFlag::ALL_CHANNELS))) { + match = false; + break; + } + if (!selector->EqualChannel(newChKey)) { + match = false; + break; + } + } + if (match) { + return; + } + g_pika_server->CheckPubsubClientKill(newUser->Name(), newChKey); +} + +uint32_t Acl::GetCommandCategoryFlagByName(const std::string& name) { + for (const auto& item : CommandCategories) { + if (item.first == name) { + return item.second; + } + } + return 0; +} + +std::string Acl::GetCommandCategoryFlagByName(const uint32_t category) { + for (const auto& item : CommandCategories) { + if (item.second == category) { + return item.first; + } + } + + return ""; +} + +std::vector Acl::GetAllCategoryName() { + std::vector result; + result.reserve(CommandCategories.size()); + for (const auto& item : CommandCategories) { + result.emplace_back(item.first); + } + return result; +} + +void Acl::ACLMergeSelectorArguments(std::vector& argv, std::vector* merged) { + bool openBracketStart = false; + std::string selector; + for (const auto& item : argv) { + if (item[0] == '(' && item[item.size() - 1] != ')') { + selector = item; + openBracketStart = true; + continue; + } + + if (openBracketStart) { + selector += " " + item; + if (item[item.size() - 1] == ')') { + openBracketStart = false; + merged->emplace_back(selector); + } + continue; + } + + merged->emplace_back(item); + } +} + +std::shared_ptr Acl::Auth(const std::string& userName, const std::string& password) { + std::shared_lock l(mutex_); + + auto user = GetUser(userName); + if (!user) { + return nullptr; + } + if (user->HasFlags(static_cast(AclUserFlag::DISABLED))) { + return nullptr; + } + + if (user->HasFlags(static_cast(AclUserFlag::NO_PASS))) { + return user; + } + + if (user->MatchPassword(pstd::sha256(password))) { + return user; + } + return nullptr; +} + +std::vector Acl::Users() { + std::shared_lock l(mutex_); + std::vector result; + result.reserve(users_.size()); + + for (const auto& item : users_) { + result.emplace_back(item.first); + } + + return result; +} + +void Acl::DescribeAllUser(std::vector* content) { + std::shared_lock l(mutex_); + content->reserve(users_.size()); + + for (const auto& item : users_) { + std::string saveContent; + saveContent += "user "; + saveContent += item.first; + + item.second->DescribeUser(&saveContent); + content->emplace_back(saveContent); + } +} + +pstd::Status Acl::SaveToFile() { + std::string aclFileName = g_pika_conf->acl_file(); + if (aclFileName.empty()) { + LOG(ERROR) << "save user to acl file, file name is empty"; + return pstd::Status::Error("acl file name is empty"); + } + + std::unique_lock wl(mutex_); + + std::unique_ptr file; + const std::string tmpFile = aclFileName + ".tmp"; + auto status = pstd::NewWritableFile(tmpFile, file); + if (!status.ok()) { + auto error = fmt::format("open acl user file:{} fail, error:{}", aclFileName, status.ToString()); + LOG(ERROR) << error; + return pstd::Status::Error(error); + } + + std::string saveContent; + for (const auto& item : users_) { + saveContent += "user "; + saveContent += item.first; + + item.second->DescribeUser(&saveContent); + saveContent += "\n"; + } + + file->Append(saveContent); + file->Sync(); + file->Close(); + + if (pstd::RenameFile(tmpFile, aclFileName) < 0) { // rename fail + return pstd::Status::Error("save acl rule to file fail. 
specific information see pika log"); + } + return pstd::Status::OK(); +} + +std::set Acl::DeleteUser(const std::vector& userNames) { + std::unique_lock wl(mutex_); + + std::set delUserNames; + for (const auto& userName : userNames) { + if (users_.erase(userName)) { + delUserNames.insert(userName); + } + } + + return delUserNames; +} + +std::array, 21> Acl::CommandCategories = {{ + {"keyspace", static_cast(AclCategory::KEYSPACE)}, + {"read", static_cast(AclCategory::READ)}, + {"write", static_cast(AclCategory::WRITE)}, + {"set", static_cast(AclCategory::SET)}, + {"sortedset", static_cast(AclCategory::SORTEDSET)}, + {"list", static_cast(AclCategory::LIST)}, + {"hash", static_cast(AclCategory::HASH)}, + {"string", static_cast(AclCategory::STRING)}, + {"bitmap", static_cast(AclCategory::BITMAP)}, + {"hyperloglog", static_cast(AclCategory::HYPERLOGLOG)}, + {"geo", static_cast(AclCategory::GEO)}, + {"stream", static_cast(AclCategory::STREAM)}, + {"pubsub", static_cast(AclCategory::PUBSUB)}, + {"admin", static_cast(AclCategory::ADMIN)}, + {"fast", static_cast(AclCategory::FAST)}, + {"slow", static_cast(AclCategory::SLOW)}, + {"blocking", static_cast(AclCategory::BLOCKING)}, + {"dangerous", static_cast(AclCategory::DANGEROUS)}, + {"connection", static_cast(AclCategory::CONNECTION)}, + {"transaction", static_cast(AclCategory::TRANSACTION)}, + {"scripting", static_cast(AclCategory::SCRIPTING)}, +}}; + +std::array, 3> Acl::UserFlags = {{ + {"on", static_cast(AclUserFlag::ENABLED)}, + {"off", static_cast(AclUserFlag::DISABLED)}, + {"nopass", static_cast(AclUserFlag::NO_PASS)}, +}}; + +std::array, 3> Acl::SelectorFlags = {{ + {"allkeys", static_cast(AclSelectorFlag::ALL_KEYS)}, + {"allchannels", static_cast(AclSelectorFlag::ALL_CHANNELS)}, + {"allcommands", static_cast(AclSelectorFlag::ALL_COMMANDS)}, +}}; + +const std::string Acl::DefaultUser = "default"; +const std::string Acl::DefaultLimitUser = "limit"; +const int64_t Acl::LogGroupingMaxTimeDelta = 60000; + +void Acl::AddLogEntry(int32_t reason, int32_t context, const std::string& username, const std::string& object, + const std::string& cInfo) { + int64_t nowUnix = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()) + .count(); + { + std::unique_lock wl(mutex_); + for (const auto& item : logEntries_) { + if (item->Match(reason, context, nowUnix, object, username)) { + item->AddEntry(cInfo, nowUnix); + return; + } + } + auto entry = std::make_unique(reason, context, object, username, nowUnix, cInfo); + logEntries_.push_front(std::move(entry)); + + auto maxLen = g_pika_conf->acl_log_max_len(); + if (logEntries_.size() > maxLen) { // remove overflow log + if (maxLen == 0) { + logEntries_.clear(); + } else { + logEntries_.erase(std::next(logEntries_.begin(), maxLen), logEntries_.end()); + } + } + } +} + +void Acl::GetLog(long count, CmdRes* res) { + std::shared_lock rl(mutex_); + auto size = static_cast(logEntries_.size()); + if (count == -1) { + count = size; + } + if (count > size) { + count = size; + } + if (count == 0) { + res->AppendArrayLen(0); + return; + } + + std::vector items; + res->AppendArrayLen(static_cast(count)); + items.reserve(14); + for (const auto& item : logEntries_) { + items.clear(); + item->GetReplyInfo(&items); + res->AppendStringVector(items); + count--; + if (count == 0) { + break; + } + } +} + +void Acl::ResetLog() { + std::unique_lock wl(mutex_); + logEntries_.clear(); +} +// class Acl end + +// class ACLLogEntry +bool ACLLogEntry::Match(int32_t reason, int32_t context, int64_t ctime, const 
std::string& object, + const std::string& username) { + if (reason_ != reason) { + return false; + } + if (context_ != context) { + return false; + } + auto delta = ctime_ - ctime; + if (delta > Acl::LogGroupingMaxTimeDelta) { + return false; + }; + if (object_ != object) { + return false; + } + if (username_ != username) { + return false; + } + return true; +} + +void ACLLogEntry::AddEntry(const std::string& cinfo, u_int64_t ctime) { + cinfo_ = cinfo; + ctime_ = ctime; + ++count_; +} + +void ACLLogEntry::GetReplyInfo(std::vector* vector) { + vector->emplace_back("count"); + vector->emplace_back(std::to_string(count_)); + vector->emplace_back("reason"); + switch (reason_) { + case static_cast(AclDeniedCmd::CMD): + vector->emplace_back("command"); + break; + case static_cast(AclDeniedCmd::KEY): + vector->emplace_back("key"); + break; + case static_cast(AclDeniedCmd::CHANNEL): + vector->emplace_back("channel"); + break; + case static_cast(AclDeniedCmd::NO_AUTH): + vector->emplace_back("auth"); + break; + default: + vector->emplace_back("unknown"); + break; + } + + vector->emplace_back("context"); + switch (context_) { + case static_cast(AclLogCtx::TOPLEVEL): + vector->emplace_back("toplevel"); + break; + case static_cast(AclLogCtx::MULTI): + vector->emplace_back("multi"); + break; + case static_cast(AclLogCtx::LUA): + vector->emplace_back("lua"); + break; + default: + vector->emplace_back("unknown"); + break; + } + + vector->emplace_back("object"); + vector->emplace_back(object_); + vector->emplace_back("username"); + vector->emplace_back(username_); + vector->emplace_back("age-seconds"); + int64_t nowUnix = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()) + .count(); + + char latitude[32]; + pstd::d2string(latitude, 32, static_cast(nowUnix - ctime_) / 1000); + vector->emplace_back(latitude); + vector->emplace_back("client-info"); + vector->emplace_back(cinfo_); +} + +// class ACLLogEntry end + +// class AclSelector +AclSelector::AclSelector(uint32_t flag) : flags_(flag) { + if (g_pika_conf->acl_pubsub_default()) { + AddFlags(static_cast(AclSelectorFlag::ALL_CHANNELS)); + } +} + +AclSelector::AclSelector(const AclSelector& selector) { + flags_ = selector.Flags(); + allowedCommands_ = selector.allowedCommands_; + subCommand_ = selector.subCommand_; + channels_ = selector.channels_; + commandRules_ = selector.commandRules_; + + for (const auto& item : selector.patterns_) { + auto pattern = std::make_shared(); + pattern->flags = item->flags; + pattern->pattern = item->pattern; + patterns_.emplace_back(pattern); + } +} + +pstd::Status AclSelector::SetSelector(const std::string& op) { + if (!strcasecmp(op.data(), "allkeys") || op == "~*") { + AddFlags(static_cast(AclSelectorFlag::ALL_KEYS)); + patterns_.clear(); + } else if (!strcasecmp(op.data(), "resetkeys")) { + DecFlags(static_cast(AclSelectorFlag::ALL_KEYS)); + patterns_.clear(); + } else if (!strcasecmp(op.data(), "allchannels") || !strcasecmp(op.data(), "&*")) { + AddFlags(static_cast(AclSelectorFlag::ALL_CHANNELS)); + channels_.clear(); + } else if (!strcasecmp(op.data(), "resetchannels")) { + DecFlags(static_cast(AclSelectorFlag::ALL_CHANNELS)); + channels_.clear(); + } else if (!strcasecmp(op.data(), "allcommands") || !strcasecmp(op.data(), "+@all")) { + SetAllCommandSelector(); + } else if (!strcasecmp(op.data(), "nocommands") || !strcasecmp(op.data(), "-@all")) { + RestAllCommandSelector(); + } else if (op[0] == '~' || op[0] == '%') { + if (HasFlags(static_cast(AclSelectorFlag::ALL_KEYS))) { + return 
pstd::Status::Error( + fmt::format("Error in ACL SETUSER modifier '{}': Adding a pattern after the * " + "pattern (or the 'allkeys' flag) is not valid and does not have any effect." + " Try 'resetkeys' to start with an empty list of patterns", + op)); + } + int flags = 0; + size_t offset = 1; + if (op[0] == '%') { + for (; offset < op.size(); offset++) { + if (toupper(op[offset]) == 'R' && !(flags & static_cast(AclPermission::READ))) { + flags |= static_cast(AclPermission::READ); + } else if (toupper(op[offset]) == 'W' && !(flags & static_cast(AclPermission::WRITE))) { + flags |= static_cast(AclPermission::WRITE); + } else if (op[offset] == '~') { + offset++; + break; + } else { + return pstd::Status::Error("Syntax error"); + } + } + } else { + flags = static_cast(AclPermission::ALL); + } + + if (pstd::isspace(op)) { + return pstd::Status::Error("Syntax error"); + } + + InsertKeyPattern(op.substr(offset, std::string::npos), flags); + DecFlags(static_cast(AclSelectorFlag::ALL_KEYS)); + } else if (op[0] == '&') { + if (HasFlags(static_cast(AclSelectorFlag::ALL_CHANNELS))) { + return pstd::Status::Error( + "Adding a pattern after the * pattern (or the 'allchannels' flag) is not valid and does not have any effect. " + "Try 'resetchannels' to start with an empty list of channels"); + } + if (pstd::isspace(op)) { + return pstd::Status::Error("Syntax error"); + } + InsertChannel(op.substr(1, std::string::npos)); + DecFlags(static_cast(AclSelectorFlag::ALL_CHANNELS)); + } else if (op[0] == '+' && op[1] != '@') { + auto status = SetCommandOp(op, true); + if (!status.ok()) { + return status; + } + UpdateCommonRule(op.data() + 1, true); + } else if (op[0] == '-' && op[1] != '@') { + auto status = SetCommandOp(op, false); + if (!status.ok()) { + return status; + } + UpdateCommonRule(op.data() + 1, false); + } else if ((op[0] == '+' || op[0] == '-') && op[1] == '@') { + bool allow = op[0] == '+' ? 
true : false;
+    if (!SetSelectorCommandBitsForCategory(op.data() + 1, allow)) {
+      return pstd::Status::Error("Unknown command or category name in ACL");
+    }
+  } else {
+    return pstd::Status::Error("Syntax error");
+  }
+  return pstd::Status::OK();
+}
+
+pstd::Status AclSelector::SetSelectorFromOpSet(const std::string& opSet) {
+  if (opSet[0] != '(' || opSet[opSet.size() - 1] != ')') {
+    return pstd::Status::Error("Unmatched parenthesis in acl selector starting at " + opSet);
+  }
+
+  std::vector<std::string> args;
+  pstd::StringSplit(opSet.substr(1, opSet.size() - 2), ' ', args);
+
+  for (const auto& item : args) {
+    auto status = SetSelector(item);
+    if (!status.ok()) {
+      return status;
+    }
+  }
+  return pstd::Status::OK();
+}
+
+bool AclSelector::SetSelectorCommandBitsForCategory(const std::string& categoryName, bool allow) {
+  std::string lowerCategoryName(categoryName);
+  std::transform(categoryName.begin(), categoryName.end(), lowerCategoryName.begin(), ::tolower);
+  auto category = Acl::GetCommandCategoryFlagByName(lowerCategoryName.data() + 1);
+  if (!category) {  // category not found
+    return false;
+  }
+  UpdateCommonRule(categoryName, allow);
+  for (const auto& cmd : *g_pika_cmd_table_manager->cmds_) {
+    if (cmd.second->AclCategory() & category) {  // this cmd belongs to the category
+      ChangeSelector(cmd.second.get(), allow);
+    }
+  }
+  return true;
+}
+
+void AclSelector::SetAllCommandSelector() {
+  AddFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS));
+  allowedCommands_.set();
+  for (const auto& cmd : *g_pika_cmd_table_manager->cmds_) {
+    if (cmd.second->HasSubCommand()) {
+      SetSubCommand(cmd.second->GetCmdId());
+    }
+  }
+  CleanCommandRule();
+}
+
+void AclSelector::RestAllCommandSelector() {
+  DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS));
+  allowedCommands_.reset();
+  ResetSubCommand();
+  CleanCommandRule();
+}
+
+void AclSelector::InsertKeyPattern(const std::string& str, uint32_t flags) {
+  for (const auto& item : patterns_) {
+    if (item->pattern == str) {
+      item->flags |= flags;
+      return;
+    }
+  }
+  auto pattern = std::make_shared<AclKeyPattern>();
+  pattern->flags = flags;
+  pattern->pattern = str;
+  patterns_.emplace_back(pattern);
+}
+
+void AclSelector::InsertChannel(const std::string& str) {
+  for (const auto& item : channels_) {
+    if (item == str) {
+      return;
+    }
+  }
+  channels_.emplace_back(str);
+}
+
+void AclSelector::ChangeSelector(const Cmd* cmd, bool allow) {
+  if (allow) {
+    allowedCommands_.set(cmd->GetCmdId());
+    if (cmd->HasSubCommand()) {
+      SetSubCommand(cmd->GetCmdId());
+    }
+  } else {
+    allowedCommands_.reset(cmd->GetCmdId());
+    DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS));
+    if (cmd->HasSubCommand()) {
+      ResetSubCommand(cmd->GetCmdId());
+    }
+  }
+}
+
+void AclSelector::ChangeSelector(const std::shared_ptr<Cmd>& cmd, bool allow) { ChangeSelector(cmd.get(), allow); }
+
+pstd::Status AclSelector::ChangeSelector(const std::shared_ptr<Cmd>& cmd, const std::string& subCmd, bool allow) {
+  if (cmd->HasSubCommand()) {
+    auto index = cmd->SubCmdIndex(subCmd);
+    if (index == -1) {
+      return pstd::Status::Error("Unknown command or category name in ACL");
+    }
+    if (allow) {
+      SetSubCommand(cmd->GetCmdId(), index);
+    } else {
+      ResetSubCommand(cmd->GetCmdId(), index);
+    }
+  }
+  return pstd::Status::OK();
+}
+
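+// subCommand_ maps a command id to a 32-bit mask with one bit per subcommand
+// index: granting e.g. the subcommand at index 2 sets bit 2 via
+// (subCommand_[cmdId] |= 1U << 2), while SetSubCommand(cmdId) grants every
+// subcommand at once by saturating the mask with 0xFFFFFFFF.
+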
+void AclSelector::SetSubCommand(uint32_t cmdId) { subCommand_[cmdId] = 0xFFFFFFFF; }
+
+void AclSelector::SetSubCommand(uint32_t cmdId, uint32_t subCmdIndex) { subCommand_[cmdId] |= (1 << subCmdIndex); }
+
+void AclSelector::ResetSubCommand() { subCommand_.clear(); }
+
+void AclSelector::ResetSubCommand(uint32_t cmdId) { subCommand_[cmdId] = 0; }
+
+void AclSelector::ResetSubCommand(uint32_t cmdId, uint32_t subCmdIndex) {
+  DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS));
+  subCommand_[cmdId] &= ~(1 << subCmdIndex);
+}
+
+bool AclSelector::CheckSubCommand(uint32_t cmdId, uint32_t subCmdIndex) {
+  // subCmdIndex is unsigned and validated by the caller, so no `< 0` guard is needed here
+  auto bit = subCommand_.find(cmdId);
+  if (bit == subCommand_.end()) {
+    return false;
+  }
+
+  return bit->second & (1 << subCmdIndex);
+}
+
+void AclSelector::ACLDescribeSelector(std::string* str) {
+  if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS))) {
+    str->append(" ~*");
+  } else {
+    for (const auto& item : patterns_) {
+      str->append(" ");
+      item->ToString(str);
+    }
+  }
+
+  // Pub/sub channel patterns
+  if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS))) {
+    str->append(" &*");
+  } else if (channels_.empty()) {
+    str->append(" resetchannels");
+  } else {
+    for (const auto& item : channels_) {
+      str->append(" &" + item);
+    }
+  }
+
+  // Command rules
+  DescribeSelectorCommandRules(str);
+}
+
+void AclSelector::ACLDescribeSelector(std::vector<std::string>& vector) {
+  vector.emplace_back("commands");
+  if (allowedCommands_.test(USER_COMMAND_BITS_COUNT - 1)) {
+    if (commandRules_.empty()) {
+      vector.emplace_back("+@all");
+    } else {
+      vector.emplace_back("+@all " + commandRules_);
+    }
+  } else {
+    if (commandRules_.empty()) {
+      vector.emplace_back("-@all");
+    } else {
+      vector.emplace_back("-@all " + commandRules_);
+    }
+  }
+
+  vector.emplace_back("key");
+  if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS))) {
+    vector.emplace_back("~*");
+  } else if (patterns_.empty()) {
+    vector.emplace_back("");
+  } else {
+    std::string keys;
+    for (auto it = patterns_.begin(); it != patterns_.end(); ++it) {
+      if (it != patterns_.begin()) {
+        keys += " ";
+      }
+      (*it)->ToString(&keys);
+    }
+    vector.emplace_back(keys);
+  }
+
+  vector.emplace_back("channels");
+  if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS))) {
+    vector.emplace_back("&*");
+  } else if (channels_.empty()) {
+    vector.emplace_back("");
+  } else if (channels_.size() == 1) {
+    vector.emplace_back("&" + channels_.front());
+  } else {
+    vector.emplace_back(fmt::format("&{}", fmt::join(channels_, " &")));
+  }
+}
+
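+// Permission check order: the command/subcommand bit first, then key patterns
+// (skipped for pub/sub commands), then channel patterns (pub/sub commands
+// only). The first failing stage is returned, and the offending key or
+// channel is reported through errKey.
+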
+AclDeniedCmd AclSelector::CheckCanExecCmd(std::shared_ptr<Cmd>& cmd, int8_t subCmdIndex,
+                                          const std::vector<std::string>& keys, std::string* errKey) {
+  if (!HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS)) && !(cmd->flag() & kCmdFlagsNoAuth)) {
+    if (subCmdIndex < 0) {
+      if (!allowedCommands_.test(cmd->GetCmdId())) {
+        return AclDeniedCmd::CMD;
+      }
+    } else {  // the command has subcommands
+      if (!CheckSubCommand(cmd->GetCmdId(), subCmdIndex)) {
+        return AclDeniedCmd::CMD;
+      }
+    }
+  }
+
+  // key match
+  if (!HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS)) && !keys.empty() && !cmd->hasFlag(kCmdFlagsPubSub)) {
+    for (const auto& key : keys) {
+      // skip empty keys: some write-category commands carry empty key slots
+      if (!key.empty() && !CheckKey(key, cmd->flag())) {
+        if (errKey) {
+          *errKey = key;
+        }
+        return AclDeniedCmd::KEY;
+      }
+    }
+  }
+
+  // channel match
+  if (!HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS)) && cmd->hasFlag(kCmdFlagsPubSub)) {
+    bool isPattern = cmd->name() == kCmdNamePSubscribe || cmd->name() == kCmdNamePUnSubscribe;
+    for (const auto& key : keys) {
+      if (!CheckChannel(key, isPattern)) {
+        if (errKey) {
+          *errKey = key;
+        }
+        return AclDeniedCmd::CHANNEL;
+      }
+    }
+  }
+  return AclDeniedCmd::OK;
+}
+
+bool AclSelector::EqualChannel(const std::vector<std::string>& allChannel) {
+  for (const auto& item : channels_) {
+    if (std::count(allChannel.begin(), allChannel.end(), item) == 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+void AclSelector::DescribeSelectorCommandRules(std::string* str) {
+  allowedCommands_.test(USER_COMMAND_BITS_COUNT - 1) ? str->append(" +@all") : str->append(" -@all");
+
+  // Category
+  if (!commandRules_.empty()) {
+    str->append(" ");
+    str->append(commandRules_);
+  }
+}
+
+pstd::Status AclSelector::SetCommandOp(const std::string& op, bool allow) {
+  std::string _op(op.data() + 1);
+  pstd::StringToLower(_op);
+  if (_op.find('|') == std::string::npos) {
+    auto cmd = g_pika_cmd_table_manager->GetCmd(_op);
+    if (!cmd) {
+      return pstd::Status::Error("Unknown command or category name in ACL");
+    }
+    ChangeSelector(cmd, allow);
+    return pstd::Status::OK();
+  } else {
+    /* Split the command and subcommand parts. */
+    std::vector<std::string> cmds;
+    pstd::StringSplit(_op, '|', cmds);
+
+    /* The subcommand cannot be empty, so things like CONFIG|
+     * are syntax errors of course. */
+    if (cmds.size() != 2) {
+      return pstd::Status::Error("Allowing first-arg of a subcommand is not supported");
+    }
+
+    auto parentCmd = g_pika_cmd_table_manager->GetCmd(cmds[0]);
+    if (!parentCmd) {
+      return pstd::Status::Error("Unknown command or category name in ACL");
+    }
+
+    return ChangeSelector(parentCmd, cmds[1], allow);
+
+    // the Redis ACL `first-arg` feature is not supported
+  }
+}
+
+void AclSelector::UpdateCommonRule(const std::string& rule, bool allow) {
+  std::string _rule(rule);
+  pstd::StringToLower(_rule);
+  RemoveCommonRule(_rule);
+  if (commandRules_.empty()) {
+    commandRules_ += allow ? "+" : "-";
+  } else {
+    commandRules_ += allow ? " +" : " -";
+  }
+  commandRules_ += _rule;
+}
+
+void AclSelector::RemoveCommonRule(const std::string& rule) {
+  if (commandRules_.empty()) {
+    return;
+  }
+
+  const size_t ruleLen = rule.size();
+
+  size_t start = 0;
+  while (true) {
+    start = commandRules_.find(rule, start);
+    if (start == std::string::npos) {
+      return;
+    }
+
+    size_t delNum = 0;  // number of characters to erase in this round
+    if (start + ruleLen >= commandRules_.size()) {  // the rest of commandRules_ equals rule: delete to the end
+      delNum = ruleLen;
+      --start;
+      ++delNum;
+    } else {
+      if (commandRules_[start + ruleLen] == ' ') {
+        delNum = ruleLen + 1;
+      } else if (commandRules_[start + ruleLen] == '|') {
+        size_t end = commandRules_.find(' ', start);  // find the next ' '
+        if (end == std::string::npos) {  // no trailing ' ' found: delete to the end
+          delNum = commandRules_.size() - start;
+          --start;
+          ++delNum;
+        } else {
+          delNum = end + 1 - start;
+        }
+      } else {
+        start += ruleLen;
+        continue;  // no match at this position
+      }
+    }
+
+    if (start > 0) {  // extend one character to the left so the preceding '+'/'-' or separator is erased too
+      --start;
+      ++delNum;  // start moved one to the left, so delNum grows by one
+    }
+
+    commandRules_.erase(start, delNum);
+  }
+}
+
+void AclSelector::CleanCommandRule() { commandRules_.clear(); }
+
+bool AclSelector::CheckKey(const std::string& key, const uint32_t cmdFlag) {
+  uint32_t selectorFlag = 0;
+  if (cmdFlag & kCmdFlagsRead) {
+    selectorFlag |= static_cast<uint32_t>(AclPermission::READ);
+  }
+  if (cmdFlag & kCmdFlagsWrite) {
+    selectorFlag |= static_cast<uint32_t>(AclPermission::WRITE);
+  }
+  if ((selectorFlag & static_cast<uint32_t>(AclPermission::WRITE)) &&
+      (selectorFlag & static_cast<uint32_t>(AclPermission::READ))) {
+    selectorFlag |= static_cast<uint32_t>(AclPermission::ALL);
+  }
+
+  for (const auto& item : patterns_) {
+    if ((item->flags & selectorFlag) != selectorFlag) {
+      continue;
+    }
+
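+    // glob-style match: pstd::stringmatchlen applies Redis KEYS-style
+    // pattern semantics (*, ?, [...]) to the stored pattern and the key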
if (pstd::stringmatchlen(item->pattern.data(), static_cast(item->pattern.size()), key.data(), + static_cast(key.size()), 0)) { + return true; + } + } + return false; +} + +bool AclSelector::CheckChannel(const std::string& key, bool isPattern) { + for (const auto& channel : channels_) { + if (isPattern ? (channel == key) + : (pstd::stringmatchlen(channel.data(), static_cast(channel.size()), key.data(), + static_cast(key.size()), 0))) { + return true; + } + } + return false; +} +// class AclSelector end \ No newline at end of file diff --git a/tools/pika_migrate/src/build_version.cc.in b/tools/pika_migrate/src/build_version.cc.in index de52eeaeba..1d341ef321 100644 --- a/tools/pika_migrate/src/build_version.cc.in +++ b/tools/pika_migrate/src/build_version.cc.in @@ -3,8 +3,6 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "include/build_version.h" const char* pika_build_git_sha = - "pika_git_sha:@@GIT_SHA@@"; -const char* pika_build_git_date = "pika_build_git_date:@@GIT_DATE_TIME@@"; -const char* pika_build_compile_date = __DATE__; + "pika_git_sha:@PIKA_GIT_SHA@"; +const char* pika_build_compile_date = "@PIKA_BUILD_DATE@"; diff --git a/tools/pika_migrate/src/migrator_thread.cc b/tools/pika_migrate/src/migrator_thread.cc index a7b7122c51..ba41515f5f 100644 --- a/tools/pika_migrate/src/migrator_thread.cc +++ b/tools/pika_migrate/src/migrator_thread.cc @@ -9,14 +9,11 @@ #include #include +#define GLOG_USE_GLOG_EXPORT #include -#include "blackwidow/blackwidow.h" -#include "src/redis_strings.h" -#include "src/redis_lists.h" -#include "src/redis_hashes.h" -#include "src/redis_sets.h" -#include "src/redis_zsets.h" +#include "storage/storage.h" +#include "storage/src/redis.h" #include "src/scope_snapshot.h" #include "src/strings_value_format.h" @@ -30,8 +27,6 @@ MigratorThread::~MigratorThread() { } void MigratorThread::MigrateStringsDB() { - blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_); - int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; if (MAX_BATCH_NUM < scan_batch_num) { if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { @@ -43,22 +38,21 @@ void MigratorThread::MigrateStringsDB() { int64_t ttl = -1; int64_t cursor = 0; - blackwidow::Status s; + storage::Status s; std::string value; std::vector keys; - std::map type_timestamp; - std::map type_status; + int64_t timestamp; while (true) { - cursor = bw->Scan(blackwidow::DataType::kStrings, cursor, "*", scan_batch_num, &keys); + cursor = storage_->Scan(storage::DataType::kStrings, cursor, "*", scan_batch_num, &keys); for (const auto& key : keys) { - s = bw->Get(key, &value); + s = storage_->Get(key, &value); if (!s.ok()) { LOG(WARNING) << "get " << key << " error: " << s.ToString(); continue; } - pink::RedisCmdArgsType argv; + net::RedisCmdArgsType argv; std::string cmd; argv.push_back("SET"); @@ -66,10 +60,9 @@ void MigratorThread::MigrateStringsDB() { argv.push_back(value); ttl = -1; - type_status.clear(); - type_timestamp = bw->TTL(key, &type_status); - if (type_timestamp[blackwidow::kStrings] != -2) { - ttl = type_timestamp[blackwidow::kStrings]; + timestamp = storage_->TTL(key); + if (timestamp != -2) { + ttl = timestamp; } if (ttl > 0) { @@ -77,7 +70,7 @@ void MigratorThread::MigrateStringsDB() { argv.push_back(std::to_string(ttl)); } - pink::SerializeRedisCommand(argv, &cmd); + net::SerializeRedisCommand(argv, &cmd); PlusNum(); DispatchKey(cmd, key); } @@ -89,8 +82,6 @@ void 
MigratorThread::MigrateStringsDB() { } void MigratorThread::MigrateListsDB() { - blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_); - int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; if (MAX_BATCH_NUM < scan_batch_num) { if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { @@ -102,18 +93,17 @@ void MigratorThread::MigrateListsDB() { int64_t ttl = -1; int64_t cursor = 0; - blackwidow::Status s; + storage::Status s; std::vector keys; - std::map type_timestamp; - std::map type_status; + int64_t timestamp; while (true) { - cursor = bw->Scan(blackwidow::DataType::kLists, cursor, "*", scan_batch_num, &keys); + cursor = storage_->Scan(storage::DataType::kLists, cursor, "*", scan_batch_num, &keys); for (const auto& key : keys) { int64_t pos = 0; std::vector nodes; - blackwidow::Status s = bw->LRange(key, pos, pos + g_pika_conf->sync_batch_num() - 1, &nodes); + storage::Status s = storage_->LRange(key, pos, pos + g_pika_conf->sync_batch_num() - 1, &nodes); if (!s.ok()) { LOG(WARNING) << "db->LRange(key:" << key << ", pos:" << pos << ", batch size: " << g_pika_conf->sync_batch_num() << ") = " << s.ToString(); @@ -121,7 +111,7 @@ void MigratorThread::MigrateListsDB() { } while (s.ok() && !should_exit_ && !nodes.empty()) { - pink::RedisCmdArgsType argv; + net::RedisCmdArgsType argv; std::string cmd; argv.push_back("RPUSH"); @@ -130,13 +120,13 @@ void MigratorThread::MigrateListsDB() { argv.push_back(node); } - pink::SerializeRedisCommand(argv, &cmd); + net::SerializeRedisCommand(argv, &cmd); PlusNum(); DispatchKey(cmd, key); pos += g_pika_conf->sync_batch_num(); nodes.clear(); - s = bw->LRange(key, pos, pos + g_pika_conf->sync_batch_num() - 1, &nodes); + s = storage_->LRange(key, pos, pos + g_pika_conf->sync_batch_num() - 1, &nodes); if (!s.ok()) { LOG(WARNING) << "db->LRange(key:" << key << ", pos:" << pos << ", batch size:" << g_pika_conf->sync_batch_num() << ") = " << s.ToString(); @@ -144,21 +134,20 @@ void MigratorThread::MigrateListsDB() { } ttl = -1; - type_status.clear(); - type_timestamp = bw->TTL(key, &type_status); - if (type_timestamp[blackwidow::kLists] != -2) { - ttl = type_timestamp[blackwidow::kLists]; + timestamp = storage_->TTL(key); + if (timestamp != -2) { + ttl = timestamp; } if (s.ok() && ttl > 0) { - pink::RedisCmdArgsType argv; + net::RedisCmdArgsType argv; std::string cmd; argv.push_back("EXPIRE"); argv.push_back(key); argv.push_back(std::to_string(ttl)); - pink::SerializeRedisCommand(argv, &cmd); + net::SerializeRedisCommand(argv, &cmd); PlusNum(); DispatchKey(cmd, key); } @@ -171,8 +160,6 @@ void MigratorThread::MigrateListsDB() { } void MigratorThread::MigrateHashesDB() { - blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_); - int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; if (MAX_BATCH_NUM < scan_batch_num) { if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { @@ -184,17 +171,16 @@ void MigratorThread::MigrateHashesDB() { int64_t ttl = -1; int64_t cursor = 0; - blackwidow::Status s; + storage::Status s; std::vector keys; - std::map type_timestamp; - std::map type_status; + int64_t timestamp; while (true) { - cursor = bw->Scan(blackwidow::DataType::kHashes, cursor, "*", scan_batch_num, &keys); + cursor = storage_->Scan(storage::DataType::kHashes, cursor, "*", scan_batch_num, &keys); for (const auto& key : keys) { - std::vector fvs; - blackwidow::Status s = bw->HGetall(key, &fvs); + std::vector fvs; + storage::Status s = storage_->HGetall(key, &fvs); if (!s.ok()) { LOG(WARNING) << "db->HGetall(key:" << key << ") = " << 
s.ToString(); continue; @@ -202,7 +188,7 @@ void MigratorThread::MigrateHashesDB() { auto it = fvs.begin(); while (!should_exit_ && it != fvs.end()) { - pink::RedisCmdArgsType argv; + net::RedisCmdArgsType argv; std::string cmd; argv.push_back("HMSET"); @@ -214,27 +200,26 @@ void MigratorThread::MigrateHashesDB() { argv.push_back(it->value); } - pink::SerializeRedisCommand(argv, &cmd); + net::SerializeRedisCommand(argv, &cmd); PlusNum(); DispatchKey(cmd, key); } ttl = -1; - type_status.clear(); - type_timestamp = bw->TTL(key, &type_status); - if (type_timestamp[blackwidow::kHashes] != -2) { - ttl = type_timestamp[blackwidow::kHashes]; + timestamp = storage_->TTL(key); + if (timestamp != -2) { + ttl = timestamp; } if (s.ok() && ttl > 0) { - pink::RedisCmdArgsType argv; + net::RedisCmdArgsType argv; std::string cmd; argv.push_back("EXPIRE"); argv.push_back(key); argv.push_back(std::to_string(ttl)); - pink::SerializeRedisCommand(argv, &cmd); + net::SerializeRedisCommand(argv, &cmd); PlusNum(); DispatchKey(cmd, key); } @@ -247,8 +232,6 @@ void MigratorThread::MigrateHashesDB() { } void MigratorThread::MigrateSetsDB() { - blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_); - int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; if (MAX_BATCH_NUM < scan_batch_num) { if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { @@ -260,17 +243,16 @@ void MigratorThread::MigrateSetsDB() { int64_t ttl = -1; int64_t cursor = 0; - blackwidow::Status s; + storage::Status s; std::vector keys; - std::map type_timestamp; - std::map type_status; + int64_t timestamp; while (true) { - cursor = bw->Scan(blackwidow::DataType::kSets, cursor, "*", scan_batch_num, &keys); + cursor = storage_->Scan(storage::DataType::kSets, cursor, "*", scan_batch_num, &keys); for (const auto& key : keys) { std::vector members; - blackwidow::Status s = bw->SMembers(key, &members); + storage::Status s = storage_->SMembers(key, &members); if (!s.ok()) { LOG(WARNING) << "db->SMembers(key:" << key << ") = " << s.ToString(); continue; @@ -278,7 +260,7 @@ void MigratorThread::MigrateSetsDB() { auto it = members.begin(); while (!should_exit_ && it != members.end()) { std::string cmd; - pink::RedisCmdArgsType argv; + net::RedisCmdArgsType argv; argv.push_back("SADD"); argv.push_back(key); @@ -288,27 +270,26 @@ void MigratorThread::MigrateSetsDB() { argv.push_back(*it); } - pink::SerializeRedisCommand(argv, &cmd); + net::SerializeRedisCommand(argv, &cmd); PlusNum(); DispatchKey(cmd, key); } ttl = -1; - type_status.clear(); - type_timestamp = bw->TTL(key, &type_status); - if (type_timestamp[blackwidow::kSets] != -2) { - ttl = type_timestamp[blackwidow::kSets]; + timestamp = storage_->TTL(key); + if (timestamp != -2) { + ttl = timestamp; } if (s.ok() && ttl > 0) { - pink::RedisCmdArgsType argv; + net::RedisCmdArgsType argv; std::string cmd; argv.push_back("EXPIRE"); argv.push_back(key); argv.push_back(std::to_string(ttl)); - pink::SerializeRedisCommand(argv, &cmd); + net::SerializeRedisCommand(argv, &cmd); PlusNum(); DispatchKey(cmd, key); } @@ -321,8 +302,6 @@ void MigratorThread::MigrateSetsDB() { } void MigratorThread::MigrateZsetsDB() { - blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_); - int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; if (MAX_BATCH_NUM < scan_batch_num) { if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { @@ -334,24 +313,23 @@ void MigratorThread::MigrateZsetsDB() { int64_t ttl = -1; int64_t cursor = 0; - blackwidow::Status s; + storage::Status s; std::vector keys; - std::map 
type_timestamp; - std::map type_status; + int64_t timestamp; while (true) { - cursor = bw->Scan(blackwidow::DataType::kZSets, cursor, "*", scan_batch_num, &keys); + cursor = storage_->Scan(storage::DataType::kZSets, cursor, "*", scan_batch_num, &keys); for (const auto& key : keys) { - std::vector score_members; - blackwidow::Status s = bw->ZRange(key, 0, -1, &score_members); + std::vector score_members; + storage::Status s = storage_->ZRange(key, 0, -1, &score_members); if (!s.ok()) { LOG(WARNING) << "db->ZRange(key:" << key << ") = " << s.ToString(); continue; } auto it = score_members.begin(); while (!should_exit_ && it != score_members.end()) { - pink::RedisCmdArgsType argv; + net::RedisCmdArgsType argv; std::string cmd; argv.push_back("ZADD"); @@ -363,27 +341,107 @@ void MigratorThread::MigrateZsetsDB() { argv.push_back(it->member); } - pink::SerializeRedisCommand(argv, &cmd); + net::SerializeRedisCommand(argv, &cmd); PlusNum(); DispatchKey(cmd, key); } ttl = -1; - type_status.clear(); - type_timestamp = bw->TTL(key, &type_status); - if (type_timestamp[blackwidow::kZSets] != -2) { - ttl = type_timestamp[blackwidow::kZSets]; + timestamp = storage_->TTL(key); + if (timestamp != -2) { + ttl = timestamp; } if (s.ok() && ttl > 0) { - pink::RedisCmdArgsType argv; + net::RedisCmdArgsType argv; std::string cmd; argv.push_back("EXPIRE"); argv.push_back(key); argv.push_back(std::to_string(ttl)); - pink::SerializeRedisCommand(argv, &cmd); + net::SerializeRedisCommand(argv, &cmd); + PlusNum(); + DispatchKey(cmd, key); + } + } + + if (!cursor) { + break; + } + } +} + +void MigratorThread::MigrateStreamsDB() { + int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; + if (MAX_BATCH_NUM < scan_batch_num) { + if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { + scan_batch_num = MAX_BATCH_NUM; + } else { + scan_batch_num = g_pika_conf->sync_batch_num() * 2; + } + } + + int64_t ttl = -1; + int64_t cursor = 0; + storage::Status s; + std::vector keys; + int64_t timestamp; + + while (true) { + cursor = storage_->Scan(storage::DataType::kStreams, cursor, "*", scan_batch_num, &keys); + + for (const auto& key : keys) { + std::vector id_message; + storage::StreamScanArgs arg; + storage::StreamUtils::StreamParseIntervalId("-", arg.start_sid, &arg.start_ex, 0); + storage::StreamUtils::StreamParseIntervalId("+", arg.end_sid, &arg.end_ex, UINT64_MAX); + + storage::Status s = storage_->XRange(key, arg, id_message); + if (!s.ok()) { + LOG(WARNING) << "db->XRange(key:" << key << ") = " << s.ToString(); + continue; + } + auto it = id_message.begin(); + while (!should_exit_ && it != id_message.end()) { + net::RedisCmdArgsType argv; + std::string cmd; + + argv.push_back("XADD"); + argv.push_back(key); + for (int idx = 0; + idx < g_pika_conf->sync_batch_num() && !should_exit_ && it != id_message.end(); + idx++, it++) { + std::vector message; + storage::StreamUtils::DeserializeMessage(it->value, message); + storage::streamID sid; + sid.DeserializeFrom(it->field); + argv.push_back(sid.ToString()); + for (const auto& m : message) { + argv.push_back(m); + } + } + + net::SerializeRedisCommand(argv, &cmd); + PlusNum(); + DispatchKey(cmd, key); + } + + ttl = -1; + timestamp = storage_->TTL(key); + if (timestamp != -2) { + ttl = timestamp; + } + + if (s.ok() && ttl > 0) { + net::RedisCmdArgsType argv; + std::string cmd; + + argv.push_back("EXPIRE"); + argv.push_back(key); + argv.push_back(std::to_string(ttl)); + + net::SerializeRedisCommand(argv, &cmd); PlusNum(); DispatchKey(cmd, key); } @@ -397,31 +455,35 @@ void 
MigratorThread::MigrateZsetsDB() { void MigratorThread::MigrateDB() { switch (int(type_)) { - case int(blackwidow::kStrings) : { + case int(storage::DataType::kStrings) : { MigrateStringsDB(); break; } - case int(blackwidow::kLists) : { + case int(storage::DataType::kLists) : { MigrateListsDB(); break; } - case int(blackwidow::kHashes) : { + case int(storage::DataType::kHashes) : { MigrateHashesDB(); break; } - case int(blackwidow::kSets) : { + case int(storage::DataType::kSets) : { MigrateSetsDB(); break; } - case int(blackwidow::kZSets) : { + case int(storage::DataType::kZSets) : { MigrateZsetsDB(); break; } + case int(storage::DataType::kStreams) : { + MigrateStreamsDB(); + break; + } default: { LOG(WARNING) << "illegal db type " << type_; break; @@ -440,28 +502,32 @@ void MigratorThread::DispatchKey(const std::string &command, const std::string& const char* GetDBTypeString(int type) { switch (type) { - case int(blackwidow::kStrings) : { - return "blackwidow::kStrings"; + case int(storage::DataType::kStrings) : { + return "storage::kStrings"; + } + + case int(storage::DataType::kLists) : { + return "storage::kLists"; } - case int(blackwidow::kLists) : { - return "blackwidow::kLists"; + case int(storage::DataType::kHashes) : { + return "storage::kHashes"; } - case int(blackwidow::kHashes) : { - return "blackwidow::kHashes"; + case int(storage::DataType::kSets) : { + return "storage::kSets"; } - case int(blackwidow::kSets) : { - return "blackwidow::kSets"; + case int(storage::DataType::kZSets) : { + return "storage::kZSets"; } - case int(blackwidow::kZSets) : { - return "blackwidow::kZSets"; + case int(storage::DataType::kStreams) : { + return "storage::kStreams"; } default: { - return "blackwidow::Unknown"; + return "storage::Unknown"; } } } diff --git a/tools/pika_migrate/src/pika.cc b/tools/pika_migrate/src/pika.cc index 0408752dd9..a530e3fbda 100644 --- a/tools/pika_migrate/src/pika.cc +++ b/tools/pika_migrate/src/pika.cc @@ -3,51 +3,73 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
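+// NOTE: every process-wide singleton below except g_pika_server is now owned
+// by a unique_ptr (the TODO explains why g_pika_server stays a raw pointer);
+// all of them are released by the DEFER block at the end of main().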
-#include #include #include +#include +#include -#include "slash/include/env.h" -#include "include/pika_rm.h" -#include "include/pika_server.h" -#include "include/pika_command.h" -#include "include/pika_conf.h" +#include "net/include/net_stats.h" +#include "pstd/include/pika_codis_slot.h" #include "include/pika_define.h" -#include "include/pika_version.h" +#include "pstd/include/pstd_defer.h" +#include "include/pika_conf.h" +#include "pstd/include/env.h" #include "include/pika_cmd_table_manager.h" +#include "include/pika_slot_command.h" +#include "include/build_version.h" +#include "include/pika_command.h" +#include "include/pika_server.h" +#include "include/pika_version.h" +#include "include/pika_rm.h" -#ifdef TCMALLOC_EXTENSION -#include -#endif +std::unique_ptr g_pika_conf; +// todo : change to unique_ptr will coredump +PikaServer* g_pika_server = nullptr; +std::unique_ptr g_pika_rm; -PikaConf* g_pika_conf; -PikaServer* g_pika_server; -PikaReplicaManager* g_pika_rm; +std::unique_ptr g_pika_cmd_table_manager; -PikaCmdTableManager* g_pika_cmd_table_manager; +extern std::unique_ptr g_network_statistic; static void version() { - char version[32]; - snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, - PIKA_MINOR, PIKA_PATCH); - printf("-----------Pika server %s ----------\n", version); + char version[32]; + snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, PIKA_MINOR, PIKA_PATCH); + std::cout << "-----------Pika server----------" << std::endl; + std::cout << "pika_version: " << version << std::endl; + std::cout << pika_build_git_sha << std::endl; + std::cout << "pika_build_compile_date: " << pika_build_compile_date << std::endl; + // fake version for client SDK + std::cout << "redis_version: " << version << std::endl; +} + +static void PrintPikaLogo() { + printf(" ............. .... ..... ..... ..... \n" + " ################# #### ##### ##### ####### \n" + " #### ##### #### ##### ##### ######### \n" + " #### ##### #### ##### ##### #### ##### \n" + " #### ##### #### ##### ##### #### ##### \n" + " ################ #### ##### ##### #### ##### \n" + " #### #### ##### ##### ################# \n" + " #### #### ##### ###### ##### ##### \n" + " #### #### ##### ###### ##### ##### \n"); } static void PikaConfInit(const std::string& path) { printf("path : %s\n", path.c_str()); - g_pika_conf = new PikaConf(path); + g_pika_conf = std::make_unique(path); if (g_pika_conf->Load() != 0) { LOG(FATAL) << "pika load conf error"; } version(); printf("-----------Pika config list----------\n"); g_pika_conf->DumpConf(); + PrintPikaLogo(); printf("-----------Pika config end----------\n"); } static void PikaGlogInit() { - if (!slash::FileExists(g_pika_conf->log_path())) { - slash::CreatePath(g_pika_conf->log_path()); + if (!pstd::FileExists(g_pika_conf->log_path())) { + pstd::CreatePath(g_pika_conf->log_path()); } if (!g_pika_conf->daemonize()) { @@ -61,7 +83,9 @@ static void PikaGlogInit() { } static void daemonize() { - if (fork() != 0) exit(0); /* parent exits */ + if (fork()) { + exit(0); /* parent exits */ + } setsid(); /* create a new session */ } @@ -75,21 +99,20 @@ static void close_std() { } } -static void create_pid_file(void) { +static void create_pid_file() { /* Try to write the pid file in a best-effort way. 
*/ std::string path(g_pika_conf->pidfile()); size_t pos = path.find_last_of('/'); if (pos != std::string::npos) { - // mkpath(path.substr(0, pos).c_str(), 0755); - slash::CreateDir(path.substr(0, pos)); + pstd::CreateDir(path.substr(0, pos)); } else { path = kPikaPidFile; } - FILE *fp = fopen(path.c_str(), "w"); + FILE* fp = fopen(path.c_str(), "w"); if (fp) { - fprintf(fp,"%d\n",(int)getpid()); + fprintf(fp, "%d\n", static_cast(getpid())); fclose(fp); } } @@ -107,31 +130,29 @@ static void PikaSignalSetup() { signal(SIGTERM, &IntSigHandle); } -static void usage() -{ - char version[32]; - snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, - PIKA_MINOR, PIKA_PATCH); - fprintf(stderr, - "Pika module %s\n" - "usage: pika [-hv] [-c conf/file]\n" - "\t-h -- show this help\n" - "\t-c conf/file -- config file \n" - " example: ./output/bin/pika -c ./conf/pika.conf\n", - version - ); +static void usage() { + char version[32]; + snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, PIKA_MINOR, PIKA_PATCH); + fprintf(stderr, + "Pika module %s\n" + "usage: pika [-hv] [-c conf/file]\n" + "\t-h -- show this help\n" + "\t-c conf/file -- config file \n" + "\t-v -- show version\n" + " example: ./output/bin/pika -c ./conf/pika.conf\n", + version); } -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { if (argc != 2 && argc != 3) { usage(); exit(-1); } bool path_opt = false; - char c; + signed char c; char path[1024]; - while (-1 != (c = getopt(argc, argv, "c:hv"))) { + while (-1 != (c = static_cast(getopt(argc, argv, "c:hv")))) { switch (c) { case 'c': snprintf(path, 1024, "%s", optarg); @@ -149,14 +170,13 @@ int main(int argc, char *argv[]) { } } - if (path_opt == false) { - fprintf (stderr, "Please specify the conf file path\n" ); + if (!path_opt) { + fprintf(stderr, "Please specify the conf file path\n"); usage(); exit(-1); } -#ifdef TCMALLOC_EXTENSION - MallocExtension::instance()->Initialize(); -#endif + g_pika_cmd_table_manager = std::make_unique(); + g_pika_cmd_table_manager->InitCmdTable(); PikaConfInit(path); rlimit limit; @@ -168,9 +188,12 @@ int main(int argc, char *argv[]) { limit.rlim_cur = maxfiles; limit.rlim_max = maxfiles; if (setrlimit(RLIMIT_NOFILE, &limit) != -1) { - LOG(WARNING) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika have successfully reconfig it to " << limit.rlim_cur; + LOG(WARNING) << "your 'limit -n ' of " << old_limit + << " is not enough for Redis to start. pika have successfully reconfig it to " << limit.rlim_cur; } else { - LOG(FATAL) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika can not reconfig it(" << strerror(errno) << "), do it by yourself"; + LOG(FATAL) << "your 'limit -n ' of " << old_limit + << " is not enough for Redis to start. 
pika can not reconfig it(" << strerror(errno) + << "), do it by yourself"; } } @@ -180,35 +203,56 @@ int main(int argc, char *argv[]) { create_pid_file(); } - PikaGlogInit(); PikaSignalSetup(); LOG(INFO) << "Server at: " << path; g_pika_server = new PikaServer(); - g_pika_rm = new PikaReplicaManager(); - g_pika_cmd_table_manager = new PikaCmdTableManager(); + g_pika_rm = std::make_unique<PikaReplicaManager>(); + g_network_statistic = std::make_unique<net::NetworkStatistic>(); + g_pika_server->InitDBStruct(); + // the cmd table of g_pika_cmd_table_manager must be initialized before calling PikaServer::InitStatistic(CmdTable*) + g_pika_server->InitStatistic(g_pika_cmd_table_manager->GetCmdTable()); + auto status = g_pika_server->InitAcl(); + if (!status.ok()) { + LOG(FATAL) << status.ToString(); + } if (g_pika_conf->daemonize()) { close_std(); } + DEFER { + delete g_pika_server; + g_pika_server = nullptr; + g_pika_rm.reset(); + g_pika_cmd_table_manager.reset(); + g_network_statistic.reset(); + ::google::ShutdownGoogleLogging(); + g_pika_conf.reset(); + }; + + // wash data if necessary + if (g_pika_conf->wash_data()) { + auto dbs = g_pika_server->GetDB(); + for (auto& kv : dbs) { + if (!kv.second->WashData()) { + LOG(FATAL) << "write batch error in WashData"; + return 1; + } + } + } + g_pika_rm->Start(); g_pika_server->Start(); - + if (g_pika_conf->daemonize()) { unlink(g_pika_conf->pidfile().c_str()); } // stop PikaReplicaManager first, avoid internal threads - // may references to dead PikaServer + // may reference a dead PikaServer g_pika_rm->Stop(); - delete g_pika_server; - delete g_pika_rm; - delete g_pika_cmd_table_manager; - ::google::ShutdownGoogleLogging(); - delete g_pika_conf; - return 0; } diff --git a/tools/pika_migrate/src/pika_acl.cc b/tools/pika_migrate/src/pika_acl.cc new file mode 100644 index 0000000000..b6fe3375b7 --- /dev/null +++ b/tools/pika_migrate/src/pika_acl.cc @@ -0,0 +1,328 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
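+
+// An illustrative redis-cli session against the handlers below (user name,
+// password and rules are made up, not part of this patch):
+//
+//   ACL SETUSER app_rw on >secret ~app:* +@read +@write
+//   ACL DRYRUN app_rw GET app:1    -> +OK
+//   ACL DRYRUN app_rw FLUSHALL     -> no permissions to run 'flushall'
+//   ACL SAVE                       -> persists users to the configured acl-file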
+ +#include + +#include "include/pika_acl.h" +#include "include/pika_client_conn.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" + +const static int AclGenPassMaxBit = 4096; + +extern std::unique_ptr g_pika_cmd_table_manager; + +void PikaAclCmd::Do() { + if (subCmd_ == "cat") { + Cat(); + } else if (subCmd_ == "deluser") { + DelUser(); + } else if (subCmd_ == "dryrun") { + DryRun(); + } else if (subCmd_ == "genpass") { + GenPass(); + } else if (subCmd_ == "getuser") { + GetUser(); + } else if (subCmd_ == "list") { + List(); + } else if (subCmd_ == "load") { + Load(); + } else if (subCmd_ == "log") { + Log(); + } else if (subCmd_ == "save") { + Save(); + } else if (subCmd_ == "setuser") { + SetUser(); + } else if (subCmd_ == "users") { + Users(); + } else if (subCmd_ == "whoami") { + WhoAmI(); + } else if (subCmd_ == "help") { + Help(); + } else { + res_.SetRes(CmdRes::kSyntaxErr, KCmdNameAcl); + return; + } +} + +void PikaAclCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, KCmdNameAcl); + return; + } + + subCmd_ = argv_[1]; + pstd::StringToLower(subCmd_); + + if (argv_.size() < 3) { + if (subCmd_ == "setuser" || subCmd_ == "deluser" || subCmd_ == "getuser") { + res_.SetRes(CmdRes::kWrongNum, fmt::format("'acl|{}'", subCmd_)); + return; + } + } + + if (subCmd_ == "dryrun" && argv_.size() < 4) { + res_.SetRes(CmdRes::kWrongNum, "'acl|dryrun'"); + return; + } + if (subCmd_ == "log" && argv_.size() != 2 && argv_.size() != 3) { + res_.SetRes(CmdRes::kWrongNum, "'acl|log'"); + return; + } + + if (subCmd_ == "save" || subCmd_ == "load") { + if (g_pika_conf->acl_file().empty()) { + res().SetRes(CmdRes::kErrOther, + "This Pika is not configured to use an ACL file. You may want to specify users via the " + "ACL SETUSER command and then issue a CONFIG REWRITE (assuming you have a Redis configuration file " + "set) in order to store users in the Pika configuration."); + return; + } + } +} + +void PikaAclCmd::Cat() { + if (argv_.size() > 3) { + res().SetRes(CmdRes::kErrOther, "unknown subcommand or wrong number of arguments for 'CAT'"); + return; + } + if (argv_.size() == 2) { + res().AppendStringVector(Acl::GetAllCategoryName()); + return; + } + auto category = Acl::GetCommandCategoryFlagByName(argv_[2]); + if (category == 0) { + res().SetRes(CmdRes::kErrOther, fmt::format("Unknown category '{}'", argv_[2])); + return; + } + res().AppendStringVector(g_pika_cmd_table_manager->GetAclCategoryCmdNames(category)); +} + +void PikaAclCmd::DelUser() { + for (auto it = argv_.begin() + 2; it != argv_.end(); ++it) { + if (it->data() == Acl::DefaultUser) { + res().SetRes(CmdRes::kErrOther, "The 'default' user cannot be removed"); + return; + } + if (it->data() == Acl::DefaultLimitUser) { + res().SetRes(CmdRes::kErrOther, "The 'limit' user cannot be removed"); + return; + } + } + + std::vector userNames(argv_.begin() + 2, argv_.end()); + auto delUserNames = g_pika_server->Acl()->DeleteUser(userNames); + res().AppendInteger(static_cast(delUserNames.size())); + + g_pika_server->AllClientUnAuth(delUserNames); +} + +void PikaAclCmd::DryRun() { + auto user = g_pika_server->Acl()->GetUserLock(argv_[2]); + + if (!user) { + res().SetRes(CmdRes::kErrOther, fmt::format("User '{}' not found", argv_[2])); + return; + } + auto cmd = g_pika_cmd_table_manager->GetCmd(argv_[3]); + + if (!cmd) { + res().SetRes(CmdRes::kErrOther, fmt::format("Command '{}' not found", argv_[3])); + return; + } + + PikaCmdArgsType args; + if (argv_.size() > 4) { + args = 
PikaCmdArgsType(argv_.begin() + 3, argv_.end());
+  }
+  if (!cmd->CheckArg(args.size())) {
+    res().SetRes(CmdRes::kWrongNum, cmd->name());
+    return;
+  }
+
+  int8_t subCmdIndex = -1;
+  AclDeniedCmd checkRes = user->CheckUserPermission(cmd, args, subCmdIndex, nullptr);
+
+  switch (checkRes) {
+    case AclDeniedCmd::OK:
+      res().SetRes(CmdRes::kOk);
+      break;
+    case AclDeniedCmd::CMD:
+      res().SetRes(CmdRes::kErrOther,
+                   cmd->HasSubCommand()
+                       ? fmt::format("This user has no permissions to run the '{}|{}' command", argv_[3], argv_[4])
+                       : fmt::format("This user has no permissions to run the '{}' command", argv_[3]));
+      break;
+    case AclDeniedCmd::KEY:
+      res().SetRes(CmdRes::kErrOther,
+                   cmd->HasSubCommand()
+                       ? fmt::format("This user has no permissions to access the keys of the '{}|{}' command", argv_[3], argv_[4])
+                       : fmt::format("This user has no permissions to access the keys of the '{}' command", argv_[3]));
+      break;
+    case AclDeniedCmd::CHANNEL:
+      res().SetRes(CmdRes::kErrOther,
+                   cmd->HasSubCommand()
+                       ? fmt::format("This user has no permissions to access the channels of the '{}|{}' command", argv_[3], argv_[4])
+                       : fmt::format("This user has no permissions to access the channels of the '{}' command", argv_[3]));
+      break;
+    case AclDeniedCmd::NUMBER:
+      res().SetRes(CmdRes::kErrOther, fmt::format("wrong number of arguments for '{}' command", argv_[3]));
+      break;
+    default:
+      break;
+  }
+}
+
+void PikaAclCmd::GenPass() {
+  int bits = 256;
+  if (argv_.size() > 2) {
+    try {
+      bits = std::stoi(argv_[2]);
+    } catch (std::exception& e) {
+      res().SetRes(CmdRes::kErrOther, fmt::format("Invalid bits value: {}", argv_[2]));
+      return;
+    }
+  }
+
+  if (bits <= 0 || bits > AclGenPassMaxBit) {
+    res().SetRes(
+        CmdRes::kErrOther,
+        fmt::format(
+            "ACL GENPASS argument must be the number of bits for the output password, a positive number up to 4096, got {}",
+            bits));
+    return;
+  }
+
+  std::string pass = pstd::getRandomHexChars((bits + 3) / 4);
+  res().AppendString(pass);
+}
+
+void PikaAclCmd::GetUser() {
+  auto user = g_pika_server->Acl()->GetUserLock(argv_[2]);
+
+  if (!user) {
+    res().AppendStringLen(-1);
+    return;
+  }
+
+  user->GetUserDescribe(&res_);
+}
+
+void PikaAclCmd::List() {
+  std::vector<std::string> result;
+  g_pika_server->Acl()->DescribeAllUser(&result);
+
+  res().AppendStringVector(result);
+}
+
+void PikaAclCmd::Load() {
+  std::set<std::string> toUnAuthUsers;
+  auto status = g_pika_server->Acl()->LoadUserFromFile(&toUnAuthUsers);
+  if (status.ok()) {
+    res().SetRes(CmdRes::kOk);
+    g_pika_server->AllClientUnAuth(toUnAuthUsers);
+    return;
+  }
+
+  res().SetRes(CmdRes::kErrOther, status.ToString());
+}
+
+void PikaAclCmd::Log() {
+  if (argv_.size() == 2) {
+    g_pika_server->Acl()->GetLog(-1, &res_);
+    return;
+  }
+
+  long count = 0;
+  if (!strcasecmp(argv_[2].data(), "reset")) {
+    g_pika_server->Acl()->ResetLog();
+    res().SetRes(CmdRes::kOk);
+    return;
+  }
+  if (!pstd::string2int(argv_[2].data(), argv_[2].size(), &count)) {
+    res().SetRes(CmdRes::kErrOther, fmt::format("Invalid count value: {}", argv_[2]));
+    return;
+  }
+
+  g_pika_server->Acl()->GetLog(count, &res_);
+}
+
+void PikaAclCmd::Save() {
+  auto status = g_pika_server->Acl()->SaveToFile();
+
+  if (status.ok()) {
+    res().SetRes(CmdRes::kOk);
+  } else {
+    res().SetRes(CmdRes::kErrOther, status.ToString());
+  }
+}
+
+void PikaAclCmd::SetUser() {
+  std::vector<std::string> rule;
+  if (argv_.size() > 3) {
+    rule = std::vector<std::string>(argv_.begin() + 3, argv_.end());
+  }
+
+  if (pstd::isspace(argv_[2])) {
+    res().SetRes(CmdRes::kErrOther, "Usernames can't contain spaces or null characters");
+    return;
+  }
+  auto status = g_pika_server->Acl()->SetUser(argv_[2],
rule); + if (status.ok()) { + res().SetRes(CmdRes::kOk); + return; + } + LOG(ERROR) << "ACL SETUSER modifier " + status.ToString(); + res().SetRes(CmdRes::kErrOther, "ACL SETUSER modifier " + status.ToString()); +} + +void PikaAclCmd::Users() { res().AppendStringVector(g_pika_server->Acl()->Users()); } + +void PikaAclCmd::WhoAmI() { + std::shared_ptr conn = std::dynamic_pointer_cast(GetConn()); + auto name = conn->UserName(); + + if (name.empty()) { + res().AppendString(Acl::DefaultUser); + } else { + res().AppendString(name); + } +} + +void PikaAclCmd::Help() { + if (argv_.size() > 2) { + res().SetRes(CmdRes::kWrongNum, "acl|help"); + return; + } + const std::vector info = { + "CAT []", + " List all commands that belong to , or all command categories", + " when no category is specified.", + "DELUSER [ ...]", + " Delete a list of users.", + "DRYRUN [ ...]", + " Returns whether the user can execute the given command without executing the command.", + "GETUSER ", + " Get the user's details.", + "GENPASS []", + " Generate a secure 256-bit user password. The optional `bits` argument can", + " be used to specify a different size.", + "LIST", + " Show users details in config file format.", + "LOAD", + " Reload users from the ACL file.", + "LOG [ | RESET]", + " Show the ACL log entries.", + "SAVE", + " Save the current config to the ACL file.", + "SETUSER [ ...]", + " Create or modify a user with the specified attributes.", + "USERS", + " List all the registered usernames.", + "WHOAMI", + " Return the current connection username."}; + + res().AppendStringVector(info); +} diff --git a/tools/pika_migrate/src/pika_admin.cc b/tools/pika_migrate/src/pika_admin.cc index 8424a6e8c0..3c0cf13b11 100644 --- a/tools/pika_migrate/src/pika_admin.cc +++ b/tools/pika_migrate/src/pika_admin.cc @@ -5,30 +5,32 @@ #include "include/pika_admin.h" -#include +#include #include #include -#include "slash/include/rsync.h" +#include +#include -#include "include/pika_conf.h" -#include "include/pika_server.h" +#include + +#include "include/build_version.h" +#include "include/pika_cmd_table_manager.h" #include "include/pika_rm.h" +#include "include/pika_server.h" #include "include/pika_version.h" -#include "include/build_version.h" - -#ifdef TCMALLOC_EXTENSION -#include -#endif +#include "include/pika_conf.h" +#include "pstd/include/rsync.h" +#include "include/throttle.h" +using pstd::Status; -extern PikaServer *g_pika_server; -extern PikaConf *g_pika_conf; -extern PikaReplicaManager *g_pika_rm; +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; -static std::string ConstructPinginPubSubResp(const PikaCmdArgsType &argv) { +static std::string ConstructPinginPubSubResp(const PikaCmdArgsType& argv) { if (argv.size() > 2) { - return "-ERR wrong number of arguments for " + kCmdNamePing + - " command\r\n"; + return "-ERR wrong number of arguments for " + kCmdNamePing + " command\r\n"; } std::stringstream resp; @@ -43,6 +45,53 @@ static std::string ConstructPinginPubSubResp(const PikaCmdArgsType &argv) { return resp.str(); } +static double MethodofCommandStatistics(const uint64_t time_consuming, const uint64_t frequency) { + return (static_cast(time_consuming) / 1000.0) / static_cast(frequency); +} + +static double MethodofTotalTimeCalculation(const uint64_t time_consuming) { + return static_cast(time_consuming) / 1000.0; +} + +enum AuthResult { + OK, + INVALID_PASSWORD, + NO_REQUIRE_PASS, + INVALID_CONN, +}; + +static AuthResult AuthenticateUser(const std::string& 
+static AuthResult AuthenticateUser(const std::string& cmdName, const std::string& userName, const std::string& pwd, + const std::shared_ptr<net::NetConn>& conn, bool defaultAuth) { + if (defaultAuth) { + auto defaultUser = g_pika_server->Acl()->GetUserLock(Acl::DefaultUser); + if (defaultUser->HasFlags(static_cast<uint32_t>(AclUserFlag::NO_PASS))) { + return AuthResult::NO_REQUIRE_PASS; + } + } + + auto user = g_pika_server->Acl()->Auth(userName, pwd); + + if (!user) { + std::string cInfo; + if (auto ptr = std::dynamic_pointer_cast<PikaClientConn>(conn); ptr) { + ptr->ClientInfoToString(&cInfo, cmdName); + } + g_pika_server->Acl()->AddLogEntry(static_cast<int32_t>(AclDeniedCmd::NO_AUTH), + static_cast<int32_t>(AclLogCtx::TOPLEVEL), userName, "AUTH", cInfo); + return AuthResult::INVALID_PASSWORD; + } + + if (!conn) { + LOG(WARNING) << " weak ptr is empty"; + return AuthResult::INVALID_CONN; + } + std::shared_ptr<PikaClientConn> cli_conn = std::dynamic_pointer_cast<PikaClientConn>(conn); + + cli_conn->DoAuth(user); + + return AuthResult::OK; +} + /* * slaveof no one * slaveof ip port @@ -59,34 +108,30 @@ void SlaveofCmd::DoInitial() { return; } - if (argv_.size() == 3 - && !strcasecmp(argv_[1].data(), "no") - && !strcasecmp(argv_[2].data(), "one")) { - is_noone_ = true; + if (argv_.size() == 3 && (strcasecmp(argv_[1].data(), "no") == 0) && (strcasecmp(argv_[2].data(), "one") == 0)) { + is_none_ = true; return; } - - // self is master of A , want to slavof B - if (g_pika_server->role() & PIKA_ROLE_MASTER) { + // self is master of A, wants to slaveof B + if ((g_pika_server->role() & PIKA_ROLE_MASTER) != 0) { res_.SetRes(CmdRes::kErrOther, "already master of others, invalid usage"); return; } master_ip_ = argv_[1]; std::string str_master_port = argv_[2]; - if (!slash::string2l(str_master_port.data(), str_master_port.size(), &master_port_) || master_port_ <= 0) { + if ((pstd::string2int(str_master_port.data(), str_master_port.size(), &master_port_) == 0) || master_port_ <= 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - if ((master_ip_ == "127.0.0.1" || master_ip_ == g_pika_server->host()) - && master_port_ == g_pika_server->port()) { - res_.SetRes(CmdRes::kErrOther, "you fucked up"); + if ((pstd::StringToLower(master_ip_) == "localhost" || master_ip_ == "127.0.0.1" || master_ip_ == g_pika_server->host()) && master_port_ == g_pika_server->port()) { + res_.SetRes(CmdRes::kErrOther, "The master ip:port and the slave ip:port are the same"); return; } if (argv_.size() == 4) { - if (!strcasecmp(argv_[3].data(), "force")) { + if (strcasecmp(argv_[3].data(), "force") == 0) { g_pika_server->SetForceFullSync(true); } else { res_.SetRes(CmdRes::kWrongNum, kCmdNameSlaveof); @@ -94,27 +139,33 @@ void SlaveofCmd::DoInitial() { } } -void SlaveofCmd::Do(std::shared_ptr partition) { +void SlaveofCmd::Do() { // Check if we are already connected to the specified master - if ((master_ip_ == "127.0.0.1" || g_pika_server->master_ip() == master_ip_) - && g_pika_server->master_port() == master_port_) { + if ((master_ip_ == "127.0.0.1" || g_pika_server->master_ip() == master_ip_) && + g_pika_server->master_port() == master_port_) { res_.SetRes(CmdRes::kOk); return; } g_pika_server->RemoveMaster(); - if (is_noone_) { + if (is_none_) { res_.SetRes(CmdRes::kOk); g_pika_conf->SetSlaveof(std::string()); return; } - bool sm_ret = g_pika_server->SetMaster(master_ip_, master_port_); - + /* An OK returned by the slaveof command does not mean that data synchronization + * succeeded; it only switches the slaveof executor's state to slave. */ + + bool sm_ret = g_pika_server->SetMaster(master_ip_, static_cast<int32_t>(master_port_)); + if 
(sm_ret) { res_.SetRes(CmdRes::kOk); + g_pika_server->ClearCacheDbAsync(db_); g_pika_conf->SetSlaveof(master_ip_ + ":" + std::to_string(master_port_)); + g_pika_server->SetFirstMetaSync(true); } else { res_.SetRes(CmdRes::kErrOther, "Server is not in correct state for slaveof"); } @@ -131,12 +182,7 @@ void DbSlaveofCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameDbSlaveof); return; } - if (!g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "DbSlaveof only support on classic mode"); - return; - } - if (g_pika_server->role() ^ PIKA_ROLE_SLAVE - || !g_pika_server->MetaSyncDone()) { + if (((g_pika_server->role() ^ PIKA_ROLE_SLAVE) != 0) || !g_pika_server->MetaSyncDone()) { res_.SetRes(CmdRes::kErrOther, "Not currently a slave"); return; } @@ -147,29 +193,27 @@ void DbSlaveofCmd::DoInitial() { } db_name_ = argv_[1]; - if (!g_pika_server->IsTableExist(db_name_)) { + if (!g_pika_server->IsDBExist(db_name_)) { res_.SetRes(CmdRes::kErrOther, "Invalid db name"); return; } - if (argv_.size() == 3 - && !strcasecmp(argv_[2].data(), "force")) { + if (argv_.size() == 3 && (strcasecmp(argv_[2].data(), "force") == 0)) { force_sync_ = true; return; } if (argv_.size() == 4) { - if (!strcasecmp(argv_[2].data(), "no") - && !strcasecmp(argv_[3].data(), "one")) { - is_noone_ = true; + if ((strcasecmp(argv_[2].data(), "no") == 0) && (strcasecmp(argv_[3].data(), "one") == 0)) { + is_none_ = true; return; } - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &filenum_) || filenum_ < 0) { + if ((pstd::string2int(argv_[2].data(), argv_[2].size(), &filenum_) == 0) || filenum_ < 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &offset_) || offset_ < 0) { + if ((pstd::string2int(argv_[3].data(), argv_[3].size(), &offset_) == 0) || offset_ < 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -177,31 +221,26 @@ void DbSlaveofCmd::DoInitial() { } } -void DbSlaveofCmd::Do(std::shared_ptr partition) { - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName(PartitionInfo(db_name_,0)); - if (!slave_partition) { +void DbSlaveofCmd::Do() { + std::shared_ptr<SyncSlaveDB> slave_db = g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name_)); + if (!slave_db) { res_.SetRes(CmdRes::kErrOther, "Db not found"); return; } Status s; - if (is_noone_) { - // In classic mode a table has only one partition - s = g_pika_rm->SendRemoveSlaveNodeRequest(db_name_, 0); + if (is_none_) { + s = g_pika_rm->SendRemoveSlaveNodeRequest(db_name_); } else { - if (slave_partition->State() == ReplState::kNoConnect - || slave_partition->State() == ReplState::kError) { + if (slave_db->State() == ReplState::kNoConnect || slave_db->State() == ReplState::kError || + slave_db->State() == ReplState::kDBNoConnect) { if (have_offset_) { - std::shared_ptr db_partition = - g_pika_server->GetPartitionByDbName(db_name_); - db_partition->logger()->SetProducerStatus(filenum_, offset_); + std::shared_ptr<SyncMasterDB> db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_)); + db->Logger()->SetProducerStatus(filenum_, offset_); } - ReplState state = force_sync_ - ? ReplState::kTryDBSync : ReplState::kTryConnect; - s = g_pika_rm->ActivateSyncSlavePartition( - RmNode(g_pika_server->master_ip(), g_pika_server->master_port(), - db_name_, 0), state); 
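+ // force requests a full resync (kTryDBSync); otherwise an incremental reconnect (kTryConnect) is tried first.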
+ ReplState state = force_sync_ ? ReplState::kTryDBSync : ReplState::kTryConnect; + s = g_pika_rm->ActivateSyncSlaveDB( + RmNode(g_pika_server->master_ip(), g_pika_server->master_port(), db_name_, 0), state); } } @@ -217,36 +256,53 @@ void AuthCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameAuth); return; } - pwd_ = argv_[1]; } -void AuthCmd::Do(std::shared_ptr partition) { - std::string root_password(g_pika_conf->requirepass()); - std::string user_password(g_pika_conf->userpass()); - if (user_password.empty() && root_password.empty()) { - res_.SetRes(CmdRes::kErrOther, "Client sent AUTH, but no password is set"); +void AuthCmd::Do() { + std::shared_ptr<net::NetConn> conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNamePing); + LOG(WARNING) << name_ << " weak ptr is empty"; return; } - if (pwd_ == user_password) { - res_.SetRes(CmdRes::kOk, "USER"); - } - if (pwd_ == root_password) { - res_.SetRes(CmdRes::kOk, "ROOT"); + std::string userName = ""; + std::string pwd = ""; + bool defaultAuth = false; + if (argv_.size() == 2) { + pwd = argv_[1]; +// defaultAuth = true; + } else { + userName = argv_[1]; + pwd = argv_[2]; } - if (res_.none()) { - res_.SetRes(CmdRes::kInvalidPwd); - return; + + AuthResult authResult; + if (userName == "") { + // try the default user first + authResult = AuthenticateUser(name(), Acl::DefaultUser, pwd, conn, true); + if (authResult != AuthResult::OK && authResult != AuthResult::NO_REQUIRE_PASS) { + // fall back to the default limit user + authResult = AuthenticateUser(name(), Acl::DefaultLimitUser, pwd, conn, defaultAuth); + } + } else { + authResult = AuthenticateUser(name(), userName, pwd, conn, defaultAuth); } - std::shared_ptr conn = GetConn(); - if (!conn) { - res_.SetRes(CmdRes::kErrOther, kCmdNamePing); - LOG(WARNING) << name_ << " weak ptr is empty"; - return; + switch (authResult) { + case AuthResult::INVALID_CONN: + res_.SetRes(CmdRes::kErrOther, kCmdNamePing); + return; + case AuthResult::INVALID_PASSWORD: + res_.AppendContent("-WRONGPASS invalid username-password pair or user is disabled."); + return; + case AuthResult::NO_REQUIRE_PASS: + res_.SetRes(CmdRes::kErrOther, "Client sent AUTH, but no password is set"); + return; + case AuthResult::OK: + break; } - std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); - cli_conn->auth_stat().ChecknUpdate(res().raw_message()); + res_.SetRes(CmdRes::kOk); } void BgsaveCmd::DoInitial() { @@ -255,28 +311,29 @@ void BgsaveCmd::DoInitial() { return; } if (argv_.size() == 2) { - std::vector tables; - slash::StringSplit(argv_[1], COMMA, tables); - for (const auto& table : tables) { - if (!g_pika_server->IsTableExist(table)) { - res_.SetRes(CmdRes::kInvalidTable, table); + std::vector<std::string> dbs; + pstd::StringSplit(argv_[1], COMMA, dbs); + for (const auto& db : dbs) { + if (!g_pika_server->IsDBExist(db)) { + res_.SetRes(CmdRes::kInvalidDB, db); return; } else { - bgsave_tables_.insert(table); + bgsave_dbs_.insert(db); } } + } else { + bgsave_dbs_ = g_pika_server->GetAllDBName(); } } -void BgsaveCmd::Do(std::shared_ptr partition) { - g_pika_server->DoSameThingSpecificTable(TaskType::kBgSave, bgsave_tables_); +void BgsaveCmd::Do() { + g_pika_server->DoSameThingSpecificDB(bgsave_dbs_, {TaskType::kBgSave}); LogCommand(); res_.AppendContent("+Background saving started"); } void CompactCmd::DoInitial() { - if (!CheckArg(argv_.size()) - || argv_.size() > 3) { + if (!CheckArg(argv_.size()) || argv_.size() > 3) { res_.SetRes(CmdRes::kWrongNum, kCmdNameCompact); return; } @@ -287,78 +344,94 @@ void CompactCmd::DoInitial() { } 
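+ // With no argument, compact runs over every DB.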
if (argv_.size() == 1) { - struct_type_ = "all"; + compact_dbs_ = g_pika_server->GetAllDBName(); } else if (argv_.size() == 2) { - struct_type_ = argv_[1]; - } else if (argv_.size() == 3) { - std::vector tables; - slash::StringSplit(argv_[1], COMMA, tables); - for (const auto& table : tables) { - if (!g_pika_server->IsTableExist(table)) { - res_.SetRes(CmdRes::kInvalidTable, table); + std::vector<std::string> dbs; + pstd::StringSplit(argv_[1], COMMA, dbs); + for (const auto& db : dbs) { + if (!g_pika_server->IsDBExist(db)) { + res_.SetRes(CmdRes::kInvalidDB, db); return; } else { - compact_tables_.insert(table); + compact_dbs_.insert(db); } } - struct_type_ = argv_[2]; } } -void CompactCmd::Do(std::shared_ptr partition) { - if (!strcasecmp(struct_type_.data(), "all")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactAll, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "string")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactStrings, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "hash")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactHashes, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "set")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactSets, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "zset")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactZSets, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "list")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactList, compact_tables_); - } else { - res_.SetRes(CmdRes::kInvalidDbType, struct_type_); +/* + * Because the meta CF stores the meta information of every data structure, + * compaction can only run over all data types; a single type cannot be + * targeted. + */ +void CompactCmd::Do() { + g_pika_server->DoSameThingSpecificDB(compact_dbs_, {TaskType::kCompactAll}); + LogCommand(); + res_.SetRes(CmdRes::kOk); +} + +void CompactRangeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameCompactRange); + return; + } + + if (g_pika_server->IsKeyScaning()) { + res_.SetRes(CmdRes::kErrOther, "The info keyspace operation is executing, try again later"); + return; + } + + std::vector<std::string> dbs; + pstd::StringSplit(argv_[1], COMMA, dbs); + for (const auto& db : dbs) { + if (!g_pika_server->IsDBExist(db)) { + res_.SetRes(CmdRes::kInvalidDB, db); + return; + } else { + compact_dbs_.insert(db); + } + } + start_key_ = argv_[2]; + end_key_ = argv_[3]; +} + +void CompactRangeCmd::Do() { + g_pika_server->DoSameThingSpecificDB(compact_dbs_, {TaskType::kCompactRangeAll, {start_key_, end_key_}}); + LogCommand(); + res_.SetRes(CmdRes::kOk); } void PurgelogstoCmd::DoInitial() { - if (!CheckArg(argv_.size()) - || argv_.size() > 3) { + if (!CheckArg(argv_.size()) || argv_.size() > 3) { res_.SetRes(CmdRes::kWrongNum, kCmdNamePurgelogsto); return; } std::string filename = argv_[1]; - if (filename.size() <= kBinlogPrefixLen || - kBinlogPrefix != filename.substr(0, kBinlogPrefixLen)) { + if (filename.size() <= kBinlogPrefixLen || kBinlogPrefix != filename.substr(0, kBinlogPrefixLen)) { res_.SetRes(CmdRes::kInvalidParameter); return; } std::string str_num = filename.substr(kBinlogPrefixLen); int64_t num = 0; - if (!slash::string2l(str_num.data(), str_num.size(), &num) || num < 0) { + if ((pstd::string2int(str_num.data(), str_num.size(), &num) == 0) || num < 0) { res_.SetRes(CmdRes::kInvalidParameter); return; } num_ = num; 
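+ // An optional third argument names the target DB; otherwise the configured default DB is used.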
- table_ = (argv_.size() == 3) ? argv_[2] :g_pika_conf->default_table(); - if (!g_pika_server->IsTableExist(table_)) { - res_.SetRes(CmdRes::kInvalidTable, table_); + db_ = (argv_.size() == 3) ? argv_[2] : g_pika_conf->default_db(); + if (!g_pika_server->IsDBExist(db_)) { + res_.SetRes(CmdRes::kInvalidDB, db_); return; } } -void PurgelogstoCmd::Do(std::shared_ptr partition) { - std::shared_ptr table_partition = g_pika_server->GetTablePartitionById(table_, 0); - if (!table_partition) { - res_.SetRes(CmdRes::kErrOther, "Partition not found"); +void PurgelogstoCmd::Do() { + std::shared_ptr<SyncMasterDB> sync_db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_)); + if (!sync_db) { + res_.SetRes(CmdRes::kErrOther, "DB not found"); } else { - table_partition->PurgeLogs(num_, true); + sync_db->StableLogger()->PurgeStableLogs(num_, true); res_.SetRes(CmdRes::kOk); } } @@ -370,11 +443,11 @@ void PingCmd::DoInitial() { } } -void PingCmd::Do(std::shared_ptr partition) { - std::shared_ptr conn = GetConn(); +void PingCmd::Do() { + std::shared_ptr<net::NetConn> conn = GetConn(); if (!conn) { res_.SetRes(CmdRes::kErrOther, kCmdNamePing); - LOG(WARNING) << name_ << " weak ptr is empty"; + LOG(WARNING) << name_ << " weak ptr is empty"; return; } std::shared_ptr<PikaClientConn> cli_conn = std::dynamic_pointer_cast<PikaClientConn>(conn); @@ -384,87 +457,171 @@ void PingCmd::Do(std::shared_ptr partition) { } res_.SetRes(CmdRes::kPong); } - void SelectCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameSelect); return; } - if (g_pika_conf->classic_mode()) { - int index = atoi(argv_[1].data()); - if (std::to_string(index) != argv_[1]) { - res_.SetRes(CmdRes::kInvalidIndex, kCmdNameSelect); - return; - } else if (index < 0 || index >= g_pika_conf->databases()) { - res_.SetRes(CmdRes::kInvalidIndex, kCmdNameSelect + " DB index is out of range"); - return; - } else { - table_name_ = "db" + argv_[1]; - } - } else { - // only pika codis use sharding mode currently, but pika - // codis only support single db, so in sharding mode we - // do no thing in select command - table_name_ = g_pika_conf->default_table(); + db_name_ = "db" + argv_[1]; + db_ = g_pika_server->GetDB(db_name_); + sync_db_ = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_)); + int index = atoi(argv_[1].data()); + if (std::to_string(index) != argv_[1]) { + res_.SetRes(CmdRes::kInvalidIndex, kCmdNameSelect); + return; } - if (!g_pika_server->IsTableExist(table_name_)) { - res_.SetRes(CmdRes::kInvalidTable, kCmdNameSelect); + if (index < 0 || index >= g_pika_conf->databases()) { + res_.SetRes(CmdRes::kInvalidIndex, kCmdNameSelect + " DB index is out of range"); + return; + } + if (db_ == nullptr || sync_db_ == nullptr) { + res_.SetRes(CmdRes::kInvalidDB, kCmdNameSelect); return; } } -void SelectCmd::Do(std::shared_ptr partition) { - std::shared_ptr conn = - std::dynamic_pointer_cast(GetConn()); +void SelectCmd::Do() { + std::shared_ptr<PikaClientConn> conn = std::dynamic_pointer_cast<PikaClientConn>(GetConn()); if (!conn) { res_.SetRes(CmdRes::kErrOther, kCmdNameSelect); - LOG(WARNING) << name_ << " weak ptr is empty"; + LOG(WARNING) << name_ << " weak ptr is empty"; return; } - conn->SetCurrentTable(table_name_); + conn->SetCurrentDb(db_name_); res_.SetRes(CmdRes::kOk); } void FlushallCmd::DoInitial() { + flushall_succeed_ = false; if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameFlushall); return; } } -void FlushallCmd::Do(std::shared_ptr partition) { - if (!partition) { - LOG(INFO) << "Flushall, but partition not found"; + +void FlushallCmd::Do() { + std::lock_guard l_trw(g_pika_server->GetDBLock()); 
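+ // Refuse to flush while a keyscan is running, then take the replication-manager and per-DB locks so no writes interleave with the flush.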
+ for (const auto& db_item : g_pika_server->GetDB()) { + if (db_item.second->IsKeyScaning()) { + res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, try again later"); + return; + } + } + g_pika_rm->DBLock(); + for (const auto& db_item : g_pika_server->GetDB()) { + db_item.second->DBLock(); + } + flushall_succeed_ = FlushAllWithoutLock(); + for (const auto& db_item : g_pika_server->GetDB()) { + db_item.second->DBUnlock(); + } + g_pika_rm->DBUnlock(); + if (flushall_succeed_) { + res_.SetRes(CmdRes::kOk); + } else if (res_.ret() == CmdRes::kErrOther) { + // the failing flush already set res_ } else { - partition->FlushDB(); + // flushall failed, but res_ was not set + res_.SetRes(CmdRes::kErrOther, + "Flushall failed: some DBs may have been flushed while others were not, check the WARNING/ERROR log to know " + "more; you can try again a moment later"); + } +} + +void FlushallCmd::DoThroughDB() { + Do(); +} + +void FlushallCmd::DoFlushCache(std::shared_ptr<DB> db) { + // clear cache + if (PIKA_CACHE_NONE != g_pika_conf->cache_mode()) { + g_pika_server->ClearCacheDbAsync(std::move(db)); + } +} + +bool FlushallCmd::FlushAllWithoutLock() { + for (const auto& db_item : g_pika_server->GetDB()) { + std::shared_ptr<DB> db = db_item.second; + DBInfo p_info(db->GetDBName()); + if (g_pika_rm->GetSyncMasterDBs().find(p_info) == g_pika_rm->GetSyncMasterDBs().end()) { + LOG(ERROR) << p_info.db_name_ + " not found during flushall"; + res_.SetRes(CmdRes::kErrOther, p_info.db_name_ + " not found during flushall"); + return false; + } + bool success = DoWithoutLock(db); + if (!success) { return false; } + } + return true; +} + +bool FlushallCmd::DoWithoutLock(std::shared_ptr<DB> db) { + if (!db) { + LOG(ERROR) << "Flushall, but DB not found"; + res_.SetRes(CmdRes::kErrOther, "Flushall, but DB not found"); + return false; + } + bool success = db->FlushDBWithoutLock(); + if (!success) { + // if the db was not flushed, return before clearing the cache + res_.SetRes(CmdRes::kErrOther, db->GetDBName() + " flushall failed due to other errors, please check the ERROR/WARNING log to know more"); + return false; + } + DoFlushCache(db); + return true; +} + 
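+// Flushall itself is not recorded in the binlog; DoBinlog() below appends one flushdb record per DB, built by ToRedisProtocol().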
+void FlushallCmd::DoBinlogByDB(const std::shared_ptr<SyncMasterDB>& sync_db) { + if (res().ok() && is_write() && g_pika_conf->write_binlog()) { + std::shared_ptr<net::NetConn> conn_ptr = GetConn(); + std::shared_ptr<std::string> resp_ptr = GetResp(); + // A dummy cmd appended by the system legitimately has neither conn nor resp. + if ((!conn_ptr || !resp_ptr) && (name_ != kCmdDummy)) { + if (!conn_ptr) { + LOG(WARNING) << sync_db->SyncDBInfo().ToString() << " conn empty."; + } + if (!resp_ptr) { + LOG(WARNING) << sync_db->SyncDBInfo().ToString() << " resp empty."; + } + res().SetRes(CmdRes::kErrOther); + return; + } + + Status s = sync_db->ConsensusProposeLog(shared_from_this()); + if (!s.ok()) { + LOG(WARNING) << sync_db->SyncDBInfo().ToString() << " Writing binlog failed, maybe no space left on device " + << s.ToString(); + res().SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + } +} + + +void FlushallCmd::DoBinlog() { + if (flushall_succeed_) { + for (auto& db : g_pika_server->GetDB()) { + DBInfo info(db.second->GetDBName()); + DoBinlogByDB(g_pika_rm->GetSyncMasterDBByName(info)); + } } } -// flushall convert flushdb writes to every partition binlog -std::string FlushallCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { +// Builds the flushdb record that flushall appends to each DB's binlog. +std::string FlushallCmd::ToRedisProtocol() { std::string content; content.reserve(RAW_ARGS_LEN); RedisAppendLen(content, 1, "*"); // to flushdb cmd std::string flushdb_cmd("flushdb"); - RedisAppendLen(content, flushdb_cmd.size(), "$"); + RedisAppendLenUint64(content, flushdb_cmd.size(), "$"); RedisAppendContent(content, flushdb_cmd); - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {});
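+ // content now holds the RESP encoding of a bare FLUSHDB: "*1\r\n$7\r\nflushdb\r\n".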
+ return content; } void FlushdbCmd::DoInitial() { + flush_succeed_ = false; if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameFlushdb); return; @@ -472,32 +629,65 @@ void FlushdbCmd::DoInitial() { if (argv_.size() == 1) { db_name_ = "all"; } else { - std::string struct_type = argv_[1]; - if (!strcasecmp(struct_type.data(), "string")) { - db_name_ = "strings"; - } else if (!strcasecmp(struct_type.data(), "hash")) { - db_name_ = "hashes"; - } else if (!strcasecmp(struct_type.data(), "set")) { - db_name_ = "sets"; - } else if (!strcasecmp(struct_type.data(), "zset")) { - db_name_ = "zsets"; - } else if (!strcasecmp(struct_type.data(), "list")) { - db_name_ = "lists"; - } else { - res_.SetRes(CmdRes::kInvalidDbType); - } + LOG(WARNING) << "not supported to flushdb with specific type in Floyd"; + res_.SetRes(CmdRes::kInvalidParameter, "not supported to flushdb with specific type in Floyd"); } } -void FlushdbCmd::Do(std::shared_ptr partition) { - if (!partition) { - LOG(INFO) << "Flushdb, but partition not found"; +void FlushdbCmd::Do() { + if (!db_) { + res_.SetRes(CmdRes::kInvalidDB, "DB not found while flushdb"); + return; + } + if (db_->IsKeyScaning()) { + res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, try again later"); + return; + } + std::lock_guard s_prw(g_pika_rm->GetDBLock()); + std::lock_guard l_prw(db_->GetDBLock()); + flush_succeed_ = DoWithoutLock(); + if (flush_succeed_) { + res_.SetRes(CmdRes::kOk); + } else if (res_.ret() == CmdRes::kErrOther || res_.ret() == CmdRes::kInvalidParameter) { + // flushdb failed and res_ was already set } else { - if (db_name_ == "all") { - partition->FlushDB(); - } else { - partition->FlushSubDB(db_name_); - } + res_.SetRes(CmdRes::kErrOther, "flushdb failed, maybe you can try again later (check WARNING/ERROR log to know more)"); + } +} + +void FlushdbCmd::DoThroughDB() { + Do(); +} + +void FlushdbCmd::DoUpdateCache() { + if (!flush_succeed_) { + // if flushdb failed, do not clear the cache either + return; + } + // clear cache + if (g_pika_conf->cache_mode() != PIKA_CACHE_NONE) { + g_pika_server->ClearCacheDbAsync(db_); + } +} + +bool FlushdbCmd::DoWithoutLock() { + if (!db_) { + LOG(ERROR) << db_name_ << " Flushdb, but DB not found"; + res_.SetRes(CmdRes::kErrOther, db_name_ + " Flushdb, but DB not found"); + return false; + } + DBInfo p_info(db_->GetDBName()); + if (g_pika_rm->GetSyncMasterDBs().find(p_info) == g_pika_rm->GetSyncMasterDBs().end()) { + LOG(ERROR) << "DB not found when flushing " << db_->GetDBName(); + res_.SetRes(CmdRes::kErrOther, db_->GetDBName() + " Flushdb, but DB not found"); + return false; + } + return db_->FlushDBWithoutLock(); +} + +void FlushdbCmd::DoBinlog() { + if (flush_succeed_) { + Cmd::DoBinlog(); + } } @@ -506,60 +696,105 @@ void ClientCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameClient); return; } - if (!strcasecmp(argv_[1].data(), "list") && argv_.size() == 2) { + + if ((strcasecmp(argv_[1].data(), "getname") == 0) && argv_.size() == 2) { + operation_ = argv_[1]; + return; + } + + if ((strcasecmp(argv_[1].data(), "setname") == 0) && argv_.size() != 3) { + res_.SetRes(CmdRes::kErrOther, + "Unknown subcommand or wrong number of arguments for " + "'SETNAME', try CLIENT SETNAME <name>"); + return; + } + if ((strcasecmp(argv_[1].data(), "setname") == 0) && argv_.size() == 3) { + operation_ = argv_[1]; + return; + } + + if ((strcasecmp(argv_[1].data(), "list") == 0) && argv_.size() == 2) { // nothing - } else if (!strcasecmp(argv_[1].data(), "list") && argv_.size() == 5) { - if (!strcasecmp(argv_[2].data(), "order") && - !strcasecmp(argv_[3].data(), "by")) { + } else if ((strcasecmp(argv_[1].data(), "list") == 0) && argv_.size() == 5) { + if ((strcasecmp(argv_[2].data(), "order") == 0) && (strcasecmp(argv_[3].data(), "by") == 0)) { info_ = argv_[4]; } else { - res_.SetRes(CmdRes::kErrOther, - "Syntax error, try CLIENT (LIST [order by [addr|idle])"); + res_.SetRes(CmdRes::kErrOther, "Syntax error, try CLIENT (LIST [order by [addr|idle])"); return; } - } else if (!strcasecmp(argv_[1].data(), "kill") && argv_.size() == 3) { + } else if (argv_.size() == 3 && (strcasecmp(argv_[1].data(), "kill") == 0)) { info_ = argv_[2]; + } else if (argv_.size() == 4 && + (strcasecmp(argv_[1].data(), "kill") == 0) && + (strcasecmp(argv_[2].data(), "type") == 0) && + ((strcasecmp(argv_[3].data(), KILLTYPE_NORMAL.data()) == 0) || (strcasecmp(argv_[3].data(), KILLTYPE_PUBSUB.data()) == 0))) { + // kill every client of the requested type + info_ = "type"; + kill_type_ = argv_[3]; } else { - res_.SetRes(CmdRes::kErrOther, - "Syntax error, try CLIENT (LIST [order by [addr|idle]| KILL ip:port)"); + res_.SetRes(CmdRes::kErrOther, "Syntax error, try CLIENT (LIST [order by [addr|idle]| KILL ip:port)"); return; } operation_ = argv_[1]; - return; } -void ClientCmd::Do(std::shared_ptr partition) { - if (!strcasecmp(operation_.data(), "list")) { +void ClientCmd::Do() { + std::shared_ptr<net::NetConn> conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNameClient); + return; + } + + if ((strcasecmp(operation_.data(), "getname") == 0) && argv_.size() == 2) { + res_.AppendString(conn->name()); + return; + } + + if ((strcasecmp(operation_.data(), "setname") == 0) && argv_.size() == 3) { + std::string name = argv_[2]; + conn->set_name(name); + res_.SetRes(CmdRes::kOk); + return; + } + + if (strcasecmp(operation_.data(), "list") == 0) { struct timeval now; - gettimeofday(&now, NULL); + gettimeofday(&now, nullptr); std::vector<ClientInfo> clients; g_pika_server->ClientList(&clients); - std::vector::iterator iter = clients.begin(); - 
std::string reply = ""; + auto iter = clients.begin(); + std::string reply; char buf[128]; - if (!strcasecmp(info_.data(), "addr")) { + if (strcasecmp(info_.data(), "addr") == 0) { std::sort(clients.begin(), clients.end(), AddrCompare); - } else if (!strcasecmp(info_.data(), "idle")) { + } else if (strcasecmp(info_.data(), "idle") == 0) { std::sort(clients.begin(), clients.end(), IdleCompare); } while (iter != clients.end()) { - snprintf(buf, sizeof(buf), "addr=%s fd=%d idle=%ld\n", - iter->ip_port.c_str(), iter->fd, - iter->last_interaction == 0 ? 0 : now.tv_sec - iter->last_interaction); + snprintf(buf, sizeof(buf), "addr=%s fd=%d idle=%ld\n", iter->ip_port.c_str(), iter->fd, + iter->last_interaction == 0 ? 0 : now.tv_sec - iter->last_interaction); // NOLINT reply.append(buf); iter++; } res_.AppendString(reply); - } else if (!strcasecmp(operation_.data(), "kill") && - !strcasecmp(info_.data(), "all")) { + } else if ((strcasecmp(operation_.data(), "kill") == 0) && (strcasecmp(info_.data(), "all") == 0)) { g_pika_server->ClientKillAll(); res_.SetRes(CmdRes::kOk); + } else if ((strcasecmp(operation_.data(), "kill") == 0) && (strcasecmp(info_.data(), "type") == 0)) { + if (kill_type_ == KILLTYPE_NORMAL) { + g_pika_server->ClientKillAllNormal(); + res_.SetRes(CmdRes::kOk); + } else if (kill_type_ == KILLTYPE_PUBSUB) { + g_pika_server->ClientKillPubSub(); + res_.SetRes(CmdRes::kOk); + } else { + res_.SetRes(CmdRes::kErrOther, "kill type is unknown"); + } } else if (g_pika_server->ClientKill(info_) == 1) { res_.SetRes(CmdRes::kOk); } else { res_.SetRes(CmdRes::kErrOther, "No such client"); } - return; } void ShutdownCmd::DoInitial() { @@ -569,25 +804,28 @@ void ShutdownCmd::DoInitial() { } // For now, only shutdown need check local - if (is_local()) { - std::shared_ptr conn = GetConn(); + if (IsLocal()) { + std::shared_ptr conn = GetConn(); if (conn) { - if (conn->ip_port().find("127.0.0.1") == std::string::npos - && conn->ip_port().find(g_pika_server->host()) == std::string::npos) { - LOG(WARNING) << "\'shutdown\' should be localhost" << " command from " << conn->ip_port(); + if (conn->ip_port().find("127.0.0.1") == std::string::npos && + conn->ip_port().find(g_pika_server->host()) == std::string::npos) { + LOG(WARNING) << "\'shutdown\' should be localhost" + << " command from " << conn->ip_port(); res_.SetRes(CmdRes::kErrOther, kCmdNameShutdown + " should be localhost"); } } else { - LOG(WARNING) << name_ << " weak ptr is empty"; + LOG(WARNING) << name_ << " weak ptr is empty"; res_.SetRes(CmdRes::kErrOther, kCmdNameShutdown); return; } } } // no return -void ShutdownCmd::Do(std::shared_ptr partition) { +void ShutdownCmd::Do() { DLOG(WARNING) << "handle \'shutdown\'"; + db_->DBUnlockShared(); g_pika_server->Exit(); + db_->DBLockShared(); res_.SetRes(CmdRes::kNone); } @@ -596,12 +834,24 @@ const std::string InfoCmd::kAllSection = "all"; const std::string InfoCmd::kServerSection = "server"; const std::string InfoCmd::kClientsSection = "clients"; const std::string InfoCmd::kStatsSection = "stats"; -const std::string InfoCmd::kExecCountSection= "command_exec_count"; +const std::string InfoCmd::kExecCountSection = "command_exec_count"; const std::string InfoCmd::kCPUSection = "cpu"; const std::string InfoCmd::kReplicationSection = "replication"; const std::string InfoCmd::kKeyspaceSection = "keyspace"; const std::string InfoCmd::kDataSection = "data"; +const std::string InfoCmd::kRocksDBSection = "rocksdb"; const std::string InfoCmd::kDebugSection = "debug"; +const std::string 
InfoCmd::kCommandStatsSection = "commandstats"; +const std::string InfoCmd::kCacheSection = "cache"; + + +const std::string ClientCmd::KILLTYPE_NORMAL = "normal"; +const std::string ClientCmd::KILLTYPE_PUBSUB = "pubsub"; + +void InfoCmd::Execute() { + std::shared_ptr<DB> db = g_pika_server->GetDB(db_name_); + Do(); +} void InfoCmd::DoInitial() { size_t argc = argv_.size(); @@ -612,26 +862,28 @@ void InfoCmd::DoInitial() { if (argc == 1) { info_section_ = kInfo; return; - } //then the agc is 2 or 3 + } // then the argc is 2 or 3 - if (!strcasecmp(argv_[1].data(), kAllSection.data())) { + if (strcasecmp(argv_[1].data(), kAllSection.data()) == 0) { info_section_ = kInfoAll; - } else if (!strcasecmp(argv_[1].data(), kServerSection.data())) { + keyspace_scan_dbs_ = g_pika_server->GetAllDBName(); + } else if (strcasecmp(argv_[1].data(), kServerSection.data()) == 0) { info_section_ = kInfoServer; - } else if (!strcasecmp(argv_[1].data(), kClientsSection.data())) { + } else if (strcasecmp(argv_[1].data(), kClientsSection.data()) == 0) { info_section_ = kInfoClients; - } else if (!strcasecmp(argv_[1].data(), kStatsSection.data())) { + } else if (strcasecmp(argv_[1].data(), kStatsSection.data()) == 0) { info_section_ = kInfoStats; - } else if (!strcasecmp(argv_[1].data(), kExecCountSection.data())) { + } else if (strcasecmp(argv_[1].data(), kExecCountSection.data()) == 0) { info_section_ = kInfoExecCount; - } else if (!strcasecmp(argv_[1].data(), kCPUSection.data())) { + } else if (strcasecmp(argv_[1].data(), kCPUSection.data()) == 0) { info_section_ = kInfoCPU; - } else if (!strcasecmp(argv_[1].data(), kReplicationSection.data())) { + } else if (strcasecmp(argv_[1].data(), kReplicationSection.data()) == 0) { info_section_ = kInfoReplication; - } else if (!strcasecmp(argv_[1].data(), kKeyspaceSection.data())) { + } else if (strcasecmp(argv_[1].data(), kKeyspaceSection.data()) == 0) { info_section_ = kInfoKeyspace; if (argc == 2) { LogCommand(); + return; } // info keyspace [ 0 | 1 | off ] @@ -651,23 +903,31 @@ void InfoCmd::DoInitial() { if (argc == 4) { - std::vector tables; - slash::StringSplit(argv_[3], COMMA, tables); - for (const auto& table : tables) { - if (!g_pika_server->IsTableExist(table)) { - res_.SetRes(CmdRes::kInvalidTable, table); + std::vector<std::string> dbs; + pstd::StringSplit(argv_[3], COMMA, dbs); + for (const auto& db : dbs) { + if (!g_pika_server->IsDBExist(db)) { + res_.SetRes(CmdRes::kInvalidDB, db); return; } else { - keyspace_scan_tables_.insert(table); + keyspace_scan_dbs_.insert(db); } } + } else { + keyspace_scan_dbs_ = g_pika_server->GetAllDBName(); } LogCommand(); return; - } else if (!strcasecmp(argv_[1].data(), kDataSection.data())) { + } else if (strcasecmp(argv_[1].data(), kDataSection.data()) == 0) { info_section_ = kInfoData; - } else if (!strcasecmp(argv_[1].data(), kDebugSection.data())) { + } else if (strcasecmp(argv_[1].data(), kRocksDBSection.data()) == 0) { + info_section_ = kInfoRocksDB; + } else if (strcasecmp(argv_[1].data(), kDebugSection.data()) == 0) { info_section_ = kInfoDebug; + } else if (strcasecmp(argv_[1].data(), kCommandStatsSection.data()) == 0) { + info_section_ = kInfoCommandStats; + } else if (strcasecmp(argv_[1].data(), kCacheSection.data()) == 0) { + info_section_ = kInfoCache; } else { info_section_ = kInfoErr; } @@ -676,7 +936,7 @@ void InfoCmd::DoInitial() { } } -void InfoCmd::Do(std::shared_ptr partition) { +void InfoCmd::Do() { std::string info; switch (info_section_) { case kInfo: @@ -705,11 +965,17 @@ void InfoCmd::Do(std::shared_ptr 
partition) { info.append("\r\n"); InfoExecCount(info); info.append("\r\n"); + InfoCommandStats(info); + info.append("\r\n"); + InfoCache(info, db_); + info.append("\r\n"); InfoCPU(info); info.append("\r\n"); InfoReplication(info); info.append("\r\n"); InfoKeyspace(info); + info.append("\r\n"); + InfoRocksDB(info); break; case kInfoServer: InfoServer(info); @@ -735,18 +1001,24 @@ void InfoCmd::Do(std::shared_ptr partition) { case kInfoData: InfoData(info); break; + case kInfoRocksDB: + InfoRocksDB(info); + break; case kInfoDebug: InfoDebug(info); break; + case kInfoCommandStats: + InfoCommandStats(info); + break; + case kInfoCache: + InfoCache(info, db_); + break; default: - //kInfoErr is nothing + // kInfoErr is nothing break; } - - res_.AppendStringLen(info.size()); - res_.AppendContent(info); - return; + res_.AppendString(info); } void InfoCmd::InfoServer(std::string& info) { @@ -757,33 +1029,35 @@ void InfoCmd::InfoServer(std::string& info) { host_info_valid = true; } - time_t current_time_s = time(NULL); + time_t current_time_s = time(nullptr); std::stringstream tmp_stream; char version[32]; - snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, - PIKA_MINOR, PIKA_PATCH); + snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, PIKA_MINOR, PIKA_PATCH); tmp_stream << "# Server\r\n"; tmp_stream << "pika_version:" << version << "\r\n"; tmp_stream << pika_build_git_sha << "\r\n"; - tmp_stream << "pika_build_compile_date: " << - pika_build_compile_date << "\r\n"; + tmp_stream << "pika_build_compile_date: " << pika_build_compile_date << "\r\n"; tmp_stream << "os:" << host_info.sysname << " " << host_info.release << " " << host_info.machine << "\r\n"; tmp_stream << "arch_bits:" << (reinterpret_cast(&host_info.machine) + strlen(host_info.machine) - 2) << "\r\n"; tmp_stream << "process_id:" << getpid() << "\r\n"; tmp_stream << "tcp_port:" << g_pika_conf->port() << "\r\n"; tmp_stream << "thread_num:" << g_pika_conf->thread_num() << "\r\n"; tmp_stream << "sync_thread_num:" << g_pika_conf->sync_thread_num() << "\r\n"; + tmp_stream << "sync_binlog_thread_num:" << g_pika_conf->sync_binlog_thread_num() << "\r\n"; tmp_stream << "uptime_in_seconds:" << (current_time_s - g_pika_server->start_time_s()) << "\r\n"; - tmp_stream << "uptime_in_days:" << (current_time_s / (24*3600) - g_pika_server->start_time_s() / (24*3600) + 1) << "\r\n"; + tmp_stream << "uptime_in_days:" << (current_time_s / (24 * 3600) - g_pika_server->start_time_s() / (24 * 3600) + 1) + << "\r\n"; tmp_stream << "config_file:" << g_pika_conf->conf_path() << "\r\n"; tmp_stream << "server_id:" << g_pika_conf->server_id() << "\r\n"; + tmp_stream << "run_id:" << g_pika_conf->run_id() << "\r\n"; info.append(tmp_stream.str()); } void InfoCmd::InfoClients(std::string& info) { std::stringstream tmp_stream; - tmp_stream << "# Clients\r\n"; + tmp_stream << "# Clients" + << "\r\n"; tmp_stream << "connected_clients:" << g_pika_server->ClientList() << "\r\n"; info.append(tmp_stream.str()); @@ -791,16 +1065,52 @@ void InfoCmd::InfoClients(std::string& info) { void InfoCmd::InfoStats(std::string& info) { std::stringstream tmp_stream; - tmp_stream << "# Stats\r\n"; + tmp_stream << "# Stats" + << "\r\n"; tmp_stream << "total_connections_received:" << g_pika_server->accumulative_connections() << "\r\n"; tmp_stream << "instantaneous_ops_per_sec:" << g_pika_server->ServerCurrentQps() << "\r\n"; tmp_stream << "total_commands_processed:" << g_pika_server->ServerQueryNum() << "\r\n"; + tmp_stream << "keyspace_hits:" << 
g_pika_server->ServerKeyspaceHits() << "\r\n"; + tmp_stream << "keyspace_misses:" << g_pika_server->ServerKeyspaceMisses() << "\r\n"; + + // Network stats + tmp_stream << "total_net_input_bytes:" << g_pika_server->NetInputBytes() + g_pika_server->NetReplInputBytes() + << "\r\n"; + tmp_stream << "total_net_output_bytes:" << g_pika_server->NetOutputBytes() + g_pika_server->NetReplOutputBytes() + << "\r\n"; + tmp_stream << "total_net_repl_input_bytes:" << g_pika_server->NetReplInputBytes() << "\r\n"; + tmp_stream << "total_net_repl_output_bytes:" << g_pika_server->NetReplOutputBytes() << "\r\n"; + tmp_stream << "instantaneous_input_kbps:" << g_pika_server->InstantaneousInputKbps() << "\r\n"; + tmp_stream << "instantaneous_output_kbps:" << g_pika_server->InstantaneousOutputKbps() << "\r\n"; + tmp_stream << "instantaneous_input_repl_kbps:" << g_pika_server->InstantaneousInputReplKbps() << "\r\n"; + tmp_stream << "instantaneous_output_repl_kbps:" << g_pika_server->InstantaneousOutputReplKbps() << "\r\n"; + tmp_stream << "is_bgsaving:" << (g_pika_server->IsBgSaving() ? "Yes" : "No") << "\r\n"; tmp_stream << "is_scaning_keyspace:" << (g_pika_server->IsKeyScaning() ? "Yes" : "No") << "\r\n"; tmp_stream << "is_compact:" << (g_pika_server->IsCompacting() ? "Yes" : "No") << "\r\n"; tmp_stream << "compact_cron:" << g_pika_conf->compact_cron() << "\r\n"; tmp_stream << "compact_interval:" << g_pika_conf->compact_interval() << "\r\n"; - + time_t current_time_s = time(nullptr); + PikaServer::BGSlotsReload bgslotsreload_info = g_pika_server->bgslots_reload(); + bool is_reloading = g_pika_server->GetSlotsreloading(); + tmp_stream << "is_slots_reloading:" << (is_reloading ? "Yes, " : "No, ") << bgslotsreload_info.s_start_time << ", " + << (is_reloading ? (current_time_s - bgslotsreload_info.start_time) + : (bgslotsreload_info.end_time - bgslotsreload_info.start_time)) + << "\r\n"; + PikaServer::BGSlotsCleanup bgslotscleanup_info = g_pika_server->bgslots_cleanup(); + bool is_cleaningup = g_pika_server->GetSlotscleaningup(); + tmp_stream << "is_slots_cleaningup:" << (is_cleaningup ? "Yes, " : "No, ") << bgslotscleanup_info.s_start_time << ", " + << (is_cleaningup ? (current_time_s - bgslotscleanup_info.start_time) + : (bgslotscleanup_info.end_time - bgslotscleanup_info.start_time)) + << "\r\n"; + bool is_migrating = g_pika_server->pika_migrate_thread_->IsMigrating(); + time_t start_migration_time = g_pika_server->pika_migrate_thread_->GetStartTime(); + time_t end_migration_time = g_pika_server->pika_migrate_thread_->GetEndTime(); + std::string start_migration_time_str = g_pika_server->pika_migrate_thread_->GetStartTimeStr(); + tmp_stream << "is_slots_migrating:" << (is_migrating ? "Yes, " : "No, ") << start_migration_time_str << ", " + << (is_migrating ? 
(current_time_s - start_migration_time) : (end_migration_time - start_migration_time)) + << "\r\n"; + tmp_stream << "slow_logs_count:" << g_pika_server->SlowlogCount() << "\r\n"; info.append(tmp_stream.str()); } @@ -808,8 +1118,8 @@ void InfoCmd::InfoExecCount(std::string& info) { std::stringstream tmp_stream; tmp_stream << "# Command_Exec_Count\r\n"; - std::unordered_map command_exec_count_table = g_pika_server->ServerExecCountTable(); - for (const auto& item : command_exec_count_table) { + std::unordered_map command_exec_count_db = g_pika_server->ServerExecCountDB(); + for (const auto& item : command_exec_count_db) { if (item.second == 0) { continue; } @@ -819,290 +1129,398 @@ void InfoCmd::InfoExecCount(std::string& info) { } void InfoCmd::InfoCPU(std::string& info) { - struct rusage self_ru, c_ru; + struct rusage self_ru; + struct rusage c_ru; getrusage(RUSAGE_SELF, &self_ru); getrusage(RUSAGE_CHILDREN, &c_ru); std::stringstream tmp_stream; - tmp_stream << "# CPU\r\n"; - tmp_stream << "used_cpu_sys:" << - setiosflags(std::ios::fixed) << std::setprecision(2) << - (float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000 << - "\r\n"; - tmp_stream << "used_cpu_user:" << - setiosflags(std::ios::fixed) << std::setprecision(2) << - (float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000 << - "\r\n"; - tmp_stream << "used_cpu_sys_children:" << - setiosflags(std::ios::fixed) << std::setprecision(2) << - (float)c_ru.ru_stime.tv_sec+(float)c_ru.ru_stime.tv_usec/1000000 << - "\r\n"; - tmp_stream << "used_cpu_user_children:" << - setiosflags(std::ios::fixed) << std::setprecision(2) << - (float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000 << - "\r\n"; - info.append(tmp_stream.str()); -} - -void InfoCmd::InfoShardingReplication(std::string& info) { - int role = 0; - std::string slave_list_string; - uint32_t slave_num = g_pika_server->GetShardingSlaveListString(slave_list_string); - if (slave_num) { - role |= PIKA_ROLE_MASTER; - } - std::string common_master; - std::string master_ip; - int master_port = 0; - g_pika_rm->FindCommonMaster(&common_master); - if (!common_master.empty()) { - role |= PIKA_ROLE_SLAVE; - if(!slash::ParseIpPortString(common_master, master_ip, master_port)) { - return; - } - } - - std::stringstream tmp_stream; - tmp_stream << "# Replication("; - switch (role) { - case PIKA_ROLE_SINGLE : - case PIKA_ROLE_MASTER : tmp_stream << "MASTER)\r\nrole:master\r\n"; break; - case PIKA_ROLE_SLAVE : tmp_stream << "SLAVE)\r\nrole:slave\r\n"; break; - case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE : tmp_stream << "Master && SLAVE)\r\nrole:master&&slave\r\n"; break; - default: info.append("ERR: server role is error\r\n"); return; - } - switch (role) { - case PIKA_ROLE_SLAVE : - tmp_stream << "master_host:" << master_ip << "\r\n"; - tmp_stream << "master_port:" << master_port << "\r\n"; - tmp_stream << "master_link_status:up"<< "\r\n"; - tmp_stream << "slave_priority:" << g_pika_conf->slave_priority() << "\r\n"; - break; - case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE : - tmp_stream << "master_host:" << master_ip << "\r\n"; - tmp_stream << "master_port:" << master_port << "\r\n"; - tmp_stream << "master_link_status:up"<< "\r\n"; - case PIKA_ROLE_SINGLE : - case PIKA_ROLE_MASTER : - tmp_stream << "connected_slaves:" << slave_num << "\r\n" << slave_list_string; - } + tmp_stream << "# CPU" + << "\r\n"; + tmp_stream << "used_cpu_sys:" << std::setiosflags(std::ios::fixed) << std::setprecision(2) + << static_cast(self_ru.ru_stime.tv_sec) + 
static_cast(self_ru.ru_stime.tv_usec) / 1000000 + << "\r\n"; + tmp_stream << "used_cpu_user:" << std::setiosflags(std::ios::fixed) << std::setprecision(2) + << static_cast(self_ru.ru_utime.tv_sec) + static_cast(self_ru.ru_utime.tv_usec) / 1000000 + << "\r\n"; + tmp_stream << "used_cpu_sys_children:" << std::setiosflags(std::ios::fixed) << std::setprecision(2) + << static_cast(c_ru.ru_stime.tv_sec) + static_cast(c_ru.ru_stime.tv_usec) / 1000000 + << "\r\n"; + tmp_stream << "used_cpu_user_children:" << std::setiosflags(std::ios::fixed) << std::setprecision(2) + << static_cast(c_ru.ru_utime.tv_sec) + static_cast(c_ru.ru_utime.tv_usec) / 1000000 + << "\r\n"; info.append(tmp_stream.str()); } void InfoCmd::InfoReplication(std::string& info) { - if (!g_pika_conf->classic_mode()) { - // In Sharding mode, show different replication info - InfoShardingReplication(info); - return; - } - int host_role = g_pika_server->role(); std::stringstream tmp_stream; std::stringstream out_of_sync; - - bool all_partition_sync = true; - slash::RWLock table_rwl(&g_pika_server->tables_rw_, false); - for (const auto& table_item : g_pika_server->tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& partition_item : table_item.second->partitions_) { - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_item.second->GetTableName(), - partition_item.second->GetPartitionId())); - if (!slave_partition) { - out_of_sync << "(" << partition_item.second->GetPartitionName() << ": InternalError)"; - continue; - } - if (slave_partition->State() != ReplState::kConnected) { - all_partition_sync = false; - out_of_sync << "(" << partition_item.second->GetPartitionName() << ":"; - if (slave_partition->State() == ReplState::kNoConnect) { - out_of_sync << "NoConnect)"; - } else if (slave_partition->State() == ReplState::kWaitDBSync) { - out_of_sync << "WaitDBSync)"; - } else if (slave_partition->State() == ReplState::kError) { - out_of_sync << "Error)"; - } else { - out_of_sync << "Other)"; - } + std::stringstream repl_connect_status; + int32_t syncing_full_count = 0; + bool all_db_sync = true; + std::shared_lock db_rwl(g_pika_server->dbs_rw_); + for (const auto& db_item : g_pika_server->GetDB()) { + std::shared_ptr slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_item.second->GetDBName())); + if (!slave_db) { + out_of_sync << "(" << db_item.first << ": InternalError)"; + continue; + } + repl_connect_status << db_item.first << ":"; + if (slave_db->State() != ReplState::kConnected) { + all_db_sync = false; + out_of_sync << "(" << db_item.first << ":"; + if (slave_db->State() == ReplState::kNoConnect) { + out_of_sync << "NoConnect)"; + repl_connect_status << "no_connect"; + } else if (slave_db->State() == ReplState::kWaitDBSync) { + out_of_sync << "WaitDBSync)"; + repl_connect_status << "syncing_full"; + ++syncing_full_count; + } else if (slave_db->State() == ReplState::kError) { + out_of_sync << "Error)"; + repl_connect_status << "error"; + } else if (slave_db->State() == ReplState::kWaitReply) { + out_of_sync << "kWaitReply)"; + repl_connect_status << "connecting"; + } else if (slave_db->State() == ReplState::kTryConnect) { + out_of_sync << "kTryConnect)"; + repl_connect_status << "try_to_incr_sync"; + } else if (slave_db->State() == ReplState::kTryDBSync) { + out_of_sync << "kTryDBSync)"; + repl_connect_status << "try_to_full_sync"; + } else if (slave_db->State() == ReplState::kDBNoConnect) { + out_of_sync << "kDBNoConnect)"; + 
repl_connect_status << "no_connect"; + } else { + out_of_sync << "Other)"; + repl_connect_status << "error"; } + } else { //slave_db->State() equal to kConnected + repl_connect_status << "connected"; } + repl_connect_status << "\r\n"; } tmp_stream << "# Replication("; switch (host_role) { - case PIKA_ROLE_SINGLE : - case PIKA_ROLE_MASTER : tmp_stream << "MASTER)\r\nrole:master\r\n"; break; - case PIKA_ROLE_SLAVE : tmp_stream << "SLAVE)\r\nrole:slave\r\n"; break; - case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE : tmp_stream << "Master && SLAVE)\r\nrole:master&&slave\r\n"; break; - default: info.append("ERR: server role is error\r\n"); return; + case PIKA_ROLE_SINGLE: + case PIKA_ROLE_MASTER: + tmp_stream << "MASTER)\r\nrole:master\r\n"; + break; + case PIKA_ROLE_SLAVE: + tmp_stream << "SLAVE)\r\nrole:slave\r\n"; + break; + case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE: + tmp_stream << "Master && SLAVE)\r\nrole:master&&slave\r\n"; + break; + default: + info.append("ERR: server role is error\r\n"); + return; } - + tmp_stream << "ReplicationID:" << g_pika_conf->replication_id() << "\r\n"; std::string slaves_list_str; switch (host_role) { - case PIKA_ROLE_SLAVE : + case PIKA_ROLE_SLAVE: tmp_stream << "master_host:" << g_pika_server->master_ip() << "\r\n"; tmp_stream << "master_port:" << g_pika_server->master_port() << "\r\n"; - tmp_stream << "master_link_status:" << (((g_pika_server->repl_state() == PIKA_REPL_META_SYNC_DONE) - && all_partition_sync) ? "up" : "down") << "\r\n"; + tmp_stream << "master_link_status:" + << (((g_pika_server->repl_state() == PIKA_REPL_META_SYNC_DONE) && all_db_sync) ? "up" : "down") + << "\r\n"; + tmp_stream << "repl_connect_status:\r\n" << repl_connect_status.str(); tmp_stream << "slave_priority:" << g_pika_conf->slave_priority() << "\r\n"; tmp_stream << "slave_read_only:" << g_pika_conf->slave_read_only() << "\r\n"; - if (!all_partition_sync) { - tmp_stream <<"db_repl_error_state:" << out_of_sync.str() << "\r\n"; + if (!all_db_sync) { + tmp_stream << "db_repl_state:" << out_of_sync.str() << "\r\n"; } break; - case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE : + case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE: tmp_stream << "master_host:" << g_pika_server->master_ip() << "\r\n"; tmp_stream << "master_port:" << g_pika_server->master_port() << "\r\n"; - tmp_stream << "master_link_status:" << (((g_pika_server->repl_state() == PIKA_REPL_META_SYNC_DONE) - && all_partition_sync) ? "up" : "down") << "\r\n"; + tmp_stream << "master_link_status:" + << (((g_pika_server->repl_state() == PIKA_REPL_META_SYNC_DONE) && all_db_sync) ? 
"up" : "down") + << "\r\n"; + tmp_stream << "repl_connect_status:\r\n" << repl_connect_status.str(); tmp_stream << "slave_read_only:" << g_pika_conf->slave_read_only() << "\r\n"; - if (!all_partition_sync) { - tmp_stream <<"db_repl_error_state:" << out_of_sync.str() << "\r\n"; + if (!all_db_sync) { + tmp_stream << "db_repl_state:" << out_of_sync.str() << "\r\n"; } - case PIKA_ROLE_SINGLE : - case PIKA_ROLE_MASTER : - tmp_stream << "connected_slaves:" << g_pika_server->GetSlaveListString(slaves_list_str) << "\r\n" << slaves_list_str; + case PIKA_ROLE_SINGLE: + case PIKA_ROLE_MASTER: + tmp_stream << "connected_slaves:" << g_pika_server->GetSlaveListString(slaves_list_str) << "\r\n" + << slaves_list_str; } + //if current instance is syncing full or has full sync corrupted, it's not qualified to be a new master + if (syncing_full_count == 0 && g_pika_conf->GetUnfinishedFullSyncCount() == 0) { + tmp_stream << "is_eligible_for_master_election:true" << "\r\n"; + } else { + tmp_stream << "is_eligible_for_master_election:false" << "\r\n"; + } Status s; uint32_t filenum = 0; uint64_t offset = 0; + uint64_t slave_repl_offset = 0; std::string safety_purge; - for (const auto& t_item : g_pika_server->tables_) { - slash::RWLock partition_rwl(&t_item.second->partitions_rw_, false); - for (const auto& p_item : t_item.second->partitions_) { - p_item.second->logger()->GetProducerStatus(&filenum, &offset); - tmp_stream << p_item.second->GetPartitionName() << " binlog_offset=" << filenum << " " << offset; - s = g_pika_rm->GetSafetyPurgeBinlogFromSMP(p_item.second->GetTableName(), p_item.second->GetPartitionId(), &safety_purge); - tmp_stream << ",safety_purge=" << (s.ok() ? safety_purge : "error") << "\r\n"; + std::shared_ptr master_db = nullptr; + for (const auto& t_item : g_pika_server->dbs_) { + std::shared_lock db_rwl(t_item.second->dbs_rw_); + std::string db_name = t_item.first; + master_db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!master_db) { + LOG(WARNING) << "Sync Master DB: " << db_name << " NotFound"; + continue; } - } - + master_db->Logger()->GetProducerStatus(&filenum, &offset); + slave_repl_offset += static_cast(filenum) * static_cast(g_pika_conf->binlog_file_size()); + slave_repl_offset += offset; + tmp_stream << db_name << ":binlog_offset=" << filenum << " " << offset; + s = master_db->GetSafetyPurgeBinlog(&safety_purge); + tmp_stream << ",safety_purge=" << (s.ok() ? 
safety_purge : "error") << "\r\n"; + } + tmp_stream << "slave_repl_offset:" << slave_repl_offset << "\r\n"; info.append(tmp_stream.str()); } void InfoCmd::InfoKeyspace(std::string& info) { if (off_) { - g_pika_server->DoSameThingSpecificTable(TaskType::kStopKeyScan, keyspace_scan_tables_); + g_pika_server->DoSameThingSpecificDB(keyspace_scan_dbs_, {TaskType::kStopKeyScan}); info.append("OK\r\n"); return; } - std::string table_name; + std::string db_name; KeyScanInfo key_scan_info; - int32_t duration; - std::vector key_infos; + int32_t duration = 0; + std::vector key_infos; std::stringstream tmp_stream; - tmp_stream << "# Keyspace\r\n"; - slash::RWLock rwl(&g_pika_server->tables_rw_, false); - for (const auto& table_item : g_pika_server->tables_) { - if (keyspace_scan_tables_.empty() - || keyspace_scan_tables_.find(table_item.first) != keyspace_scan_tables_.end()) { - table_name = table_item.second->GetTableName(); - key_scan_info = table_item.second->GetKeyScanInfo(); + tmp_stream << "# Keyspace" + << "\r\n"; + if (argv_.size() > 1 && strcasecmp(argv_[1].data(), kAllSection.data()) == 0) { + tmp_stream << "# Start async statistics\r\n"; + } else if (argv_.size() == 3 && strcasecmp(argv_[1].data(), kKeyspaceSection.data()) == 0) { + tmp_stream << "# Start async statistics\r\n"; + } else { + tmp_stream << "# Use \"info keyspace 1\" to do async statistics\r\n"; + } + std::shared_lock rwl(g_pika_server->dbs_rw_); + for (const auto& db_item : g_pika_server->dbs_) { + if (keyspace_scan_dbs_.find(db_item.first) != keyspace_scan_dbs_.end()) { + db_name = db_item.second->GetDBName(); + key_scan_info = db_item.second->GetKeyScanInfo(); key_infos = key_scan_info.key_infos; duration = key_scan_info.duration; - if (key_infos.size() != 5) { + if (key_infos.size() != (size_t)(storage::DataTypeNum)) { + LOG(ERROR) << "key_infos size is not equal with expected, potential data inconsistency"; info.append("info keyspace error\r\n"); return; } tmp_stream << "# Time:" << key_scan_info.s_start_time << "\r\n"; if (duration == -2) { - tmp_stream << "# Duration: " << "In Waiting\r\n"; + tmp_stream << "# Duration: " + << "In Waiting\r\n"; } else if (duration == -1) { - tmp_stream << "# Duration: " << "In Processing\r\n"; + tmp_stream << "# Duration: " + << "In Processing\r\n"; } else if (duration >= 0) { - tmp_stream << "# Duration: " << std::to_string(duration) + "s" << "\r\n"; + tmp_stream << "# Duration: " << std::to_string(duration) + "s" + << "\r\n"; } - tmp_stream << table_name << " Strings_keys=" << key_infos[0].keys << ", expires=" << key_infos[0].expires << ", invaild_keys=" << key_infos[0].invaild_keys << "\r\n"; - tmp_stream << table_name << " Hashes_keys=" << key_infos[1].keys << ", expires=" << key_infos[1].expires << ", invaild_keys=" << key_infos[1].invaild_keys << "\r\n"; - tmp_stream << table_name << " Lists_keys=" << key_infos[2].keys << ", expires=" << key_infos[2].expires << ", invaild_keys=" << key_infos[2].invaild_keys << "\r\n"; - tmp_stream << table_name << " Zsets_keys=" << key_infos[3].keys << ", expires=" << key_infos[3].expires << ", invaild_keys=" << key_infos[3].invaild_keys << "\r\n"; - tmp_stream << table_name << " Sets_keys=" << key_infos[4].keys << ", expires=" << key_infos[4].expires << ", invaild_keys=" << key_infos[4].invaild_keys << "\r\n\r\n"; + tmp_stream << db_name << " Strings_keys=" << key_infos[0].keys << ", expires=" << key_infos[0].expires + << ", invalid_keys=" << key_infos[0].invaild_keys << "\r\n"; + tmp_stream << db_name << " Hashes_keys=" << key_infos[1].keys << ", 
expires=" << key_infos[1].expires + << ", invalid_keys=" << key_infos[1].invaild_keys << "\r\n"; + tmp_stream << db_name << " Lists_keys=" << key_infos[2].keys << ", expires=" << key_infos[2].expires + << ", invalid_keys=" << key_infos[2].invaild_keys << "\r\n"; + tmp_stream << db_name << " Zsets_keys=" << key_infos[3].keys << ", expires=" << key_infos[3].expires + << ", invalid_keys=" << key_infos[3].invaild_keys << "\r\n"; + tmp_stream << db_name << " Sets_keys=" << key_infos[4].keys << ", expires=" << key_infos[4].expires + << ", invalid_keys=" << key_infos[4].invaild_keys << "\r\n\r\n"; + tmp_stream << db_name << " Streams_keys=" << key_infos[5].keys << ", expires=" << key_infos[5].expires + << ", invalid_keys=" << key_infos[5].invaild_keys << "\r\n\r\n"; } } info.append(tmp_stream.str()); - if (rescan_) { - g_pika_server->DoSameThingSpecificTable(TaskType::kStartKeyScan, keyspace_scan_tables_); + g_pika_server->DoSameThingSpecificDB(keyspace_scan_dbs_, {TaskType::kStartKeyScan}); } - return; } void InfoCmd::InfoData(std::string& info) { std::stringstream tmp_stream; std::stringstream db_fatal_msg_stream; - int64_t db_size = slash::Du(g_pika_conf->db_path()); - tmp_stream << "# Data" << "\r\n"; + uint64_t db_size = g_pika_server->GetDBSize(); + uint64_t log_size = g_pika_server->GetLogSize(); + + tmp_stream << "# Data" + << "\r\n"; tmp_stream << "db_size:" << db_size << "\r\n"; tmp_stream << "db_size_human:" << (db_size >> 20) << "M\r\n"; - int64_t log_size = slash::Du(g_pika_conf->log_path()); tmp_stream << "log_size:" << log_size << "\r\n"; tmp_stream << "log_size_human:" << (log_size >> 20) << "M\r\n"; tmp_stream << "compression:" << g_pika_conf->compression() << "\r\n"; // rocksdb related memory usage - std::map type_result; + std::map background_errors; uint64_t total_background_errors = 0; - uint64_t total_memtable_usage = 0, memtable_usage = 0; - uint64_t total_table_reader_usage = 0, table_reader_usage = 0; - slash::RWLock table_rwl(&g_pika_server->tables_rw_, false); - for (const auto& table_item : g_pika_server->tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& patition_item : table_item.second->partitions_) { - type_result.clear(); - memtable_usage = table_reader_usage = 0; - patition_item.second->DbRWLockReader(); - patition_item.second->db()->GetUsage(blackwidow::PROPERTY_TYPE_ROCKSDB_MEMTABLE, &memtable_usage); - patition_item.second->db()->GetUsage(blackwidow::PROPERTY_TYPE_ROCKSDB_TABLE_READER, &table_reader_usage); - patition_item.second->db()->GetUsage(blackwidow::PROPERTY_TYPE_ROCKSDB_BACKGROUND_ERRORS, &type_result); - patition_item.second->DbRWUnLock(); - total_memtable_usage += memtable_usage; - total_table_reader_usage += table_reader_usage; - for (const auto& item : type_result) { - if (item.second != 0) { - db_fatal_msg_stream << (total_background_errors != 0 ? 
"," : ""); - db_fatal_msg_stream << patition_item.second->GetPartitionName() << "/" << item.first; - total_background_errors += item.second; - } + uint64_t total_memtable_usage = 0; + uint64_t total_table_reader_usage = 0; + uint64_t memtable_usage = 0; + uint64_t table_reader_usage = 0; + std::shared_lock db_rwl(g_pika_server->dbs_rw_); + for (const auto& db_item : g_pika_server->dbs_) { + if (!db_item.second) { + continue; + } + background_errors.clear(); + memtable_usage = table_reader_usage = 0; + db_item.second->DBLockShared(); + db_item.second->storage()->GetUsage(storage::PROPERTY_TYPE_ROCKSDB_CUR_SIZE_ALL_MEM_TABLES, &memtable_usage); + db_item.second->storage()->GetUsage(storage::PROPERTY_TYPE_ROCKSDB_ESTIMATE_TABLE_READER_MEM, &table_reader_usage); + db_item.second->storage()->GetUsage(storage::PROPERTY_TYPE_ROCKSDB_BACKGROUND_ERRORS, &background_errors); + db_item.second->DBUnlockShared(); + total_memtable_usage += memtable_usage; + total_table_reader_usage += table_reader_usage; + for (const auto& item : background_errors) { + if (item.second != 0) { + db_fatal_msg_stream << (total_background_errors != 0 ? "," : ""); + db_fatal_msg_stream << db_item.first << "/" << item.first; + total_background_errors += item.second; } } } tmp_stream << "used_memory:" << (total_memtable_usage + total_table_reader_usage) << "\r\n"; tmp_stream << "used_memory_human:" << ((total_memtable_usage + total_table_reader_usage) >> 20) << "M\r\n"; + tmp_stream << "db_memtable_usage:" << total_memtable_usage << "\r\n"; tmp_stream << "db_tablereader_usage:" << total_table_reader_usage << "\r\n"; tmp_stream << "db_fatal:" << (total_background_errors != 0 ? "1" : "0") << "\r\n"; - tmp_stream << "db_fatal_msg:" << (total_background_errors != 0 ? db_fatal_msg_stream.str() : "NULL") << "\r\n"; + tmp_stream << "db_fatal_msg:" << (total_background_errors != 0 ? 
+void InfoCmd::InfoRocksDB(std::string& info) {
+  std::stringstream tmp_stream;
+
+  tmp_stream << "# RocksDB"
+             << "\r\n";
+
+  std::shared_lock db_rwl(g_pika_server->dbs_rw_);
+  for (const auto& db_item : g_pika_server->dbs_) {
+    if (!db_item.second) {
+      continue;
+    }
+    std::string rocksdb_info;
+    db_item.second->DBLockShared();
+    db_item.second->storage()->GetRocksDBInfo(rocksdb_info);
+    db_item.second->DBUnlockShared();
+    tmp_stream << rocksdb_info;
+  }
+  info.append(tmp_stream.str());
 }
 
 void InfoCmd::InfoDebug(std::string& info) {
   std::stringstream tmp_stream;
-  tmp_stream << "# Synchronization Status" << "\r\n";
+  tmp_stream << "# Synchronization Status"
+             << "\r\n";
+
+  info.append(tmp_stream.str());
   g_pika_rm->RmStatus(&info);
-  return;
+
+  tmp_stream.str(std::string());
+  tmp_stream << "# Running Status "
+             << "\r\n";
+
+  info.append(tmp_stream.str());
+  g_pika_server->ServerStatus(&info);
 }
 
-void ConfigCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameConfig);
-    return;
+void InfoCmd::InfoCommandStats(std::string& info) {
+  std::stringstream tmp_stream;
+  tmp_stream.precision(2);
+  tmp_stream.setf(std::ios::fixed);
+  tmp_stream << "# Commandstats" << "\r\n";
+  auto cmdstat_map = g_pika_cmd_table_manager->GetCommandStatMap();
+  for (auto iter : *cmdstat_map) {
+    if (iter.second.cmd_count != 0) {
+      tmp_stream << iter.first << ":"
+                 << "calls=" << iter.second.cmd_count << ", usec="
+                 << MethodofTotalTimeCalculation(iter.second.cmd_time_consuming)
+                 << ", usec_per_call=";
+      if (!iter.second.cmd_time_consuming) {
+        tmp_stream << 0 << "\r\n";
+      } else {
+        tmp_stream << MethodofCommandStatistics(iter.second.cmd_time_consuming, iter.second.cmd_count)
+                   << "\r\n";
+      }
+    }
   }
-  size_t argc = argv_.size();
-  if (!strcasecmp(argv_[1].data(), "get")) {
-    if (argc != 3) {
-      res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG get");
+  info.append(tmp_stream.str());
+}
+
+void InfoCmd::InfoCache(std::string& info, std::shared_ptr<DB> db) {
+  std::stringstream tmp_stream;
+  tmp_stream << "# Cache" << "\r\n";
+  if (PIKA_CACHE_NONE == g_pika_conf->cache_mode()) {
+    tmp_stream << "cache_status:Disable" << "\r\n";
+  } else {
+    auto cache_info = db->GetCacheInfo();
+    tmp_stream << "cache_status:" << CacheStatusToString(cache_info.status) << "\r\n";
+    tmp_stream << "cache_db_num:" << cache_info.cache_num << "\r\n";
+    tmp_stream << "cache_keys:" << cache_info.keys_num << "\r\n";
+    tmp_stream << "cache_memory:" << cache_info.used_memory << "\r\n";
+    tmp_stream << "cache_memory_human:" << (cache_info.used_memory >> 20) << "M\r\n";
+    tmp_stream << "hits:" << cache_info.hits << "\r\n";
+    tmp_stream << "all_cmds:" << cache_info.hits + cache_info.misses << "\r\n";
+    tmp_stream << "hits_per_sec:" << cache_info.hits_per_sec << "\r\n";
+    tmp_stream << "read_cmd_per_sec:" << cache_info.read_cmd_per_sec << "\r\n";
+    tmp_stream << "hitratio_per_sec:" << std::setprecision(4) << cache_info.hitratio_per_sec << "%" << "\r\n";
+    tmp_stream << "hitratio_all:" << std::setprecision(4) << cache_info.hitratio_all << "%" << "\r\n";
+    tmp_stream << "load_keys_per_sec:" << cache_info.load_keys_per_sec << "\r\n";
+    tmp_stream << "waitting_load_keys_num:" << cache_info.waitting_load_keys_num << "\r\n";
+  }
+  info.append(tmp_stream.str());
+}
+
+std::string InfoCmd::CacheStatusToString(int status) {
+  switch (status) {
+    case PIKA_CACHE_STATUS_NONE:
+      return std::string("None");
+    case PIKA_CACHE_STATUS_OK:
+      return std::string("Ok");
+    case PIKA_CACHE_STATUS_INIT:
+      return std::string("Init");
+    case PIKA_CACHE_STATUS_RESET:
+      return std::string("Reset");
+    case PIKA_CACHE_STATUS_DESTROY:
+      return std::string("Destroy");
+    case PIKA_CACHE_STATUS_CLEAR:
+      return std::string("Clear");
+    default:
+      return std::string("Unknown");
+  }
+}
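Aside: InfoCommandStats prints a calls-weighted average latency per command. The arithmetic, assuming MethodofCommandStatistics() is that average (the hunk only shows its call site):

    #include <cstdint>

    // usec_per_call, guarding the zero cases exactly as the branch above does.
    double UsecPerCall(uint64_t total_usec, uint64_t calls) {
      if (total_usec == 0 || calls == 0) {
        return 0.0;
      }
      return static_cast<double>(total_usec) / static_cast<double>(calls);
    }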
+void ConfigCmd::Execute() {
+  Do();
+}
+
+void ConfigCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameConfig);
+    return;
+  }
+  size_t argc = argv_.size();
+  if (strcasecmp(argv_[1].data(), "get") == 0) {
+    if (argc != 3) {
+      res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG get");
       return;
     }
-  } else if (!strcasecmp(argv_[1].data(), "set")) {
+  } else if (strcasecmp(argv_[1].data(), "set") == 0) {
     if (argc == 3 && argv_[2] != "*") {
       res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG set");
       return;
@@ -1110,12 +1528,12 @@ void ConfigCmd::DoInitial() {
       res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG set");
       return;
     }
-  } else if (!strcasecmp(argv_[1].data(), "rewrite")) {
+  } else if (strcasecmp(argv_[1].data(), "rewrite") == 0) {
     if (argc != 2) {
       res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG rewrite");
       return;
     }
-  } else if (!strcasecmp(argv_[1].data(), "resetstat")) {
+  } else if (strcasecmp(argv_[1].data(), "resetstat") == 0) {
    if (argc != 2) {
       res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG resetstat");
       return;
@@ -1125,391 +1543,731 @@ void ConfigCmd::DoInitial() {
     return;
   }
   config_args_v_.assign(argv_.begin() + 1, argv_.end());
-  return;
 }
 
-void ConfigCmd::Do(std::shared_ptr<Partition> partition) {
+void ConfigCmd::Do() {
   std::string config_ret;
-  if (!strcasecmp(config_args_v_[0].data(), "get")) {
+  if (strcasecmp(config_args_v_[0].data(), "get") == 0) {
     ConfigGet(config_ret);
-  } else if (!strcasecmp(config_args_v_[0].data(), "set")) {
-    ConfigSet(config_ret);
-  } else if (!strcasecmp(config_args_v_[0].data(), "rewrite")) {
+  } else if (strcasecmp(config_args_v_[0].data(), "set") == 0) {
+    ConfigSet(db_);
+  } else if (strcasecmp(config_args_v_[0].data(), "rewrite") == 0) {
     ConfigRewrite(config_ret);
-  } else if (!strcasecmp(config_args_v_[0].data(), "resetstat")) {
+  } else if (strcasecmp(config_args_v_[0].data(), "resetstat") == 0) {
     ConfigResetstat(config_ret);
+  } else if (strcasecmp(config_args_v_[0].data(), "rewritereplicationid") == 0) {
+    ConfigRewriteReplicationID(config_ret);
   }
   res_.AppendStringRaw(config_ret);
-  return;
 }
 
-static void EncodeString(std::string *dst, const std::string &value) {
+static void EncodeString(std::string* dst, const std::string& value) {
   dst->append("$");
   dst->append(std::to_string(value.size()));
-  dst->append("\r\n");
+  dst->append(kNewLine);
   dst->append(value.data(), value.size());
-  dst->append("\r\n");
+  dst->append(kNewLine);
 }
 
-static void EncodeInt32(std::string *dst, const int32_t v) {
+template <class T>
+static void EncodeNumber(std::string* dst, const T v) {
   std::string vstr = std::to_string(v);
   dst->append("$");
   dst->append(std::to_string(vstr.length()));
-  dst->append("\r\n");
+  dst->append(kNewLine);
   dst->append(vstr);
-  dst->append("\r\n");
-}
-
-static void EncodeInt64(std::string *dst, const int64_t v) {
-  std::string vstr = std::to_string(v);
-  dst->append("$");
-  dst->append(std::to_string(vstr.length()));
-  dst->append("\r\n");
-  dst->append(vstr);
-  dst->append("\r\n");
+  dst->append(kNewLine);
 }
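Aside: EncodeString and EncodeNumber above emit RESP bulk strings ($<length>\r\n<payload>\r\n), the building blocks of CONFIG GET's reply. A standalone illustration:

    #include <iostream>
    #include <string>

    void EncodeBulk(std::string* dst, const std::string& value) {
      dst->append("$");
      dst->append(std::to_string(value.size()));
      dst->append("\r\n");
      dst->append(value);
      dst->append("\r\n");
    }

    int main() {
      std::string body;
      EncodeBulk(&body, "port");
      EncodeBulk(&body, std::to_string(9221));
      std::cout << body;  // "$4\r\nport\r\n$4\r\n9221\r\n"
    }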
-void ConfigCmd::ConfigGet(std::string &ret) {
+void ConfigCmd::ConfigGet(std::string& ret) {
   size_t elements = 0;
   std::string config_body;
   std::string pattern = config_args_v_[1];
 
-  if (slash::stringmatch(pattern.data(), "port", 1)) {
+  if (pstd::stringmatch(pattern.data(), "port", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "port");
-    EncodeInt32(&config_body, g_pika_conf->port());
+    EncodeNumber(&config_body, g_pika_conf->port());
   }
 
-  if (slash::stringmatch(pattern.data(), "thread-num", 1)) {
+  if (pstd::stringmatch(pattern.data(), "thread-num", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "thread-num");
-    EncodeInt32(&config_body, g_pika_conf->thread_num());
+    EncodeNumber(&config_body, g_pika_conf->thread_num());
   }
 
-  if (slash::stringmatch(pattern.data(), "thread-pool-size", 1)) {
+  if (pstd::stringmatch(pattern.data(), "thread-pool-size", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "thread-pool-size");
-    EncodeInt32(&config_body, g_pika_conf->thread_pool_size());
+    EncodeNumber(&config_body, g_pika_conf->thread_pool_size());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "slow-cmd-thread-pool-size", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "slow-cmd-thread-pool-size");
+    EncodeNumber(&config_body, g_pika_conf->slow_cmd_thread_pool_size());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "admin-thread-pool-size", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "admin-thread-pool-size");
+    EncodeNumber(&config_body, g_pika_conf->admin_thread_pool_size());
   }
 
-  if (slash::stringmatch(pattern.data(), "sync-thread-num", 1)) {
+  if (pstd::stringmatch(pattern.data(), "userblacklist", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "userblacklist");
+    EncodeString(&config_body, g_pika_conf->user_blacklist_string());
+  }
+  if (pstd::stringmatch(pattern.data(), "slow-cmd-list", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "slow-cmd-list");
+    EncodeString(&config_body, g_pika_conf->GetSlowCmd());
+  }
+  if (pstd::stringmatch(pattern.data(), "admin-cmd-list", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "admin-cmd-list");
+    EncodeString(&config_body, g_pika_conf->GetAdminCmd());
+  }
+  if (pstd::stringmatch(pattern.data(), "sync-thread-num", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "sync-thread-num");
-    EncodeInt32(&config_body, g_pika_conf->sync_thread_num());
+    EncodeNumber(&config_body, g_pika_conf->sync_thread_num());
   }
 
-  if (slash::stringmatch(pattern.data(), "log-path", 1)) {
+  if (pstd::stringmatch(pattern.data(), "sync-binlog-thread-num", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "sync-binlog-thread-num");
+    EncodeNumber(&config_body, g_pika_conf->sync_binlog_thread_num());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "log-path", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "log-path");
     EncodeString(&config_body, g_pika_conf->log_path());
   }
 
-  if (slash::stringmatch(pattern.data(), "db-path", 1)) {
+  if (pstd::stringmatch(pattern.data(), "db-path", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "db-path");
     EncodeString(&config_body, g_pika_conf->db_path());
   }
 
-  if (slash::stringmatch(pattern.data(), "maxmemory", 1)) {
+  if (pstd::stringmatch(pattern.data(), "maxmemory", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "maxmemory");
-    EncodeInt64(&config_body, g_pika_conf->write_buffer_size());
+    EncodeNumber(&config_body, g_pika_conf->write_buffer_size());
   }
 
-  if (slash::stringmatch(pattern.data(), "write-buffer-size", 1)) {
+  if (pstd::stringmatch(pattern.data(), "write-buffer-size", 1) != 0) {
     elements += 2;
EncodeString(&config_body, "write-buffer-size"); - EncodeInt64(&config_body, g_pika_conf->write_buffer_size()); + EncodeNumber(&config_body, g_pika_conf->write_buffer_size()); + } + + if (pstd::stringmatch(pattern.data(), "arena-block-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "arena-block-size"); + EncodeNumber(&config_body, g_pika_conf->arena_block_size()); } - if (slash::stringmatch(pattern.data(), "timeout", 1)) { + if (pstd::stringmatch(pattern.data(), "max-write-buffer-num", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-write-buffer-num"); + EncodeNumber(&config_body, g_pika_conf->max_write_buffer_number()); + } + + if (pstd::stringmatch(pattern.data(), "timeout", 1) != 0) { elements += 2; EncodeString(&config_body, "timeout"); - EncodeInt32(&config_body, g_pika_conf->timeout()); + EncodeNumber(&config_body, g_pika_conf->timeout()); } - if (slash::stringmatch(pattern.data(), "requirepass", 1)) { + if (pstd::stringmatch(pattern.data(), "requirepass", 1) != 0) { elements += 2; EncodeString(&config_body, "requirepass"); EncodeString(&config_body, g_pika_conf->requirepass()); } - if (slash::stringmatch(pattern.data(), "masterauth", 1)) { + if (pstd::stringmatch(pattern.data(), "masterauth", 1) != 0) { elements += 2; EncodeString(&config_body, "masterauth"); EncodeString(&config_body, g_pika_conf->masterauth()); } - if (slash::stringmatch(pattern.data(), "userpass", 1)) { + if (pstd::stringmatch(pattern.data(), "userpass", 1) != 0) { elements += 2; EncodeString(&config_body, "userpass"); EncodeString(&config_body, g_pika_conf->userpass()); } - if (slash::stringmatch(pattern.data(), "userblacklist", 1)) { + if (pstd::stringmatch(pattern.data(), "instance-mode", 1) != 0) { elements += 2; - EncodeString(&config_body, "userblacklist"); - EncodeString(&config_body, (g_pika_conf->suser_blacklist()).c_str()); + EncodeString(&config_body, "instance-mode"); + EncodeString(&config_body, "classic"); } - if (slash::stringmatch(pattern.data(), "instance-mode", 1)) { + if (pstd::stringmatch(pattern.data(), "databases", 1) != 0) { elements += 2; - EncodeString(&config_body, "instance-mode"); - EncodeString(&config_body, (g_pika_conf->classic_mode() ? "classic" : "sharding")); + EncodeString(&config_body, "databases"); + EncodeNumber(&config_body, g_pika_conf->databases()); } - if (g_pika_conf->classic_mode() - && slash::stringmatch(pattern.data(), "databases", 1)) { + if (pstd::stringmatch(pattern.data(), "daemonize", 1)) { elements += 2; - EncodeString(&config_body, "databases"); - EncodeInt32(&config_body, g_pika_conf->databases()); + EncodeString(&config_body, "daemonize"); + EncodeString(&config_body, g_pika_conf->daemonize() ? "yes" : "no"); } - if (!g_pika_conf->classic_mode() - && slash::stringmatch(pattern.data(), "default-slot-num", 1)) { + if (pstd::stringmatch(pattern.data(), "slotmigrate", 1)) { elements += 2; - EncodeString(&config_body, "default-slot-num"); - EncodeInt32(&config_body, g_pika_conf->default_slot_num()); + EncodeString(&config_body, "slotmigrate"); + EncodeString(&config_body, g_pika_conf->slotmigrate() ? "yes" : "no"); } - if (slash::stringmatch(pattern.data(), "daemonize", 1)) { + if (pstd::stringmatch(pattern.data(), "slow-cmd-pool", 1)) { elements += 2; - EncodeString(&config_body, "daemonize"); - EncodeString(&config_body, g_pika_conf->daemonize() ? "yes" : "no"); + EncodeString(&config_body, "slow-cmd-pool"); + EncodeString(&config_body, g_pika_conf->slow_cmd_pool() ? 
"yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "slotmigrate-thread-num", 1)!= 0) { + elements += 2; + EncodeString(&config_body, "slotmigrate-thread-num"); + EncodeNumber(&config_body, g_pika_conf->slotmigrate_thread_num()); } - if (slash::stringmatch(pattern.data(), "dump-path", 1)) { + if (pstd::stringmatch(pattern.data(), "thread-migrate-keys-num", 1)!= 0) { + elements += 2; + EncodeString(&config_body, "thread-migrate-keys-num"); + EncodeNumber(&config_body, g_pika_conf->thread_migrate_keys_num()); + } + + if (pstd::stringmatch(pattern.data(), "dump-path", 1) != 0) { elements += 2; EncodeString(&config_body, "dump-path"); EncodeString(&config_body, g_pika_conf->bgsave_path()); } - if (slash::stringmatch(pattern.data(), "dump-expire", 1)) { + if (pstd::stringmatch(pattern.data(), "dump-expire", 1) != 0) { elements += 2; EncodeString(&config_body, "dump-expire"); - EncodeInt32(&config_body, g_pika_conf->expire_dump_days()); + EncodeNumber(&config_body, g_pika_conf->expire_dump_days()); } - if (slash::stringmatch(pattern.data(), "dump-prefix", 1)) { + if (pstd::stringmatch(pattern.data(), "dump-prefix", 1) != 0) { elements += 2; EncodeString(&config_body, "dump-prefix"); EncodeString(&config_body, g_pika_conf->bgsave_prefix()); } - if (slash::stringmatch(pattern.data(), "pidfile", 1)) { + if (pstd::stringmatch(pattern.data(), "pidfile", 1) != 0) { elements += 2; EncodeString(&config_body, "pidfile"); EncodeString(&config_body, g_pika_conf->pidfile()); } - if (slash::stringmatch(pattern.data(), "maxclients", 1)) { + if (pstd::stringmatch(pattern.data(), "maxclients", 1) != 0) { elements += 2; EncodeString(&config_body, "maxclients"); - EncodeInt32(&config_body, g_pika_conf->maxclients()); + EncodeNumber(&config_body, g_pika_conf->maxclients()); } - if (slash::stringmatch(pattern.data(), "target-file-size-base", 1)) { + if (pstd::stringmatch(pattern.data(), "target-file-size-base", 1) != 0) { elements += 2; EncodeString(&config_body, "target-file-size-base"); - EncodeInt32(&config_body, g_pika_conf->target_file_size_base()); + EncodeNumber(&config_body, g_pika_conf->target_file_size_base()); } - if (slash::stringmatch(pattern.data(), "max-cache-statistic-keys", 1)) { + if (pstd::stringmatch(pattern.data(), "max-cache-statistic-keys", 1) != 0) { elements += 2; EncodeString(&config_body, "max-cache-statistic-keys"); - EncodeInt32(&config_body, g_pika_conf->max_cache_statistic_keys()); + EncodeNumber(&config_body, g_pika_conf->max_cache_statistic_keys()); } - if (slash::stringmatch(pattern.data(), "small-compaction-threshold", 1)) { + if (pstd::stringmatch(pattern.data(), "small-compaction-threshold", 1) != 0) { elements += 2; EncodeString(&config_body, "small-compaction-threshold"); - EncodeInt32(&config_body, g_pika_conf->small_compaction_threshold()); + EncodeNumber(&config_body, g_pika_conf->small_compaction_threshold()); + } + + if (pstd::stringmatch(pattern.data(), "small-compaction-duration-threshold", 1) != 0) { + elements += 2; + EncodeString(&config_body, "small-compaction-duration-threshold"); + EncodeNumber(&config_body, g_pika_conf->small_compaction_duration_threshold()); } - if (slash::stringmatch(pattern.data(), "max-background-flushes", 1)) { + if (pstd::stringmatch(pattern.data(), "max-background-flushes", 1) != 0) { elements += 2; EncodeString(&config_body, "max-background-flushes"); - EncodeInt32(&config_body, g_pika_conf->max_background_flushes()); + EncodeNumber(&config_body, g_pika_conf->max_background_flushes()); } - if (slash::stringmatch(pattern.data(), 
"max-background-compactions", 1)) { + if (pstd::stringmatch(pattern.data(), "max-background-compactions", 1) != 0) { elements += 2; EncodeString(&config_body, "max-background-compactions"); - EncodeInt32(&config_body, g_pika_conf->max_background_compactions()); + EncodeNumber(&config_body, g_pika_conf->max_background_compactions()); + } + + if (pstd::stringmatch(pattern.data(), "max-background-jobs", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-background-jobs"); + EncodeNumber(&config_body, g_pika_conf->max_background_jobs()); } - if (slash::stringmatch(pattern.data(), "max-cache-files", 1)) { + if (pstd::stringmatch(pattern.data(), "max-cache-files", 1) != 0) { elements += 2; EncodeString(&config_body, "max-cache-files"); - EncodeInt32(&config_body, g_pika_conf->max_cache_files()); + EncodeNumber(&config_body, g_pika_conf->max_cache_files()); } - if (slash::stringmatch(pattern.data(), "max-bytes-for-level-multiplier", 1)) { + if (pstd::stringmatch(pattern.data(), "max-bytes-for-level-multiplier", 1) != 0) { elements += 2; EncodeString(&config_body, "max-bytes-for-level-multiplier"); - EncodeInt32(&config_body, g_pika_conf->max_bytes_for_level_multiplier()); + EncodeNumber(&config_body, g_pika_conf->max_bytes_for_level_multiplier()); } - if (slash::stringmatch(pattern.data(), "block-size", 1)) { + if (pstd::stringmatch(pattern.data(), "block-size", 1) != 0) { elements += 2; EncodeString(&config_body, "block-size"); - EncodeInt64(&config_body, g_pika_conf->block_size()); + EncodeNumber(&config_body, g_pika_conf->block_size()); } - if (slash::stringmatch(pattern.data(), "block-cache", 1)) { + if (pstd::stringmatch(pattern.data(), "block-cache", 1) != 0) { elements += 2; EncodeString(&config_body, "block-cache"); - EncodeInt64(&config_body, g_pika_conf->block_cache()); + EncodeNumber(&config_body, g_pika_conf->block_cache()); } - if (slash::stringmatch(pattern.data(), "share-block-cache", 1)) { + if (pstd::stringmatch(pattern.data(), "share-block-cache", 1) != 0) { elements += 2; EncodeString(&config_body, "share-block-cache"); EncodeString(&config_body, g_pika_conf->share_block_cache() ? "yes" : "no"); } - if (slash::stringmatch(pattern.data(), "cache-index-and-filter-blocks", 1)) { + if (pstd::stringmatch(pattern.data(), "enable-partitioned-index-filters", 1) != 0) { + elements += 2; + EncodeString(&config_body, "enable-partitioned-index-filters"); + EncodeString(&config_body, g_pika_conf->enable_partitioned_index_filters() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "cache-index-and-filter-blocks", 1) != 0) { elements += 2; EncodeString(&config_body, "cache-index-and-filter-blocks"); EncodeString(&config_body, g_pika_conf->cache_index_and_filter_blocks() ? "yes" : "no"); } - if (slash::stringmatch(pattern.data(), "optimize-filters-for-hits", 1)) { + if (pstd::stringmatch(pattern.data(), "optimize-filters-for-hits", 1) != 0) { elements += 2; EncodeString(&config_body, "optimize-filters-for-hits"); EncodeString(&config_body, g_pika_conf->optimize_filters_for_hits() ? "yes" : "no"); } - if (slash::stringmatch(pattern.data(), "level-compaction-dynamic-level-bytes", 1)) { + if (pstd::stringmatch(pattern.data(), "level-compaction-dynamic-level-bytes", 1) != 0) { elements += 2; EncodeString(&config_body, "level-compaction-dynamic-level-bytes"); EncodeString(&config_body, g_pika_conf->level_compaction_dynamic_level_bytes() ? 
"yes" : "no"); } - if (slash::stringmatch(pattern.data(), "expire-logs-days", 1)) { + if (pstd::stringmatch(pattern.data(), "expire-logs-days", 1) != 0) { elements += 2; EncodeString(&config_body, "expire-logs-days"); - EncodeInt32(&config_body, g_pika_conf->expire_logs_days()); + EncodeNumber(&config_body, g_pika_conf->expire_logs_days()); } - if (slash::stringmatch(pattern.data(), "expire-logs-nums", 1)) { + if (pstd::stringmatch(pattern.data(), "expire-logs-nums", 1) != 0) { elements += 2; EncodeString(&config_body, "expire-logs-nums"); - EncodeInt32(&config_body, g_pika_conf->expire_logs_nums()); + EncodeNumber(&config_body, g_pika_conf->expire_logs_nums()); } - if (slash::stringmatch(pattern.data(), "root-connection-num", 1)) { + if (pstd::stringmatch(pattern.data(), "root-connection-num", 1) != 0) { elements += 2; EncodeString(&config_body, "root-connection-num"); - EncodeInt32(&config_body, g_pika_conf->root_connection_num()); + EncodeNumber(&config_body, g_pika_conf->root_connection_num()); } - if (slash::stringmatch(pattern.data(), "slowlog-write-errorlog", 1)) { + if (pstd::stringmatch(pattern.data(), "slowlog-write-errorlog", 1) != 0) { elements += 2; EncodeString(&config_body, "slowlog-write-errorlog"); EncodeString(&config_body, g_pika_conf->slowlog_write_errorlog() ? "yes" : "no"); } - if (slash::stringmatch(pattern.data(), "slowlog-log-slower-than", 1)) { + if (pstd::stringmatch(pattern.data(), "slowlog-log-slower-than", 1) != 0) { elements += 2; EncodeString(&config_body, "slowlog-log-slower-than"); - EncodeInt32(&config_body, g_pika_conf->slowlog_slower_than()); + EncodeNumber(&config_body, g_pika_conf->slowlog_slower_than()); } - if (slash::stringmatch(pattern.data(), "slowlog-max-len", 1)) { + if (pstd::stringmatch(pattern.data(), "slowlog-max-len", 1) != 0) { elements += 2; EncodeString(&config_body, "slowlog-max-len"); - EncodeInt32(&config_body, g_pika_conf->slowlog_max_len()); + EncodeNumber(&config_body, g_pika_conf->slowlog_max_len()); } - if (slash::stringmatch(pattern.data(), "write-binlog", 1)) { + if (pstd::stringmatch(pattern.data(), "write-binlog", 1) != 0) { elements += 2; EncodeString(&config_body, "write-binlog"); EncodeString(&config_body, g_pika_conf->write_binlog() ? 
"yes" : "no"); } - - if (slash::stringmatch(pattern.data(), "binlog-file-size", 1)) { + if (pstd::stringmatch(pattern.data(), "binlog-file-size", 1) != 0) { elements += 2; EncodeString(&config_body, "binlog-file-size"); - EncodeInt32(&config_body, g_pika_conf->binlog_file_size()); + EncodeNumber(&config_body, g_pika_conf->binlog_file_size()); } - if (slash::stringmatch(pattern.data(), "max-cache-statistic-keys", 1)) { + if (pstd::stringmatch(pattern.data(), "max-write-buffer-size", 1) != 0) { elements += 2; - EncodeString(&config_body, "max-cache-statistic-keys"); - EncodeInt32(&config_body, g_pika_conf->max_cache_statistic_keys()); + EncodeString(&config_body, "max-write-buffer-size"); + EncodeNumber(&config_body, g_pika_conf->max_write_buffer_size()); } - if (slash::stringmatch(pattern.data(), "small-compaction-threshold", 1)) { + if (pstd::stringmatch(pattern.data(), "max-total-wal-size", 1) != 0) { elements += 2; - EncodeString(&config_body, "small-compaction-threshold"); - EncodeInt32(&config_body, g_pika_conf->small_compaction_threshold()); + EncodeString(&config_body, "max-total-wal-size"); + EncodeNumber(&config_body, g_pika_conf->MaxTotalWalSize()); } - if (slash::stringmatch(pattern.data(), "max-write-buffer-size", 1)) { + if (pstd::stringmatch(pattern.data(), "min-write-buffer-number-to-merge", 1) != 0) { elements += 2; - EncodeString(&config_body, "max-write-buffer-size"); - EncodeInt64(&config_body, g_pika_conf->max_write_buffer_size()); + EncodeString(&config_body, "min-write-buffer-number-to-merge"); + EncodeNumber(&config_body, g_pika_conf->min_write_buffer_number_to_merge()); + } + + if (pstd::stringmatch(pattern.data(), "level0-stop-writes-trigger", 1) != 0) { + elements += 2; + EncodeString(&config_body, "level0-stop-writes-trigger"); + EncodeNumber(&config_body, g_pika_conf->level0_stop_writes_trigger()); + } + + if (pstd::stringmatch(pattern.data(), "level0-slowdown-writes-trigger", 1) != 0) { + elements += 2; + EncodeString(&config_body, "level0-slowdown-writes-trigger"); + EncodeNumber(&config_body, g_pika_conf->level0_slowdown_writes_trigger()); + } + + if (pstd::stringmatch(pattern.data(), "level0-file-num-compaction-trigger", 1) != 0) { + elements += 2; + EncodeString(&config_body, "level0-file-num-compaction-trigger"); + EncodeNumber(&config_body, g_pika_conf->level0_file_num_compaction_trigger()); } - if (slash::stringmatch(pattern.data(), "max-client-response-size", 1)) { + if (pstd::stringmatch(pattern.data(), "max-client-response-size", 1) != 0) { elements += 2; EncodeString(&config_body, "max-client-response-size"); - EncodeInt64(&config_body, g_pika_conf->max_client_response_size()); + EncodeNumber(&config_body, g_pika_conf->max_client_response_size()); } - if (slash::stringmatch(pattern.data(), "compression", 1)) { + if (pstd::stringmatch(pattern.data(), "compression", 1) != 0) { elements += 2; EncodeString(&config_body, "compression"); EncodeString(&config_body, g_pika_conf->compression()); } - if (slash::stringmatch(pattern.data(), "db-sync-path", 1)) { + if (pstd::stringmatch(pattern.data(), "db-sync-path", 1) != 0) { elements += 2; EncodeString(&config_body, "db-sync-path"); EncodeString(&config_body, g_pika_conf->db_sync_path()); } - if (slash::stringmatch(pattern.data(), "db-sync-speed", 1)) { + if (pstd::stringmatch(pattern.data(), "db-sync-speed", 1) != 0) { elements += 2; EncodeString(&config_body, "db-sync-speed"); - EncodeInt32(&config_body, g_pika_conf->db_sync_speed()); + EncodeNumber(&config_body, g_pika_conf->db_sync_speed()); } - if 
+  if (pstd::stringmatch(pattern.data(), "compact-cron", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "compact-cron");
     EncodeString(&config_body, g_pika_conf->compact_cron());
   }
 
-  if (slash::stringmatch(pattern.data(), "compact-interval", 1)) {
+  if (pstd::stringmatch(pattern.data(), "compact-interval", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "compact-interval");
     EncodeString(&config_body, g_pika_conf->compact_interval());
   }
-
-  if (slash::stringmatch(pattern.data(), "network-interface", 1)) {
+  if (pstd::stringmatch(pattern.data(), "disable_auto_compactions", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "disable_auto_compactions");
+    EncodeString(&config_body, g_pika_conf->disable_auto_compactions() ? "true" : "false");
+  }
+  if (pstd::stringmatch(pattern.data(), "network-interface", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "network-interface");
     EncodeString(&config_body, g_pika_conf->network_interface());
   }
 
-  if (slash::stringmatch(pattern.data(), "slaveof", 1)) {
+  if (pstd::stringmatch(pattern.data(), "slaveof", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "slaveof");
     EncodeString(&config_body, g_pika_conf->slaveof());
   }
 
-  if (slash::stringmatch(pattern.data(), "slave-priority", 1)) {
+  if (pstd::stringmatch(pattern.data(), "slave-priority", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "slave-priority");
-    EncodeInt32(&config_body, g_pika_conf->slave_priority());
+    EncodeNumber(&config_body, g_pika_conf->slave_priority());
+  }
+
+  // fake string for redis-benchmark
+  if (pstd::stringmatch(pattern.data(), "save", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "save");
+    EncodeString(&config_body, "");
   }
 
-  if (slash::stringmatch(pattern.data(), "sync-window-size", 1)) {
+  if (pstd::stringmatch(pattern.data(), "appendonly", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "appendonly");
+    EncodeString(&config_body, "no");
+  }
+
+  if (pstd::stringmatch(pattern.data(), "sync-window-size", 1) != 0) {
     elements += 2;
     EncodeString(&config_body, "sync-window-size");
-    EncodeInt32(&config_body, g_pika_conf->sync_window_size());
+    EncodeNumber(&config_body, g_pika_conf->sync_window_size());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "max-conn-rbuf-size", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "max-conn-rbuf-size");
+    EncodeNumber(&config_body, g_pika_conf->max_conn_rbuf_size());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "replication-num", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "replication-num");
+    EncodeNumber(&config_body, g_pika_conf->replication_num());
+  }
+  if (pstd::stringmatch(pattern.data(), "consensus-level", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "consensus-level");
+    EncodeNumber(&config_body, g_pika_conf->consensus_level());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "rate-limiter-mode", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "rate-limiter-mode");
+    EncodeNumber(&config_body, g_pika_conf->rate_limiter_mode());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "rate-limiter-bandwidth", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "rate-limiter-bandwidth");
+    EncodeNumber(&config_body, g_pika_conf->rate_limiter_bandwidth());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "delayed-write-rate", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "delayed-write-rate");
+    EncodeNumber(&config_body, g_pika_conf->delayed_write_rate());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "max-compaction-bytes", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "max-compaction-bytes");
+    EncodeNumber(&config_body, g_pika_conf->max_compaction_bytes());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "rate-limiter-refill-period-us", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "rate-limiter-refill-period-us");
+    EncodeNumber(&config_body, g_pika_conf->rate_limiter_refill_period_us());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "rate-limiter-fairness", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "rate-limiter-fairness");
+    EncodeNumber(&config_body, g_pika_conf->rate_limiter_fairness());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "rate-limiter-auto-tuned", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "rate-limiter-auto-tuned");
+    EncodeString(&config_body, g_pika_conf->rate_limiter_auto_tuned() ? "yes" : "no");
+  }
+
+  if (pstd::stringmatch(pattern.data(), "run-id", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "run-id");
+    EncodeString(&config_body, g_pika_conf->run_id());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "blob-cache", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "blob-cache");
+    EncodeNumber(&config_body, g_pika_conf->blob_cache());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "blob-compression-type", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "blob-compression-type");
+    EncodeString(&config_body, g_pika_conf->blob_compression_type());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "blob-file-size", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "blob-file-size");
+    EncodeNumber(&config_body, g_pika_conf->blob_file_size());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "blob-garbage-collection-age-cutoff", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "blob-garbage-collection-age-cutoff");
+    EncodeNumber(&config_body, g_pika_conf->blob_garbage_collection_age_cutoff());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "blob-garbage-collection-force-threshold", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "blob-garbage-collection-force-threshold");
+    EncodeNumber(&config_body, g_pika_conf->blob_garbage_collection_force_threshold());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "blob-num-shard-bits", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "blob-num-shard-bits");
+    EncodeNumber(&config_body, g_pika_conf->blob_num_shard_bits());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "compression-per-level", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "compression-per-level");
+    EncodeString(&config_body, g_pika_conf->compression_all_levels());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "default-slot-num", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "default-slot-num");
+    EncodeNumber(&config_body, g_pika_conf->default_slot_num());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "enable-blob-files", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "enable-blob-files");
+    EncodeString(&config_body, g_pika_conf->enable_blob_files() ? "yes" : "no");
+  }
+
+  if (pstd::stringmatch(pattern.data(), "enable-blob-garbage-collection", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "enable-blob-garbage-collection");
+    EncodeString(&config_body, g_pika_conf->enable_blob_garbage_collection() ? "yes" : "no");
+  }
+
"yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "loglevel", 1) != 0) { + elements += 2; + EncodeString(&config_body, "loglevel"); + EncodeString(&config_body, g_pika_conf->log_level()); + } + + if (pstd::stringmatch(pattern.data(), "min-blob-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "min-blob-size"); + EncodeNumber(&config_body, g_pika_conf->min_blob_size()); + } + + if (pstd::stringmatch(pattern.data(), "pin_l0_filter_and_index_blocks_in_cache", 1) != 0) { + elements += 2; + EncodeString(&config_body, "pin_l0_filter_and_index_blocks_in_cache"); + EncodeString(&config_body, g_pika_conf->pin_l0_filter_and_index_blocks_in_cache() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "slave-read-only", 1) != 0) { + elements += 2; + EncodeString(&config_body, "slave-read-only"); + EncodeString(&config_body, g_pika_conf->slave_read_only() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "throttle-bytes-per-second", 1) != 0) { + elements += 2; + EncodeString(&config_body, "throttle-bytes-per-second"); + EncodeNumber(&config_body, g_pika_conf->throttle_bytes_per_second()); + } + + if (pstd::stringmatch(pattern.data(), "max-rsync-parallel-num", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-rsync-parallel-num"); + EncodeNumber(&config_body, g_pika_conf->max_rsync_parallel_num()); + } + + if (pstd::stringmatch(pattern.data(), "replication-id", 1) != 0) { + elements += 2; + EncodeString(&config_body, "replication-id"); + EncodeString(&config_body, g_pika_conf->replication_id()); + } + + + if (pstd::stringmatch(pattern.data(), "cache-num", 1)) { + elements += 2; + EncodeString(&config_body, "cache-num"); + EncodeNumber(&config_body, g_pika_conf->GetCacheNum()); + } + + if (pstd::stringmatch(pattern.data(), "cache-model", 1)) { + elements += 2; + EncodeString(&config_body, "cache-model"); + EncodeNumber(&config_body, g_pika_conf->cache_mode()); + } + + if (pstd::stringmatch(pattern.data(), "cache-type", 1)) { + elements += 2; + EncodeString(&config_body, "cache-type"); + EncodeString(&config_body, g_pika_conf->scache_type()); + } + + if (pstd::stringmatch(pattern.data(), "zset-cache-start-direction", 1)) { + elements += 2; + EncodeString(&config_body, "zset-cache-start-direction"); + EncodeNumber(&config_body, g_pika_conf->zset_cache_start_direction()); + } + + if (pstd::stringmatch(pattern.data(), "zset-cache-field-num-per-key", 1)) { + elements += 2; + EncodeString(&config_body, "zset-cache-field-num-per-key"); + EncodeNumber(&config_body, g_pika_conf->zset_cache_field_num_per_key()); + } + + if (pstd::stringmatch(pattern.data(), "cache-maxmemory", 1)) { + elements += 2; + EncodeString(&config_body, "cache-maxmemory"); + EncodeNumber(&config_body, g_pika_conf->cache_maxmemory()); + } + + if (pstd::stringmatch(pattern.data(), "cache-maxmemory-policy", 1)) { + elements += 2; + EncodeString(&config_body, "cache-maxmemory-policy"); + EncodeNumber(&config_body, g_pika_conf->cache_maxmemory_policy()); + } + + if (pstd::stringmatch(pattern.data(), "cache-maxmemory-samples", 1)) { + elements += 2; + EncodeString(&config_body, "cache-maxmemory-samples"); + EncodeNumber(&config_body, g_pika_conf->cache_maxmemory_samples()); + } + + if (pstd::stringmatch(pattern.data(), "cache-lfu-decay-time", 1)) { + elements += 2; + EncodeString(&config_body, "cache-lfu-decay-time"); + EncodeNumber(&config_body, g_pika_conf->cache_lfu_decay_time()); + } + + if (pstd::stringmatch(pattern.data(), "acl-pubsub-default", 1) != 0) { + elements += 2; + 
EncodeString(&config_body, "acl-pubsub-default"); + g_pika_conf->acl_pubsub_default() ? EncodeString(&config_body, "allchannels") + : EncodeString(&config_body, "resetchannels"); + } + + if (pstd::stringmatch(pattern.data(), "enable-db-statistics", 1)) { + elements += 2; + EncodeString(&config_body, "enable-db-statistics"); + EncodeString(&config_body, g_pika_conf->enable_db_statistics() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "db-statistics-level", 1)) { + elements += 2; + EncodeString(&config_body, "db-statistics-level"); + EncodeNumber(&config_body, g_pika_conf->db_statistics_level()); } std::stringstream resp; @@ -1518,102 +2276,150 @@ void ConfigCmd::ConfigGet(std::string &ret) { } // Remember to sync change PikaConf::ConfigRewrite(); -void ConfigCmd::ConfigSet(std::string& ret) { +void ConfigCmd::ConfigSet(std::shared_ptr db) { std::string set_item = config_args_v_[1]; if (set_item == "*") { - ret = "*23\r\n"; - EncodeString(&ret, "timeout"); - EncodeString(&ret, "requirepass"); - EncodeString(&ret, "masterauth"); - EncodeString(&ret, "userpass"); - EncodeString(&ret, "userblacklist"); - EncodeString(&ret, "dump-prefix"); - EncodeString(&ret, "maxclients"); - EncodeString(&ret, "dump-expire"); - EncodeString(&ret, "expire-logs-days"); - EncodeString(&ret, "expire-logs-nums"); - EncodeString(&ret, "root-connection-num"); - EncodeString(&ret, "slowlog-write-errorlog"); - EncodeString(&ret, "slowlog-log-slower-than"); - EncodeString(&ret, "slowlog-max-len"); - EncodeString(&ret, "write-binlog"); - EncodeString(&ret, "max-cache-statistic-keys"); - EncodeString(&ret, "small-compaction-threshold"); - EncodeString(&ret, "max-client-response-size"); - EncodeString(&ret, "db-sync-speed"); - EncodeString(&ret, "compact-cron"); - EncodeString(&ret, "compact-interval"); - EncodeString(&ret, "slave-priority"); - EncodeString(&ret, "sync-window-size"); - return; - } - long int ival; + std::vector replyVt({ + "timeout", + "requirepass", + "masterauth", + "slotmigrate", + "slow-cmd-pool", + "slotmigrate-thread-num", + "thread-migrate-keys-num", + "userpass", + "userblacklist", + "dump-prefix", + "maxclients", + "dump-expire", + "expire-logs-days", + "expire-logs-nums", + "root-connection-num", + "slowlog-write-errorlog", + "slowlog-log-slower-than", + "slowlog-max-len", + "write-binlog", + "max-cache-statistic-keys", + "small-compaction-threshold", + "small-compaction-duration-threshold", + "max-client-response-size", + "db-sync-speed", + "compact-cron", + "compact-interval", + "disable_auto_compactions", + "slave-priority", + "sync-window-size", + "slow-cmd-list", + // Options for storage engine + // MutableDBOptions + "max-cache-files", + "max-background-compactions", + "max-background-jobs", + // MutableColumnFamilyOptions + "write-buffer-size", + "max-write-buffer-num", + "min-write-buffer-number-to-merge", + "max-total-wal-size", + "level0-slowdown-writes-trigger", + "level0-stop-writes-trigger", + "level0-file-num-compaction-trigger", + "arena-block-size", + "throttle-bytes-per-second", + "max-rsync-parallel-num", + "cache-model", + "cache-type", + "zset-cache-start-direction", + "zset-cache-field-num-per-key", + "cache-lfu-decay-time", + "max-conn-rbuf-size", + }); + res_.AppendStringVector(replyVt); + return; + } + long int ival = 0; std::string value = config_args_v_[2]; if (set_item == "timeout") { - if (!slash::string2l(value.data(), value.size(), &ival)) { - ret = "-ERR Invalid argument " + value + " for CONFIG SET 'timeout'\r\n"; + if 
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'timeout'\r\n");
       return;
     }
-    g_pika_conf->SetTimeout(ival);
-    ret = "+OK\r\n";
+    g_pika_conf->SetTimeout(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "requirepass") {
     g_pika_conf->SetRequirePass(value);
-    ret = "+OK\r\n";
+    g_pika_server->Acl()->UpdateDefaultUserPassword(value);
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "masterauth") {
     g_pika_conf->SetMasterAuth(value);
-    ret = "+OK\r\n";
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "userpass") {
     g_pika_conf->SetUserPass(value);
-    ret = "+OK\r\n";
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "userblacklist") {
     g_pika_conf->SetUserBlackList(value);
-    ret = "+OK\r\n";
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "dump-prefix") {
     g_pika_conf->SetBgsavePrefix(value);
-    ret = "+OK\r\n";
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "maxclients") {
-    if (!slash::string2l(value.data(), value.size(), &ival) || ival <= 0) {
-      ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'maxclients'\r\n";
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'maxclients'\r\n");
       return;
     }
-    g_pika_conf->SetMaxConnection(ival);
-    g_pika_server->SetDispatchQueueLimit(ival);
-    ret = "+OK\r\n";
+    g_pika_conf->SetMaxConnection(static_cast<int>(ival));
+    g_pika_server->SetDispatchQueueLimit(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "dump-expire") {
-    if (!slash::string2l(value.data(), value.size(), &ival)) {
-      ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'dump-expire'\r\n";
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'dump-expire'\r\n");
       return;
     }
-    g_pika_conf->SetExpireDumpDays(ival);
-    ret = "+OK\r\n";
+    g_pika_conf->SetExpireDumpDays(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "slave-priority") {
-    if (!slash::string2l(value.data(), value.size(), &ival)) {
-      ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slave-priority'\r\n";
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slave-priority'\r\n");
       return;
     }
-    g_pika_conf->SetSlavePriority(ival);
-    ret = "+OK\r\n";
+    g_pika_conf->SetSlavePriority(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "expire-logs-days") {
-    if (!slash::string2l(value.data(), value.size(), &ival) || ival <= 0) {
-      ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'expire-logs-days'\r\n";
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'expire-logs-days'\r\n");
      return;
     }
-    g_pika_conf->SetExpireLogsDays(ival);
-    ret = "+OK\r\n";
+    g_pika_conf->SetExpireLogsDays(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "expire-logs-nums") {
-    if (!slash::string2l(value.data(), value.size(), &ival) || ival <= 0) {
-      ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'expire-logs-nums'\r\n";
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'expire-logs-nums'\r\n");
       return;
     }
-    g_pika_conf->SetExpireLogsNums(ival);
-    ret = "+OK\r\n";
+    g_pika_conf->SetExpireLogsNums(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "root-connection-num") {
-    if (!slash::string2l(value.data(), value.size(), &ival) || ival <= 0) {
-      ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'root-connection-num'\r\n";
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'root-connection-num'\r\n");
       return;
     }
-    g_pika_conf->SetRootConnectionNum(ival);
-    ret = "+OK\r\n";
+    g_pika_conf->SetRootConnectionNum(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "slotmigrate-thread-num") {
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slotmigrate-thread-num'\r\n");
+      return;
+    }
+    long int migrate_thread_num = (1 > ival || 24 < ival) ? 8 : ival;
+    g_pika_conf->SetSlotMigrateThreadNum(migrate_thread_num);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "thread-migrate-keys-num") {
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'thread-migrate-keys-num'\r\n");
+      return;
+    }
+    long int thread_migrate_keys_num = (8 > ival || 128 < ival) ? 64 : ival;
+    g_pika_conf->SetThreadMigrateKeysNum(thread_migrate_keys_num);
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "slowlog-write-errorlog") {
     bool is_write_errorlog;
     if (value == "yes") {
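Aside: every numeric branch above follows the same shape: pstd::string2int returns 0 on parse failure, then a range check clamps or rejects. An equivalent contract with the standard library (the helper name is ours, not Pika's):

    #include <charconv>
    #include <string>

    bool ParsePositiveInt(const std::string& value, long& out) {
      auto [ptr, ec] = std::from_chars(value.data(), value.data() + value.size(), out);
      // Reject parse errors, trailing junk, and non-positive values, as the hunk does.
      return ec == std::errc() && ptr == value.data() + value.size() && out > 0;
    }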
value + "\' for CONFIG SET 'expire-logs-nums'\r\n"); return; } - g_pika_conf->SetExpireLogsNums(ival); - ret = "+OK\r\n"; + g_pika_conf->SetExpireLogsNums(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "root-connection-num") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival <= 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'root-connection-num'\r\n"; + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'root-connection-num'\r\n"); return; } - g_pika_conf->SetRootConnectionNum(ival); - ret = "+OK\r\n"; + g_pika_conf->SetRootConnectionNum(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slotmigrate-thread-num") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slotmigrate-thread-num'\r\n"); + return; + } + long int migrate_thread_num = (1 > ival || 24 < ival) ? 8 : ival; + g_pika_conf->SetSlotMigrateThreadNum(migrate_thread_num); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "thread-migrate-keys-num") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'thread-migrate-keys-num'\r\n"); + return; + } + long int thread_migrate_keys_num = (8 > ival || 128 < ival) ? 64 : ival; + g_pika_conf->SetThreadMigrateKeysNum(thread_migrate_keys_num); + res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "slowlog-write-errorlog") { bool is_write_errorlog; if (value == "yes") { @@ -1621,80 +2427,164 @@ void ConfigCmd::ConfigSet(std::string& ret) { } else if (value == "no") { is_write_errorlog = false; } else { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-write-errorlog'\r\n"; + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-write-errorlog'\r\n"); return; } g_pika_conf->SetSlowlogWriteErrorlog(is_write_errorlog); - ret = "+OK\r\n"; + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slotmigrate") { + bool slotmigrate; + if (value == "yes") { + slotmigrate = true; + } else if (value == "no") { + slotmigrate = false; + } else { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slotmigrate'\r\n"); + return; + } + g_pika_conf->SetSlotMigrate(slotmigrate); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slow_cmd_pool") { + bool SlowCmdPool; + if (value == "yes") { + SlowCmdPool = true; + } else if (value == "no") { + SlowCmdPool = false; + } else { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slow-cmd-pool'\r\n"); + return; + } + g_pika_conf->SetSlowCmdPool(SlowCmdPool); + g_pika_server->SetSlowCmdThreadPoolFlag(SlowCmdPool); + res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "slowlog-log-slower-than") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-log-slower-than'\r\n"; + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-log-slower-than'\r\n"); return; } - g_pika_conf->SetSlowlogSlowerThan(ival); - ret = "+OK\r\n"; + g_pika_conf->SetSlowlogSlowerThan(static_cast(ival)); + 
res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "slowlog-max-len") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-max-len'\r\n"; + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-max-len'\r\n"); return; } - g_pika_conf->SetSlowlogMaxLen(ival); + g_pika_conf->SetSlowlogMaxLen(static_cast(ival)); g_pika_server->SlowlogTrim(); - ret = "+OK\r\n"; + res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "max-cache-statistic-keys") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-cache-statistic-keys'\r\n"; + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-cache-statistic-keys'\r\n"); return; } - g_pika_conf->SetMaxCacheStatisticKeys(ival); - g_pika_server->PartitionSetMaxCacheStatisticKeys(ival); - ret = "+OK\r\n"; + g_pika_conf->SetMaxCacheStatisticKeys(static_cast(ival)); + g_pika_server->DBSetMaxCacheStatisticKeys(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "small-compaction-threshold") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'small-compaction-threshold'\r\n"; + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'small-compaction-threshold'\r\n"); return; } - g_pika_conf->SetSmallCompactionThreshold(ival); - g_pika_server->PartitionSetSmallCompactionThreshold(ival); - ret = "+OK\r\n"; - } else if (set_item == "max-client-response-size") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-client-response-size'\r\n"; + g_pika_conf->SetSmallCompactionThreshold(static_cast(ival)); + g_pika_server->DBSetSmallCompactionThreshold(static_cast(ival)); + res_.AppendStringRaw( "+OK\r\n"); + } else if (set_item == "small-compaction-duration-threshold") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'small-compaction-duration-threshold'\r\n"); return; } - g_pika_conf->SetMaxClientResponseSize(ival); - ret = "+OK\r\n"; - } else if (set_item == "write-binlog") { + g_pika_conf->SetSmallCompactionDurationThreshold(static_cast(ival)); + g_pika_server->DBSetSmallCompactionDurationThreshold(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "disable_auto_compactions") { + if (value != "true" && value != "false") { + res_.AppendStringRaw("-ERR invalid disable_auto_compactions (true or false)\r\n"); + return; + } + std::unordered_map options_map{{"disable_auto_compactions", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set storage::OptionType::kColumnFamily disable_auto_compactions wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetDisableAutoCompaction(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "rate-limiter-bandwidth") { + int64_t new_bandwidth = 0; + if 
+    if (pstd::string2int(value.data(), value.size(), &new_bandwidth) == 0 || new_bandwidth <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rate-limiter-bandwidth'\r\n");
+      return;
+    }
+    g_pika_server->storage_options().options.rate_limiter->SetBytesPerSecond(new_bandwidth);
+    g_pika_conf->SetRateLmiterBandwidth(new_bandwidth);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "delayed-write-rate") {
+    int64_t new_delayed_write_rate = 0;
+    if (pstd::string2int(value.data(), value.size(), &new_delayed_write_rate) == 0 || new_delayed_write_rate <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'delayed-write-rate'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"delayed_write_rate", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set delayed-write-rate wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetDelayedWriteRate(new_delayed_write_rate);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "max-compaction-bytes") {
+    int64_t new_max_compaction_bytes = 0;
+    if (pstd::string2int(value.data(), value.size(), &new_max_compaction_bytes) == 0 || new_max_compaction_bytes <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-compaction-bytes'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"max_compaction_bytes", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set max-compaction-bytes wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxCompactionBytes(new_max_compaction_bytes);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "max-client-response-size") {
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-client-response-size'\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxClientResponseSize(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "write-binlog") {
     int role = g_pika_server->role();
     if (role == PIKA_ROLE_SLAVE) {
-      ret = "-ERR need to close master-slave mode first\r\n";
+      res_.AppendStringRaw("-ERR need to close master-slave mode first\r\n");
      return;
     } else if (value != "yes" && value != "no") {
-      ret = "-ERR invalid write-binlog (yes or no)\r\n";
+      res_.AppendStringRaw("-ERR invalid write-binlog (yes or no)\r\n");
       return;
     } else {
       g_pika_conf->SetWriteBinlog(value);
-      ret = "+OK\r\n";
+      res_.AppendStringRaw("+OK\r\n");
     }
   } else if (set_item == "db-sync-speed") {
-    if (!slash::string2l(value.data(), value.size(), &ival)) {
-      ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'db-sync-speed(MB)'\r\n";
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'db-sync-speed(MB)'\r\n");
       return;
     }
     if (ival < 0 || ival > 1024) {
       ival = 1024;
     }
-    g_pika_conf->SetDbSyncSpeed(ival);
-    ret = "+OK\r\n";
+    g_pika_conf->SetDbSyncSpeed(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
   } else if (set_item == "compact-cron") {
     bool invalid = false;
-    if (value != "") {
+    if (!value.empty()) {
       bool have_week = false;
-      std::string compact_cron, week_str;
-      int slash_num = count(value.begin(), value.end(), '/');
+      std::string compact_cron;
+      std::string week_str;
+      int64_t slash_num = count(value.begin(), value.end(), '/');
       if (slash_num == 2) {
         have_week = true;
-        std::string::size_type first_slash = value.find("/");
+        std::string::size_type first_slash = value.find('/');
         week_str = value.substr(0, first_slash);
         compact_cron = value.substr(first_slash + 1);
       } else {
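Aside: the compact-cron grammar validated in the surrounding hunks is "[week/]start-end/usage", e.g. "3/02-04/60" (optional weekday 3, window 02:00 to 04:00, usage threshold 60). A cleaned-up parse outline under those assumptions; the patch keeps its own substring and bounds handling:

    #include <algorithm>
    #include <cstdlib>
    #include <string>

    struct CompactCron { int week = 0; int start = 0; int end = 0; int usage = 0; };

    bool ParseCompactCron(std::string value, CompactCron* out) {
      if (std::count(value.begin(), value.end(), '/') == 2) {  // optional weekday prefix
        auto first_slash = value.find('/');
        out->week = std::atoi(value.substr(0, first_slash).c_str());
        value = value.substr(first_slash + 1);
        if (out->week < 1 || out->week > 7) return false;
      }
      auto dash = value.find('-');
      auto slash = value.find('/');
      if (dash == std::string::npos || slash == std::string::npos || dash >= slash) return false;
      out->start = std::atoi(value.substr(0, dash).c_str());
      out->end = std::atoi(value.substr(dash + 1, slash - dash - 1).c_str());
      out->usage = std::atoi(value.substr(slash + 1).c_str());
      return out->start >= 0 && out->start <= 23 && out->end >= 0 && out->end <= 23 &&
             out->usage >= 0 && out->usage <= 100;
    }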
int64_t slash_num = count(value.begin(), value.end(), '/'); if (slash_num == 2) { have_week = true; - std::string::size_type first_slash = value.find("/"); + std::string::size_type first_slash = value.find('/'); week_str = value.substr(0, first_slash); compact_cron = value.substr(first_slash + 1); } else { @@ -1702,73 +2592,409 @@ void ConfigCmd::ConfigSet(std::string& ret) { } std::string::size_type len = compact_cron.length(); - std::string::size_type colon = compact_cron.find("-"); - std::string::size_type underline = compact_cron.find("/"); - if (colon == std::string::npos || underline == std::string::npos || - colon >= underline || colon + 1 >= len || + std::string::size_type colon = compact_cron.find('-'); + std::string::size_type underline = compact_cron.find('/'); + if (colon == std::string::npos || underline == std::string::npos || colon >= underline || colon + 1 >= len || colon + 1 == underline || underline + 1 >= len) { - invalid = true; + invalid = true; } else { int week = std::atoi(week_str.c_str()); int start = std::atoi(compact_cron.substr(0, colon).c_str()); int end = std::atoi(compact_cron.substr(colon + 1, underline).c_str()); int usage = std::atoi(compact_cron.substr(underline + 1).c_str()); - if ((have_week && (week < 1 || week > 7)) || start < 0 || start > 23 || end < 0 || end > 23 || usage < 0 || usage > 100) { + if ((have_week && (week < 1 || week > 7)) || start < 0 || start > 23 || end < 0 || end > 23 || usage < 0 || + usage > 100) { invalid = true; } } } if (invalid) { - ret = "-ERR invalid compact-cron\r\n"; + res_.AppendStringRaw("-ERR invalid compact-cron\r\n"); return; } else { g_pika_conf->SetCompactCron(value); - ret = "+OK\r\n"; + res_.AppendStringRaw("+OK\r\n"); } } else if (set_item == "compact-interval") { bool invalid = false; - if (value != "") { + if (!value.empty()) { std::string::size_type len = value.length(); - std::string::size_type slash = value.find("/"); + std::string::size_type slash = value.find('/'); if (slash == std::string::npos || slash + 1 >= len) { invalid = true; } else { int interval = std::atoi(value.substr(0, slash).c_str()); - int usage = std::atoi(value.substr(slash+1).c_str()); + int usage = std::atoi(value.substr(slash + 1).c_str()); if (interval <= 0 || usage < 0 || usage > 100) { invalid = true; } } } if (invalid) { - ret = "-ERR invalid compact-interval\r\n"; + res_.AppendStringRaw("-ERR invalid compact-interval\r\n"); return; } else { g_pika_conf->SetCompactInterval(value); - ret = "+OK\r\n"; + res_.AppendStringRaw("+OK\r\n"); } } else if (set_item == "sync-window-size") { - if (!slash::string2l(value.data(), value.size(), &ival)) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'sync-window-size'\r\n"; + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'sync-window-size'\r\n"); return; } if (ival <= 0 || ival > kBinlogReadWinMaxSize) { - ret = "-ERR Argument exceed range \'" + value + "\' for CONFIG SET 'sync-window-size'\r\n"; + res_.AppendStringRaw("-ERR Argument exceed range \'" + value + "\' for CONFIG SET 'sync-window-size'\r\n"); + return; + } + g_pika_conf->SetSyncWindowSize(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slow-cmd-list") { + g_pika_conf->SetSlowCmd(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-cache-files") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value 
+ "\' for CONFIG SET 'max-cache-files'\r\n"); + return; + } + std::unordered_map options_map{{"max_open_files", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set max-cache-files wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetMaxCacheFiles(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-background-compactions") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival <= 0) { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-background-compactions'\r\n"); + return; + } + std::unordered_map options_map{{"max_background_compactions", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set max-background-compactions wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetMaxBackgroudCompactions(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "rocksdb-periodic-second") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rocksdb-periodic-second'\r\n"); + return; + } + std::unordered_map options_map{{"periodic_compaction_seconds", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set rocksdb-periodic-second wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetRocksdbPeriodicSecond(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "rocksdb-ttl-second") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rocksdb-ttl-second'\r\n"); + return; + } + std::unordered_map options_map{{"ttl", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set rocksdb-ttl-second wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetRocksdbTTLSecond(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-background-jobs") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-background-jobs'\r\n"); + return; + } + std::unordered_map options_map{{"max_background_jobs", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set max-background-jobs wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetMaxBackgroudJobs(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "write-buffer-size") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'write-buffer-size'\r\n"); return; } - g_pika_conf->SetSyncWindowSize(ival); + std::unordered_map options_map{{"write_buffer_size", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set write-buffer-size wrong: " + s.ToString() + "\r\n"); + return; + } + 
g_pika_conf->SetWriteBufferSize(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-write-buffer-num") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-write-buffer-number'\r\n"); + return; + } + std::unordered_map options_map{{"max_write_buffer_number", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set max-write-buffer-number wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetMaxWriteBufferNumber(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "min-write-buffer-number-to-merge") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'min-write-buffer-number-to-merge'\r\n"); + return; + } + std::unordered_map options_map{{"min_write_buffer_number_to_merge", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set min-write-buffer-number-to-merge wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetMinWriteBufferNumberToMerge(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "level0-stop-writes-trigger") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'level0-stop-writes-trigger'\r\n"); + return; + } + std::unordered_map options_map{{"level0_stop_writes_trigger", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set level0-stop-writes-trigger wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetLevel0StopWritesTrigger(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "level0-slowdown-writes-trigger") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'level0-slowdown-writes-trigger'\r\n"); + return; + } + std::unordered_map options_map{{"level0_slowdown_writes_trigger", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set level0-slowdown-writes-trigger wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetLevel0SlowdownWritesTrigger(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + + } else if (set_item == "max-total-wal-size") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-total-wal-size'\r\n"); + return; + } + std::unordered_map options_map{{"max_total_wal_size", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set max-total-wal-size: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetMaxTotalWalSize(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "level0-file-num-compaction-trigger") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG 
SET 'level0-file-num-compaction-trigger'\r\n");
+      return;
+    }
+    std::unordered_map options_map{{"level0_file_num_compaction_trigger", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set level0-file-num-compaction-trigger wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetLevel0FileNumCompactionTrigger(static_cast(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "arena-block-size") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'arena-block-size'\r\n");
+      return;
+    }
+    std::unordered_map options_map{{"arena_block_size", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set arena-block-size wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetArenaBlockSize(static_cast(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "throttle-bytes-per-second") {
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'throttle-bytes-per-second'\r\n");
+      return;
+    }
+    int32_t new_throughput_limit = static_cast(ival);
+    g_pika_conf->SetThrottleBytesPerSecond(new_throughput_limit);
+    // The rate limiter of rsync (Throttle) is used in singleton mode; all DBs share the same rate limiter.
+    rsync::Throttle::GetInstance().ResetThrottleThroughputBytes(new_throughput_limit);
+    LOG(INFO) << "The conf item [throttle-bytes-per-second] is changed by Config Set command. "
+                 "The rsync rate limit now is "
+              << new_throughput_limit << " (Which Is Around " << (new_throughput_limit >> 20) << " MB/s)";
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "rsync-timeout-ms") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rsync-timeout-ms'\r\n");
+      return;
+    }
+    g_pika_conf->SetRsyncTimeoutMs(ival);
+    LOG(INFO) << "The conf item [rsync-timeout-ms] is changed by Config Set command. "
+                 "The rsync-timeout-ms now is " << ival << " ms";
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "max-rsync-parallel-num") {
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival > kMaxRsyncParallelNum || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-rsync-parallel-num'\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxRsyncParallelNum(static_cast(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "cache-num") {
+    if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-num'\r\n");
+      return;
+    }
+
+    int cache_num = (ival <= 0 || ival > 48) ?
16 : ival; + if (cache_num != g_pika_conf->GetCacheNum()) { + g_pika_conf->SetCacheNum(cache_num); + g_pika_server->ResetCacheAsync(cache_num, db); + } + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "cache-model") { + if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) { + res_.AppendStringRaw( "-ERR Invalid argument " + value + " for CONFIG SET 'cache-model'\r\n"); + return; + } + if (PIKA_CACHE_NONE > ival || PIKA_CACHE_READ < ival) { + res_.AppendStringRaw("-ERR Invalid cache model\r\n"); + } else { + g_pika_conf->SetCacheMode(ival); + if (PIKA_CACHE_NONE == ival) { + g_pika_server->ClearCacheDbAsync(db); + } + res_.AppendStringRaw("+OK\r\n"); + } + } else if (set_item == "cache-type") { + pstd::StringToLower(value); + std::set available_types = {"string", "set", "zset", "list", "hash", "bit"}; + std::string type_str = value; + std::vector types; + type_str.erase(remove_if(type_str.begin(), type_str.end(), ::isspace), type_str.end()); + pstd::StringSplit(type_str, COMMA, types); + for (auto& type : types) { + if (available_types.find(type) == available_types.end()) { + res_.AppendStringRaw("-ERR Invalid cache type: " + type + "\r\n"); + return; + } + } + g_pika_conf->SetCacheType(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "zset-cache-start-direction") { + if (!pstd::string2int(value.data(), value.size(), &ival)) { + res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'zset-cache-start-direction'\r\n"); + return; + } + if (ival != CACHE_START_FROM_BEGIN && ival != CACHE_START_FROM_END) { + res_.AppendStringRaw("-ERR Invalid zset-cache-start-direction\r\n"); + return; + } + auto origin_start_pos = g_pika_conf->zset_cache_start_direction(); + if (origin_start_pos != ival) { + g_pika_conf->SetCacheStartDirection(ival); + g_pika_server->OnCacheStartPosChanged(ival, db); + } + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "zset-cache-field-num-per-key") { + if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'zset-cache-field-num-per-key'\r\n"); + return; + } + g_pika_conf->SetCacheItemsPerKey(ival); + g_pika_server->ResetCacheConfig(db); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "cache-maxmemory") { + if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-maxmemory'\r\n"); + return; + } + int64_t cache_maxmemory = (PIKA_CACHE_SIZE_MIN > ival) ? PIKA_CACHE_SIZE_DEFAULT : ival; + g_pika_conf->SetCacheMaxmemory(cache_maxmemory); + g_pika_server->ResetCacheConfig(db); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "cache-maxmemory-policy") { + if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-maxmemory-policy'\r\n"); + return; + } + int cache_maxmemory_policy_ = (ival < 0|| ival > 5) ? 3 : ival; // default allkeys-lru + g_pika_conf->SetCacheMaxmemoryPolicy(cache_maxmemory_policy_); + g_pika_server->ResetCacheConfig(db); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "cache-maxmemory-samples") { + if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-maxmemory-samples'\r\n"); + return; + } + int cache_maxmemory_samples = (ival > 1) ? 
ival : 5;
+    g_pika_conf->SetCacheMaxmemorySamples(cache_maxmemory_samples);
+    g_pika_server->ResetCacheConfig(db);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "cache-lfu-decay-time") {
+    if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-lfu-decay-time'\r\n");
+      return;
+    }
+    int cache_lfu_decay_time = (ival < 0) ? 1 : ival;
+    g_pika_conf->SetCacheLFUDecayTime(cache_lfu_decay_time);
+    g_pika_server->ResetCacheConfig(db);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "acl-pubsub-default") {
+    std::string v(value);
+    pstd::StringToLower(v);
+    if (v != "allchannels" && v != "resetchannels") {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'acl-pubsub-default'\r\n");
+      return;
+    }
+    g_pika_conf->SetAclPubsubDefault(v);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "acllog-max-len") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'acllog-max-len'\r\n");
+      return;
+    }
+    g_pika_conf->SetAclLogMaxLen(static_cast(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "max-conn-rbuf-size") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival < PIKA_MAX_CONN_RBUF_LB || ival > PIKA_MAX_CONN_RBUF_HB * 2) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-conn-rbuf-size'\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxConnRbufSize(static_cast(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else {
+    res_.AppendStringRaw("-ERR Unsupported CONFIG parameter: " + set_item + "\r\n");
+  }
+}
+
+void ConfigCmd::ConfigRewrite(std::string& ret) {
+  if (g_pika_conf->ConfigRewrite() != 0) {
     ret = "+OK\r\n";
   } else {
-    ret = "-ERR Unsupported CONFIG parameter: " + set_item + "\r\n";
+    ret = "-ERR Rewrite CONFIG fail\r\n";
   }
 }
 
-void ConfigCmd::ConfigRewrite(std::string &ret) {
-  g_pika_conf->ConfigRewrite();
-  ret = "+OK\r\n";
+void ConfigCmd::ConfigRewriteReplicationID(std::string& ret) {
+  if (g_pika_conf->ConfigRewriteReplicationID() != 0) {
+    ret = "+OK\r\n";
+  } else {
+    ret = "-ERR Rewrite ReplicationID CONFIG fail\r\n";
+  }
 }
 
-void ConfigCmd::ConfigResetstat(std::string &ret) {
+void ConfigCmd::ConfigResetstat(std::string& ret) {
   g_pika_server->ResetStat();
   ret = "+OK\r\n";
 }
@@ -1780,19 +3006,16 @@ void MonitorCmd::DoInitial() {
   }
 }
 
-void MonitorCmd::Do(std::shared_ptr partition) {
-  std::shared_ptr conn_repl = GetConn();
+void MonitorCmd::Do() {
+  std::shared_ptr conn_repl = GetConn();
   if (!conn_repl) {
     res_.SetRes(CmdRes::kErrOther, kCmdNameMonitor);
-    LOG(WARNING) << name_ << " weak ptr is empty";
+    LOG(WARNING) << name_ << " weak ptr is empty";
     return;
   }
-  std::shared_ptr conn =
-      std::dynamic_pointer_cast(conn_repl)->server_thread()->MoveConnOut(conn_repl->fd());
-  assert(conn.get() == conn_repl.get());
-  g_pika_server->AddMonitorClient(std::dynamic_pointer_cast(conn));
-  g_pika_server->AddMonitorMessage("OK");
-  return;  // Monitor thread will return "OK"
+
+  g_pika_server->AddMonitorClient(std::dynamic_pointer_cast(conn_repl));
+  res_.SetRes(CmdRes::kOk);
 }
 
 void DbsizeCmd::DoInitial() {
@@ -1802,23 +3025,36 @@ void DbsizeCmd::DoInitial() {
   }
 }
 
-void DbsizeCmd::Do(std::shared_ptr partition) {
-  std::shared_ptr
table = g_pika_server->GetTable(table_name_); - if (!table) { - res_.SetRes(CmdRes::kInvalidTable); +void DbsizeCmd::Do() { + std::shared_ptr dbs = g_pika_server->GetDB(db_name_); + if (!dbs) { + res_.SetRes(CmdRes::kInvalidDB); } else { - KeyScanInfo key_scan_info = table->GetKeyScanInfo(); - std::vector key_infos = key_scan_info.key_infos; - if (key_infos.size() != 5) { - res_.SetRes(CmdRes::kErrOther, "keyspace error"); + if (g_pika_conf->slotmigrate()) { + int64_t dbsize = 0; + for (int i = 0; i < g_pika_conf->default_slot_num(); ++i) { + int32_t card = 0; + rocksdb::Status s = dbs->storage()->SCard(SlotKeyPrefix+std::to_string(i), &card); + if (s.ok() && card >= 0) { + dbsize += card; + } else { + res_.SetRes(CmdRes::kErrOther, "Get dbsize error"); + return; + } + } + res_.AppendInteger(dbsize); + } + KeyScanInfo key_scan_info = dbs->GetKeyScanInfo(); + std::vector key_infos = key_scan_info.key_infos; + if (key_infos.size() != (size_t)(storage::DataTypeNum)) { + res_.SetRes(CmdRes::kErrOther, "Mismatch in expected data types and actual key info count"); return; } - int64_t dbsize = key_infos[0].keys - + key_infos[1].keys - + key_infos[2].keys - + key_infos[3].keys - + key_infos[4].keys; - res_.AppendInteger(dbsize); + uint64_t dbsize = 0; + for (auto info : key_infos) { + dbsize += info.keys; + } + res_.AppendInteger(static_cast(dbsize)); } } @@ -1829,16 +3065,16 @@ void TimeCmd::DoInitial() { } } -void TimeCmd::Do(std::shared_ptr partition) { +void TimeCmd::Do() { struct timeval tv; - if (gettimeofday(&tv, NULL) == 0) { + if (gettimeofday(&tv, nullptr) == 0) { res_.AppendArrayLen(2); char buf[32]; - int32_t len = slash::ll2string(buf, sizeof(buf), tv.tv_sec); + int32_t len = pstd::ll2string(buf, sizeof(buf), tv.tv_sec); res_.AppendStringLen(len); res_.AppendContent(buf); - len = slash::ll2string(buf, sizeof(buf), tv.tv_usec); + len = pstd::ll2string(buf, sizeof(buf), tv.tv_usec); res_.AppendStringLen(len); res_.AppendContent(buf); } else { @@ -1846,6 +3082,17 @@ void TimeCmd::Do(std::shared_ptr partition) { } } +void LastsaveCmd::DoInitial() { + if (argv_.size() != 1) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLastSave); + return; + } +} + +void LastsaveCmd::Do() { + res_.AppendInteger(g_pika_server->GetLastSave()); +} + void DelbackupCmd::DoInitial() { if (argv_.size() != 1) { res_.SetRes(CmdRes::kWrongNum, kCmdNameDelbackup); @@ -1853,46 +3100,45 @@ void DelbackupCmd::DoInitial() { } } -void DelbackupCmd::Do(std::shared_ptr partition) { +void DelbackupCmd::Do() { std::string db_sync_prefix = g_pika_conf->bgsave_prefix(); std::string db_sync_path = g_pika_conf->bgsave_path(); std::vector dump_dir; // Dump file is not exist - if (!slash::FileExists(db_sync_path)) { + if (!pstd::FileExists(db_sync_path)) { res_.SetRes(CmdRes::kOk); return; } // Directory traversal - if (slash::GetChildren(db_sync_path, dump_dir) != 0) { + if (pstd::GetChildren(db_sync_path, dump_dir) != 0) { res_.SetRes(CmdRes::kOk); return; } - int len = dump_dir.size(); - for (size_t i = 0; i < dump_dir.size(); i++) { - if (dump_dir[i].substr(0, db_sync_prefix.size()) != db_sync_prefix || dump_dir[i].size() != (db_sync_prefix.size() + 8)) { + int len = static_cast(dump_dir.size()); + for (auto& i : dump_dir) { + if (i.substr(0, db_sync_prefix.size()) != db_sync_prefix || i.size() != (db_sync_prefix.size() + 8)) { continue; } - std::string str_date = dump_dir[i].substr(db_sync_prefix.size(), (dump_dir[i].size() - db_sync_prefix.size())); - char *end = NULL; + std::string str_date = i.substr(db_sync_prefix.size(), 
(i.size() - db_sync_prefix.size())); + char* end = nullptr; std::strtol(str_date.c_str(), &end, 10); if (*end != 0) { continue; } - std::string dump_dir_name = db_sync_path + dump_dir[i] + "/" + table_name_; + std::string dump_dir_name = db_sync_path + i + "/" + db_name_; if (g_pika_server->CountSyncSlaves() == 0) { LOG(INFO) << "Not syncing, delete dump file: " << dump_dir_name; - slash::DeleteDirIfExist(dump_dir_name); + pstd::DeleteDirIfExist(dump_dir_name); len--; } else { LOG(INFO) << "Syncing, can not delete " << dump_dir_name << " dump file" << std::endl; } } res_.SetRes(CmdRes::kOk); - return; } void EchoCmd::DoInitial() { @@ -1901,13 +3147,9 @@ void EchoCmd::DoInitial() { return; } body_ = argv_[1]; - return; } -void EchoCmd::Do(std::shared_ptr partition) { - res_.AppendString(body_); - return; -} +void EchoCmd::Do() { res_.AppendString(body_); } void ScandbCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -1915,34 +3157,32 @@ void ScandbCmd::DoInitial() { return; } if (argv_.size() == 1) { - type_ = blackwidow::kAll; + type_ = storage::DataType::kAll; } else { - if (!strcasecmp(argv_[1].data(),"string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[1].data(), "hash")) { - type_ = blackwidow::kHashes; - } else if (!strcasecmp(argv_[1].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[1].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[1].data(), "list")) { - type_ = blackwidow::kLists; + if (strcasecmp(argv_[1].data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(argv_[1].data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else if (strcasecmp(argv_[1].data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(argv_[1].data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(argv_[1].data(), "list") == 0) { + type_ = storage::DataType::kLists; } else { res_.SetRes(CmdRes::kInvalidDbType); } } - return; } -void ScandbCmd::Do(std::shared_ptr partition) { - std::shared_ptr
table = g_pika_server->GetTable(table_name_); - if (!table) { - res_.SetRes(CmdRes::kInvalidTable); +void ScandbCmd::Do() { + std::shared_ptr dbs = g_pika_server->GetDB(db_name_); + if (!dbs) { + res_.SetRes(CmdRes::kInvalidDB); } else { - table->ScanDatabase(type_); + dbs->ScanDatabase(type_); res_.SetRes(CmdRes::kOk); } - return; } void SlowlogCmd::DoInitial() { @@ -1950,13 +3190,13 @@ void SlowlogCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameSlowlog); return; } - if (argv_.size() == 2 && !strcasecmp(argv_[1].data(), "reset")) { + if (argv_.size() == 2 && (strcasecmp(argv_[1].data(), "reset") == 0)) { condition_ = SlowlogCmd::kRESET; - } else if (argv_.size() == 2 && !strcasecmp(argv_[1].data(), "len")) { + } else if (argv_.size() == 2 && (strcasecmp(argv_[1].data(), "len") == 0)) { condition_ = SlowlogCmd::kLEN; - } else if ((argv_.size() == 2 || argv_.size() == 3) && !strcasecmp(argv_[1].data(), "get")) { + } else if ((argv_.size() == 2 || argv_.size() == 3) && (strcasecmp(argv_[1].data(), "get") == 0)) { condition_ = SlowlogCmd::kGET; - if (argv_.size() == 3 && !slash::string2l(argv_[2].data(), argv_[2].size(), &number_)) { + if (argv_.size() == 3 && (pstd::string2int(argv_[2].data(), argv_[2].size(), &number_) == 0)) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -1966,28 +3206,27 @@ void SlowlogCmd::DoInitial() { } } -void SlowlogCmd::Do(std::shared_ptr partition) { +void SlowlogCmd::Do() { if (condition_ == SlowlogCmd::kRESET) { g_pika_server->SlowlogReset(); res_.SetRes(CmdRes::kOk); - } else if (condition_ == SlowlogCmd::kLEN) { + } else if (condition_ == SlowlogCmd::kLEN) { res_.AppendInteger(g_pika_server->SlowlogLen()); } else { std::vector slowlogs; g_pika_server->SlowlogObtain(number_, &slowlogs); - res_.AppendArrayLen(slowlogs.size()); + res_.AppendArrayLenUint64(slowlogs.size()); for (const auto& slowlog : slowlogs) { res_.AppendArrayLen(4); res_.AppendInteger(slowlog.id); res_.AppendInteger(slowlog.start_time); res_.AppendInteger(slowlog.duration); - res_.AppendArrayLen(slowlog.argv.size()); + res_.AppendArrayLenUint64(slowlog.argv.size()); for (const auto& arg : slowlog.argv) { res_.AppendString(arg); } } } - return; } void PaddingCmd::DoInitial() { @@ -1997,110 +3236,531 @@ void PaddingCmd::DoInitial() { } } -void PaddingCmd::Do(std::shared_ptr partition) { - res_.SetRes(CmdRes::kOk); -} +void PaddingCmd::Do() { res_.SetRes(CmdRes::kOk); } -std::string PaddingCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { +std::string PaddingCmd::ToRedisProtocol() { return PikaBinlogTransverter::ConstructPaddingBinlog( - BinlogType::TypeFirst, argv_[1].size() + BINLOG_ITEM_HEADER_SIZE - + PADDING_BINLOG_PROTOCOL_SIZE + SPACE_STROE_PARAMETER_LENGTH); + BinlogType::TypeFirst, + argv_[1].size() + BINLOG_ITEM_HEADER_SIZE + PADDING_BINLOG_PROTOCOL_SIZE + SPACE_STROE_PARAMETER_LENGTH); } -#ifdef TCMALLOC_EXTENSION -void TcmallocCmd::DoInitial() { - if (argv_.size() != 2 && argv_.size() != 3) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameTcmalloc); +void PKPatternMatchDelCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKPatternMatchDel); return; } - rate_ = 0; - std::string type = argv_[1]; - if (!strcasecmp(type.data(), "stats")) { - type_ = 0; - } else if (!strcasecmp(type.data(), "rate")) { - type_ = 1; - if (argv_.size() == 3) { - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &rate_)) { - res_.SetRes(CmdRes::kSyntaxErr, kCmdNameTcmalloc); - } 
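// A condensed sketch of the CONFIG SET pattern that every RocksDB tunable in
// the hunks above follows: 1) validate the value with pstd::string2int,
// 2) push the raw option through g_pika_server->RewriteStorageOptions()
// (storage::OptionType::kDB for DB-level options such as max_background_jobs,
// kColumnFamily for per-column-family ones such as write_buffer_size), and
// 3) persist the value in g_pika_conf so a later CONFIG REWRITE sees it. This
// is an illustrative fragment only, reusing the max-background-jobs handler
// from the patch; the map's template arguments (stripped in this copy of the
// diff) are assumed to be std::unordered_map<std::string, std::string>, and
// the int cast width is likewise assumed.
if (set_item == "max-background-jobs") {
  if (pstd::string2int(value.data(), value.size(), &ival) == 0) {  // 1) reject non-integers
    res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-background-jobs'\r\n");
    return;
  }
  std::unordered_map<std::string, std::string> options_map{{"max_background_jobs", value}};
  storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map);  // 2) apply live
  if (!s.ok()) {  // RocksDB rejected the change; report it and keep the old conf value
    res_.AppendStringRaw("-ERR Set max-background-jobs wrong: " + s.ToString() + "\r\n");
    return;
  }
  g_pika_conf->SetMaxBackgroudJobs(static_cast<int>(ival));  // 3) keep the in-memory conf in sync
  res_.AppendStringRaw("+OK\r\n");  // replies are raw RESP strings, hence the explicit \r\n
}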
+ pattern_ = argv_[1]; + max_count_ = storage::BATCH_DELETE_LIMIT; + if (argv_.size() > 2) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &max_count_) == 0 || max_count_ < 1 || max_count_ > storage::BATCH_DELETE_LIMIT) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } +} + +void PKPatternMatchDelCmd::Do() { + int64_t count = 0; + rocksdb::Status s = db_->storage()->PKPatternMatchDelWithRemoveKeys(pattern_, &count, &remove_keys_, max_count_); + + if(s.ok()) { + res_.AppendInteger(count); + s_ = rocksdb::Status::OK(); + for (const auto& key : remove_keys_) { + RemSlotKey(key, db_); } - } else if (!strcasecmp(type.data(), "list")) { - type_ = 2; - } else if (!strcasecmp(type.data(), "free")) { - type_ = 3; } else { - res_.SetRes(CmdRes::kInvalidParameter, kCmdNameTcmalloc); + res_.SetRes(CmdRes::kErrOther, s.ToString()); + if (count >= 0) { + s_ = rocksdb::Status::OK(); + for (const auto& key : remove_keys_) { + RemSlotKey(key, db_); + } + } + } +} + +void PKPatternMatchDelCmd::DoThroughDB() { + Do(); +} + +void PKPatternMatchDelCmd::DoUpdateCache() { + if(s_.ok()) { + db_->cache()->Del(remove_keys_); + } +} + +void PKPatternMatchDelCmd::DoBinlog() { + std::string opt = "del"; + for(auto& key: remove_keys_) { + argv_.clear(); + argv_.emplace_back(opt); + argv_.emplace_back(key); + Cmd::DoBinlog(); + } +} + +void DummyCmd::DoInitial() {} + +void DummyCmd::Do() {} + +void QuitCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameQuit); + } +} + +void QuitCmd::Do() { + res_.SetRes(CmdRes::kOk); + LOG(INFO) << "QutCmd will close connection " << GetConn()->String(); + GetConn()->SetClose(true); +} + +/* + * HELLO [ [AUTH ] [SETNAME ] ] + */ +void HelloCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameHello); return; } } -void TcmallocCmd::Do(std::shared_ptr partition) { - std::vector fli; - std::vector elems; - switch(type_) { - case 0: - char stats[1024]; - MallocExtension::instance()->GetStats(stats, 1024); - slash::StringSplit(stats, '\n', elems); - res_.AppendArrayLen(elems.size()); - for (auto& i : elems) { - res_.AppendString(i); +void HelloCmd::Do() { + size_t next_arg = 1; + long ver = 0; + if (argv_.size() >= 2) { + if (pstd::string2int(argv_[next_arg].data(), argv_[next_arg].size(), &ver) == 0) { + res_.SetRes(CmdRes::kErrOther, "Protocol version is not an integer or out of range"); + return; + } + next_arg++; + + if (ver < 2 || ver > 3) { + res_.AppendContent("-NOPROTO unsupported protocol version"); + return; + } + } + + std::shared_ptr conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNameHello); + return; + } + + for (; next_arg < argv_.size(); next_arg++) { + size_t more_args = argv_.size() - next_arg - 1; + const std::string opt = argv_[next_arg]; + if ((strcasecmp(opt.data(), "AUTH") == 0) && (more_args >= 2)) { + const std::string userName = argv_[next_arg + 1]; + const std::string pwd = argv_[next_arg + 2]; + bool defaultAuth = false; + if (userName == Acl::DefaultUser) { + defaultAuth = true; } - break; - case 1: - if (rate_) { - MallocExtension::instance()->SetMemoryReleaseRate(rate_); + auto authResult = AuthenticateUser(name(), userName, pwd, conn, defaultAuth); + switch (authResult) { + case AuthResult::INVALID_CONN: + res_.SetRes(CmdRes::kErrOther, kCmdNamePing); + return; + case AuthResult::INVALID_PASSWORD: + res_.AppendContent("-WRONGPASS invalid username-password pair or user is disabled."); + return; + case AuthResult::NO_REQUIRE_PASS: + 
res_.SetRes(CmdRes::kErrOther, "Client sent AUTH, but no password is set"); + default: + break; } - res_.AppendInteger(MallocExtension::instance()->GetMemoryReleaseRate()); + next_arg += 2; + } else if ((strcasecmp(opt.data(), "SETNAME") == 0) && (more_args != 0U)) { + const std::string name = argv_[next_arg + 1]; + if (pstd::isspace(name)) { + res_.SetRes(CmdRes::kErrOther, "Client names cannot contain spaces, newlines or special characters."); + return; + } + conn->set_name(name); + next_arg++; + } else { + res_.SetRes(CmdRes::kErrOther, "Syntax error in HELLO option " + opt); + return; + } + } + + std::string raw; + std::vector fvs{ + {"server", "redis"}, + }; + // just for redis resp2 protocol + fvs.push_back({"proto", "2"}); + fvs.push_back({"mode", "classic"}); + int host_role = g_pika_server->role(); + switch (host_role) { + case PIKA_ROLE_SINGLE: + case PIKA_ROLE_MASTER: + fvs.push_back({"role", "master"}); + break; + case PIKA_ROLE_SLAVE: + fvs.push_back({"role", "slave"}); break; - case 2: - MallocExtension::instance()->GetFreeListSizes(&fli); - res_.AppendArrayLen(fli.size()); - for (auto& i : fli) { - res_.AppendString("type: " + std::string(i.type) + ", min: " + std::to_string(i.min_object_size) + - ", max: " + std::to_string(i.max_object_size) + ", total: " + std::to_string(i.total_bytes_free)); + case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE: + fvs.push_back({"role", "master&&slave"}); + break; + default: + LOG(INFO) << "unknown role" << host_role << " client ip:port " << conn->ip_port(); + return; + } + + for (const auto& fv : fvs) { + RedisAppendLenUint64(raw, fv.field.size(), "$"); + RedisAppendContent(raw, fv.field); + if (fv.field == "proto") { + pstd::string2int(fv.value.data(), fv.value.size(), &ver); + RedisAppendLen(raw, static_cast(ver), ":"); + continue; + } + RedisAppendLenUint64(raw, fv.value.size(), "$"); + RedisAppendContent(raw, fv.value); + } + res_.AppendArrayLenUint64(fvs.size() * 2); + res_.AppendStringRaw(raw); +} + +void DiskRecoveryCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameDiskRecovery); + return; + } +} + +void DiskRecoveryCmd::Do() { + struct statvfs disk_info; + int ret = statvfs(g_pika_conf->db_path().c_str(), &disk_info); + if (ret == -1) { + std::stringstream tmp_stream; + tmp_stream << "statvfs error:" << strerror(errno); + const std::string res = tmp_stream.str(); + res_.SetRes(CmdRes::kErrOther, res); + return; + } + int64_t least_free_size = g_pika_conf->least_resume_free_disk_size(); + uint64_t free_size = disk_info.f_bsize * disk_info.f_bfree; + if (free_size < least_free_size) { + res_.SetRes(CmdRes::kErrOther, "The available disk capacity is insufficient"); + return; + } + std::shared_mutex dbs_rw; + std::shared_lock db_rwl(dbs_rw); + // loop every db + for (const auto& db_item : g_pika_server->GetDB()) { + if (!db_item.second) { + continue; + } + db_item.second->SetBinlogIoErrorrelieve(); + background_errors_.clear(); + db_item.second->DBLockShared(); + db_item.second->storage()->GetUsage(storage::PROPERTY_TYPE_ROCKSDB_BACKGROUND_ERRORS, &background_errors_); + db_item.second->DBUnlockShared(); + for (const auto &item: background_errors_) { + if (item.second != 0) { + rocksdb::Status s = db_item.second->storage()->GetDBByIndex(item.first)->Resume(); + if (!s.ok()) { + res_.SetRes(CmdRes::kErrOther, "The restore operation failed."); + } } + } + } + res_.SetRes(CmdRes::kOk, "The disk error has been recovered"); +} + +void ClearReplicationIDCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + 
res_.SetRes(CmdRes::kWrongNum, kCmdNameClearReplicationID); + return; + } +} + +void ClearReplicationIDCmd::Do() { + g_pika_conf->SetReplicationID(""); + g_pika_conf->SetInternalUsedUnFinishedFullSync(""); + g_pika_conf->ConfigRewriteReplicationID(); + res_.SetRes(CmdRes::kOk, "ReplicationID is cleared"); +} + +void DisableWalCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameDisableWal); + return; + } +} + +void DisableWalCmd::Do() { + std::string option = argv_[1].data(); + bool is_wal_disable = false; + if (option.compare("true") == 0) { + is_wal_disable = true; + } else if (option.compare("false") == 0) { + is_wal_disable = false; + } else { + res_.SetRes(CmdRes::kErrOther, "Invalid parameter"); + return; + } + db_->storage()->DisableWal(is_wal_disable); + res_.SetRes(CmdRes::kOk, "Wal options is changed"); +} + +void CacheCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameCache); + return; + } + if (!strcasecmp(argv_[1].data(), "clear")) { + if (argv_.size() == 3 && !strcasecmp(argv_[2].data(), "db")) { + condition_ = kCLEAR_DB; + } else if (argv_.size() == 3 && !strcasecmp(argv_[2].data(), "hitratio")) { + condition_ = kCLEAR_HITRATIO; + } else { + res_.SetRes(CmdRes::kErrOther, "Unknown cache subcommand or wrong # of args."); + } + } else if (argv_.size() >= 3 && !strcasecmp(argv_[1].data(), "del")) { + condition_ = kDEL_KEYS; + keys_.assign(argv_.begin() + 2, argv_.end()); + } else if (argv_.size() == 2 && !strcasecmp(argv_[1].data(), "randomkey")) { + condition_ = kRANDOM_KEY; + } else { + res_.SetRes(CmdRes::kErrOther, "Unknown cache subcommand or wrong # of args."); + } + return; +} + +void CacheCmd::Do() { + std::string key; + switch (condition_) { + case kCLEAR_DB: + g_pika_server->ClearCacheDbAsync(db_); + res_.SetRes(CmdRes::kOk); + break; + case kCLEAR_HITRATIO: + g_pika_server->ClearHitRatio(db_); + res_.SetRes(CmdRes::kOk); break; - case 3: - MallocExtension::instance()->ReleaseFreeMemory(); + case kDEL_KEYS: + db_->cache()->Del(keys_); res_.SetRes(CmdRes::kOk); + break; + case kRANDOM_KEY: + s_ = db_->cache()->RandomKey(&key); + if (!s_.ok()) { + res_.AppendStringLen(-1); + } else { + res_.AppendStringLen(key.size()); + res_.AppendContent(key); + } + break; + default: + res_.SetRes(CmdRes::kErrOther, "Unknown cmd"); + break; } + return; } -#endif -void PKPatternMatchDelCmd::DoInitial() { +void ClearCacheCmd::DoInitial() { if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePKPatternMatchDel); + res_.SetRes(CmdRes::kWrongNum, kCmdNameClearCache); return; - } - pattern_ = argv_[1]; - if (!strcasecmp(argv_[2].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[2].data(), "list")) { - type_ = blackwidow::kLists; - } else if (!strcasecmp(argv_[2].data(), "string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[2].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[2].data(), "hash")) { - type_ = blackwidow::kHashes; - } else { - res_.SetRes(CmdRes::kInvalidDbType, kCmdNamePKPatternMatchDel); + } +} + +void ClearCacheCmd::Do() { + // clean cache + if (PIKA_CACHE_NONE != g_pika_conf->cache_mode()) { + g_pika_server->ClearCacheDbAsync(db_); + } + res_.SetRes(CmdRes::kOk, "Cache is cleared"); +} + +#ifdef WITH_COMMAND_DOCS + +bool CommandCmd::CommandFieldCompare::operator()(const std::string& a, const std::string& b) const { + int av{0}; + int bv{0}; + if (auto avi = kFieldNameOrder.find(a); avi != 
kFieldNameOrder.end()) { + av = avi->second; + } + if (auto bvi = kFieldNameOrder.find(b); bvi != kFieldNameOrder.end()) { + bv = bvi->second; + } + return av < bv; +} + +CmdRes& CommandCmd::EncodableInt::EncodeTo(CmdRes& res) const { + res.AppendInteger(value_); + return res; +} + +CommandCmd::EncodablePtr CommandCmd::EncodableInt::MergeFrom(const CommandCmd::EncodablePtr& other) const { + if (auto pe = std::dynamic_pointer_cast(other)) { + return std::make_shared(value_ + pe->value_); + } + return std::make_shared(value_); +} + +CmdRes& CommandCmd::EncodableString::EncodeTo(CmdRes& res) const { + res.AppendString(value_); + return res; +} + +CommandCmd::EncodablePtr CommandCmd::EncodableString::MergeFrom(const CommandCmd::EncodablePtr& other) const { + if (auto pe = std::dynamic_pointer_cast(other)) { + return std::make_shared(value_ + pe->value_); + } + return std::make_shared(value_); +} + +template +CmdRes& CommandCmd::EncodableMap::EncodeTo(CmdRes& res, const Map& map, const Map& specialization) { + std::string raw_string; + RedisAppendLen(raw_string, map.size() * 2, kPrefix); + res.AppendStringRaw(raw_string); + for (const auto& kv : map) { + res.AppendString(kv.first); + if (auto iter = specialization.find(kv.first); iter != specialization.end()) { + res << *(*kv.second + iter->second); + } else { + res << *kv.second; + } + } + return res; +} + +CmdRes& CommandCmd::EncodableMap::EncodeTo(CmdRes& res) const { return EncodeTo(res, values_); } + +CommandCmd::EncodablePtr CommandCmd::EncodableMap::MergeFrom(const CommandCmd::EncodablePtr& other) const { + if (auto pe = std::dynamic_pointer_cast(other)) { + auto values = CommandCmd::EncodableMap::RedisMap(values_.cbegin(), values_.cend()); + for (const auto& pair : pe->values_) { + auto iter = values.find(pair.first); + if (iter == values.end()) { + values[pair.first] = pair.second; + } else { + iter->second = (*iter->second + pair.second); + } + } + return std::make_shared(values); + } + return std::make_shared( + CommandCmd::EncodableMap::RedisMap(values_.cbegin(), values_.cend())); +} + +CmdRes& CommandCmd::EncodableSet::EncodeTo(CmdRes& res) const { + std::string raw_string; + RedisAppendLen(raw_string, values_.size(), kPrefix); + res.AppendStringRaw(raw_string); + for (const auto& item : values_) { + res << *item; + } + return res; +} + +CommandCmd::EncodablePtr CommandCmd::EncodableSet::MergeFrom(const CommandCmd::EncodablePtr& other) const { + if (auto pe = std::dynamic_pointer_cast(other)) { + auto values = std::vector(values_.cbegin(), values_.cend()); + values.insert(values.end(), pe->values_.cbegin(), pe->values_.cend()); + return std::make_shared(values); + } + return std::make_shared( + std::vector(values_.cbegin(), values_.cend())); +} + +CmdRes& CommandCmd::EncodableArray::EncodeTo(CmdRes& res) const { + res.AppendArrayLen(values_.size()); + for (const auto& item : values_) { + res << *item; + } + return res; +} + +CommandCmd::EncodablePtr CommandCmd::EncodableArray::MergeFrom(const CommandCmd::EncodablePtr& other) const { + if (auto pe = std::dynamic_pointer_cast(other)) { + auto values = std::vector(values_.cbegin(), values_.cend()); + values.insert(values.end(), pe->values_.cbegin(), pe->values_.cend()); + return std::make_shared(values); + } + return std::make_shared( + std::vector(values_.cbegin(), values_.cend())); +} + +CmdRes& CommandCmd::EncodableStatus::EncodeTo(CmdRes& res) const { + res.AppendStringRaw(kPrefix + value_ + kNewLine); + return res; +} + +CommandCmd::EncodablePtr 
CommandCmd::EncodableStatus::MergeFrom(const CommandCmd::EncodablePtr& other) const { + if (auto pe = std::dynamic_pointer_cast(other)) { + return std::make_shared(value_ + pe->value_); + } + return std::make_shared(value_); +} + +const std::unordered_map CommandCmd::CommandFieldCompare::kFieldNameOrder{ + {kPikaField, 0}, {"name", 100}, {"type", 101}, + {"spec", 102}, {"index", 103}, {"display_text", 104}, + {"key_spec_index", 105}, {"token", 106}, {"summary", 107}, + {"since", 108}, {"group", 109}, {"complexity", 110}, + {"module", 111}, {"doc_flags", 112}, {"deprecated_since", 113}, + {"notes", 114}, {"flags", 15}, {"begin_search", 116}, + {"replaced_by", 17}, {"history", 18}, {"arguments", 119}, + {"subcommands", 120}, {"keyword", 121}, {"startfrom", 122}, + {"find_keys", 123}, {"lastkey", 124}, {"keynum", 125}, + {"keynumidx", 126}, {"firstkey", 127}, {"keystep", 128}, + {"limit", 129}, +}; +const std::string CommandCmd::EncodableMap::kPrefix = "*"; +const std::string CommandCmd::EncodableSet::kPrefix = "*"; +const std::string CommandCmd::EncodableStatus::kPrefix = "+"; + +void CommandCmd::DoInitial() { + if (!CheckArg(argv_.size())) { // The original redis command's arity is -1 + res_.SetRes(CmdRes::kWrongNum, kCmdNameEcho); return; } + if (argv_.size() < 2) { // But currently only docs subcommand is impled + res_.SetRes(CmdRes::kErrOther, "only docs subcommand supported"); + return; + } + if (command_ = argv_[1]; strcasecmp(command_.data(), "docs") != 0) { + res_.SetRes(CmdRes::kErrOther, "unknown command '" + command_ + "'"); + return; + } + cmds_begin_ = argv_.cbegin() + 2; + cmds_end_ = argv_.cend(); } -void PKPatternMatchDelCmd::Do(std::shared_ptr partition) { - int ret = 0; - rocksdb::Status s = partition->db()->PKPatternMatchDel(type_, pattern_, &ret); - if (s.ok()) { - res_.AppendInteger(ret); +extern std::unique_ptr g_pika_cmd_table_manager; + +void CommandCmd::Do(std::shared_ptr dbs) { + std::unordered_map cmds; + std::unordered_map specializations; + if (cmds_begin_ == cmds_end_) { + cmds = kCommandDocs; + specializations.insert(kPikaSpecialization.cbegin(), kPikaSpecialization.cend()); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + for (auto iter = cmds_begin_; iter != cmds_end_; ++iter) { + if (auto cmd = kCommandDocs.find(*iter); cmd != kCommandDocs.end()) { + cmds.insert(*cmd); + } + if (auto specialization = kPikaSpecialization.find(*iter); specialization != kPikaSpecialization.end()) { + specializations.insert(*specialization); + } + } } + for (const auto& cmd : cmds) { + if (!g_pika_cmd_table_manager->CmdExist(cmd.first)) { + specializations[cmd.first] = kNotSupportedSpecialization; + } else if (auto iter = specializations.find(cmd.first); iter == specializations.end()) { + specializations[cmd.first] = kCompatibleSpecialization; + } + } + EncodableMap::EncodeTo(res_, cmds, specializations); } + +#endif // WITH_COMMAND_DOCS diff --git a/tools/pika_migrate/src/pika_auxiliary_thread.cc b/tools/pika_migrate/src/pika_auxiliary_thread.cc index 62a2b22941..003a43c93b 100644 --- a/tools/pika_migrate/src/pika_auxiliary_thread.cc +++ b/tools/pika_migrate/src/pika_auxiliary_thread.cc @@ -3,14 +3,15 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
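// The COMMAND DOCS reply assembled by the Encodable classes above is plain
// RESP2 (HelloCmd hard-wires "proto" to 2), so EncodableMap::EncodeTo, with
// kPrefix = "*", flattens a map of N field/value pairs into an array of 2*N
// bulk strings rather than a RESP3 map. A self-contained sketch of that wire
// format follows; EncodeRespMap is a hypothetical helper, not pika API.
#include <map>
#include <string>

std::string EncodeRespMap(const std::map<std::string, std::string>& m) {
  // "*<2N>\r\n" -- the array header EncodableMap emits via RedisAppendLen
  std::string out = "*" + std::to_string(m.size() * 2) + "\r\n";
  for (const auto& [field, value] : m) {
    out += "$" + std::to_string(field.size()) + "\r\n" + field + "\r\n";  // field as a bulk string
    out += "$" + std::to_string(value.size()) + "\r\n" + value + "\r\n";  // value as a bulk string
  }
  return out;
}
// EncodeRespMap({{"since", "1.0.0"}}) yields "*2\r\n$5\r\nsince\r\n$5\r\n1.0.0\r\n".
// MergeFrom() is what lets kPikaSpecialization entries override or extend
// kCommandDocs before encoding: ints add, strings concatenate, maps merge per
// key, and sets/arrays append.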
-#include "include/pika_auxiliary_thread.h" - -#include "include/pika_server.h" #include "include/pika_define.h" +#include "include/pika_auxiliary_thread.h" #include "include/pika_rm.h" +#include "include/pika_server.h" extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; + +using namespace std::chrono_literals; PikaAuxiliaryThread::~PikaAuxiliaryThread() { StopThread(); @@ -19,21 +20,19 @@ PikaAuxiliaryThread::~PikaAuxiliaryThread() { void* PikaAuxiliaryThread::ThreadMain() { while (!should_stop()) { - if (g_pika_conf->classic_mode()) { - if (g_pika_server->ShouldMetaSync()) { - g_pika_rm->SendMetaSyncRequest(); - } else if (g_pika_server->MetaSyncDone()) { - g_pika_rm->RunSyncSlavePartitionStateMachine(); - } - } else { - g_pika_rm->RunSyncSlavePartitionStateMachine(); + if (g_pika_server->ShouldMetaSync()) { + g_pika_rm->SendMetaSyncRequest(); + } else if (g_pika_server->MetaSyncDone()) { + g_pika_rm->RunSyncSlaveDBStateMachine(); } - Status s = g_pika_rm->CheckSyncTimeout(slash::NowMicros()); + pstd::Status s = g_pika_rm->CheckSyncTimeout(pstd::NowMicros()); if (!s.ok()) { LOG(WARNING) << s.ToString(); } + g_pika_server->CheckLeaderProtectedMode(); + // TODO(whoiami) timeout s = g_pika_server->TriggerSendBinlogSync(); if (!s.ok()) { @@ -41,15 +40,13 @@ void* PikaAuxiliaryThread::ThreadMain() { } // send to peer int res = g_pika_server->SendToPeer(); - if (!res) { + if (res == 0) { // sleep 100 ms - mu_.Lock(); - cv_.TimedWait(100); - mu_.Unlock(); + std::unique_lock lock(mu_); + cv_.wait_for(lock, 100ms); } else { - //LOG_EVERY_N(INFO, 1000) << "Consume binlog number " << res; + // LOG_EVERY_N(INFO, 1000) << "Consume binlog number " << res; } } - return NULL; + return nullptr; } - diff --git a/tools/pika_migrate/src/pika_binlog.cc b/tools/pika_migrate/src/pika_binlog.cc index 7c71b32bba..6f4ed2861d 100644 --- a/tools/pika_migrate/src/pika_binlog.cc +++ b/tools/pika_migrate/src/pika_binlog.cc @@ -5,54 +5,52 @@ #include "include/pika_binlog.h" -#include +#include #include +#include + +#include #include "include/pika_binlog_transverter.h" +#include "pstd/include/pstd_defer.h" +#include "pstd_status.h" -using slash::RWLock; +using pstd::Status; -std::string NewFileName(const std::string name, const uint32_t current) { +std::string NewFileName(const std::string& name, const uint32_t current) { char buf[256]; snprintf(buf, sizeof(buf), "%s%u", name.c_str(), current); - return std::string(buf); + return {buf}; } /* * Version */ -Version::Version(slash::RWFile *save) - : pro_num_(0), - pro_offset_(0), - logic_id_(0), - save_(save) { - assert(save_ != NULL); - - pthread_rwlock_init(&rwlock_, NULL); +Version::Version(const std::shared_ptr& save) : save_(save) { + assert(save_ != nullptr); } -Version::~Version() { - StableSave(); - pthread_rwlock_destroy(&rwlock_); -} +Version::~Version() { StableSave(); } Status Version::StableSave() { - char *p = save_->GetData(); + char* p = save_->GetData(); memcpy(p, &pro_num_, sizeof(uint32_t)); p += 4; memcpy(p, &pro_offset_, sizeof(uint64_t)); p += 8; memcpy(p, &logic_id_, sizeof(uint64_t)); p += 8; + memcpy(p, &term_, sizeof(uint32_t)); return Status::OK(); } Status Version::Init() { Status s; - if (save_->GetData() != NULL) { - memcpy((char*)(&pro_num_), save_->GetData(), sizeof(uint32_t)); - memcpy((char*)(&pro_offset_), save_->GetData() + 4, sizeof(uint64_t)); - memcpy((char*)(&logic_id_), save_->GetData() + 12, sizeof(uint64_t)); + if (save_->GetData()) { + memcpy(reinterpret_cast(&pro_num_), 
save_->GetData(), sizeof(uint32_t)); + memcpy(reinterpret_cast(&pro_offset_), save_->GetData() + 4, sizeof(uint64_t)); + memcpy(reinterpret_cast(&logic_id_), save_->GetData() + 12, sizeof(uint64_t)); + memcpy(reinterpret_cast(&term_), save_->GetData() + 20, sizeof(uint32_t)); return Status::OK(); } else { return Status::Corruption("version init error"); @@ -62,64 +60,59 @@ Status Version::Init() { /* * Binlog */ -Binlog::Binlog(const std::string& binlog_path, const int file_size) : - consumer_num_(0), - version_(NULL), - queue_(NULL), - versionfile_(NULL), - pro_num_(0), - pool_(NULL), - exit_all_consume_(false), - binlog_path_(binlog_path), - file_size_(file_size) { - +Binlog::Binlog(std::string binlog_path, const int file_size) + : opened_(false), + binlog_path_(std::move(binlog_path)), + file_size_(file_size), + binlog_io_error_(false) { // To intergrate with old version, we don't set mmap file size to 100M; - //slash::SetMmapBoundSize(file_size); - //slash::kMmapBoundSize = 1024 * 1024 * 100; + // pstd::SetMmapBoundSize(file_size); + // pstd::kMmapBoundSize = 1024 * 1024 * 100; Status s; - slash::CreateDir(binlog_path_); + pstd::CreateDir(binlog_path_); - filename = binlog_path_ + kBinlogPrefix; + filename_ = binlog_path_ + kBinlogPrefix; const std::string manifest = binlog_path_ + kManifest; std::string profile; - if (!slash::FileExists(manifest)) { + if (!pstd::FileExists(manifest)) { LOG(INFO) << "Binlog: Manifest file not exist, we create a new one."; - profile = NewFileName(filename, pro_num_); - s = slash::NewWritableFile(profile, &queue_); + profile = NewFileName(filename_, pro_num_); + s = pstd::NewWritableFile(profile, queue_); if (!s.ok()) { - LOG(FATAL) << "Binlog: new " << filename << " " << s.ToString(); + LOG(FATAL) << "Binlog: new " << filename_ << " " << s.ToString(); } - - - s = slash::NewRWFile(manifest, &versionfile_); + std::unique_ptr tmp_file; + s = pstd::NewRWFile(manifest, tmp_file); + versionfile_.reset(tmp_file.release()); if (!s.ok()) { LOG(FATAL) << "Binlog: new versionfile error " << s.ToString(); } - version_ = new Version(versionfile_); + version_ = std::make_unique(versionfile_); version_->StableSave(); } else { LOG(INFO) << "Binlog: Find the exist file."; - - s = slash::NewRWFile(manifest, &versionfile_); + std::unique_ptr tmp_file; + s = pstd::NewRWFile(manifest, tmp_file); + versionfile_.reset(tmp_file.release()); if (s.ok()) { - version_ = new Version(versionfile_); + version_ = std::make_unique(versionfile_); version_->Init(); pro_num_ = version_->pro_num_; // Debug - //version_->debug(); + // version_->debug(); } else { LOG(FATAL) << "Binlog: open versionfile error"; } - profile = NewFileName(filename, pro_num_); + profile = NewFileName(filename_, pro_num_); DLOG(INFO) << "Binlog: open profile " << profile; - s = slash::AppendWritableFile(profile, &queue_, version_->pro_offset_); + s = pstd::AppendWritableFile(profile, queue_, version_->pro_offset_); if (!s.ok()) { LOG(FATAL) << "Binlog: Open file " << profile << " error " << s.ToString(); } @@ -132,34 +125,80 @@ Binlog::Binlog(const std::string& binlog_path, const int file_size) : } Binlog::~Binlog() { - delete version_; - delete versionfile_; + std::lock_guard l(mutex_); + Close(); +} - delete queue_; +void Binlog::Close() { + if (!opened_.load()) { + return; + } + opened_.store(false); } void Binlog::InitLogFile() { - assert(queue_ != NULL); + assert(queue_ != nullptr); uint64_t filesize = queue_->Filesize(); - block_offset_ = filesize % kBlockSize; + block_offset_ = static_cast(filesize % 
kBlockSize); + + opened_.store(true); +} + +Status Binlog::IsOpened() { + if (!opened_.load()) { + return Status::Busy("Binlog is not open yet"); + } + return Status::OK(); } -Status Binlog::GetProducerStatus(uint32_t* filenum, uint64_t* pro_offset, uint64_t* logic_id) { - slash::RWLock(&(version_->rwlock_), false); +Status Binlog::GetProducerStatus(uint32_t* filenum, uint64_t* pro_offset, uint32_t* term, uint64_t* logic_id) { + if (!opened_.load()) { + return Status::Busy("Binlog is not open yet"); + } + + std::shared_lock l(version_->rwlock_); *filenum = version_->pro_num_; *pro_offset = version_->pro_offset_; - if (logic_id != NULL) { + if (logic_id) { *logic_id = version_->logic_id_; } + if (term) { + *term = version_->term_; + } return Status::OK(); } // Note: mutex lock should be held -Status Binlog::Put(const std::string &item) { - return Put(item.c_str(), item.size()); +Status Binlog::Put(const std::string& item) { + if (!opened_.load()) { + return Status::Busy("Binlog is not open yet"); + } + uint32_t filenum = 0; + uint32_t term = 0; + uint64_t offset = 0; + uint64_t logic_id = 0; + + Lock(); + DEFER { + Unlock(); + }; + + Status s = GetProducerStatus(&filenum, &offset, &term, &logic_id); + if (!s.ok()) { + return s; + } + logic_id++; + std::string data = PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, + time(nullptr), term, logic_id, filenum, offset, item, {}); + + s = Put(data.c_str(), static_cast(data.size())); + if (!s.ok()) { + binlog_io_error_.store(true); + } + return s; } // Note: mutex lock should be held @@ -169,15 +208,19 @@ Status Binlog::Put(const char* item, int len) { /* Check to roll log file */ uint64_t filesize = queue_->Filesize(); if (filesize > file_size_) { - delete queue_; - queue_ = NULL; - + std::unique_ptr queue; + std::string profile = NewFileName(filename_, pro_num_ + 1); + s = pstd::NewWritableFile(profile, queue); + if (!s.ok()) { + LOG(ERROR) << "Binlog: new " << filename_ << " " << s.ToString(); + return s; + } + queue_.reset(); + queue_ = std::move(queue); pro_num_++; - std::string profile = NewFileName(filename, pro_num_); - slash::NewWritableFile(profile, &queue_); { - slash::RWLock(&(version_->rwlock_), true); + std::lock_guard l(version_->rwlock_); version_->pro_offset_ = 0; version_->pro_num_ = pro_num_; version_->StableSave(); @@ -186,9 +229,9 @@ Status Binlog::Put(const char* item, int len) { } int pro_offset; - s = Produce(Slice(item, len), &pro_offset); + s = Produce(pstd::Slice(item, len), &pro_offset); if (s.ok()) { - slash::RWLock(&(version_->rwlock_), true); + std::lock_guard l(version_->rwlock_); version_->pro_offset_ = pro_offset; version_->logic_id_++; version_->StableSave(); @@ -196,53 +239,53 @@ Status Binlog::Put(const char* item, int len) { return s; } - -Status Binlog::EmitPhysicalRecord(RecordType t, const char *ptr, size_t n, int *temp_pro_offset) { - Status s; - assert(n <= 0xffffff); - assert(block_offset_ + kHeaderSize + n <= kBlockSize); - - char buf[kHeaderSize]; - - uint64_t now; - struct timeval tv; - gettimeofday(&tv, NULL); - now = tv.tv_sec; - buf[0] = static_cast(n & 0xff); - buf[1] = static_cast((n & 0xff00) >> 8); - buf[2] = static_cast(n >> 16); - buf[3] = static_cast(now & 0xff); - buf[4] = static_cast((now & 0xff00) >> 8); - buf[5] = static_cast((now & 0xff0000) >> 16); - buf[6] = static_cast((now & 0xff000000) >> 24); - buf[7] = static_cast(t); - - s = queue_->Append(Slice(buf, kHeaderSize)); + +Status Binlog::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n, int* temp_pro_offset) { + 
Status s; + assert(n <= 0xffffff); + assert(block_offset_ + kHeaderSize + n <= kBlockSize); + + char buf[kHeaderSize]; + + uint64_t now; + struct timeval tv; + gettimeofday(&tv, nullptr); + now = tv.tv_sec; + buf[0] = static_cast(n & 0xff); + buf[1] = static_cast((n & 0xff00) >> 8); + buf[2] = static_cast(n >> 16); + buf[3] = static_cast(now & 0xff); + buf[4] = static_cast((now & 0xff00) >> 8); + buf[5] = static_cast((now & 0xff0000) >> 16); + buf[6] = static_cast((now & 0xff000000) >> 24); + buf[7] = static_cast(t); + + s = queue_->Append(pstd::Slice(buf, kHeaderSize)); + if (s.ok()) { + s = queue_->Append(pstd::Slice(ptr, n)); if (s.ok()) { - s = queue_->Append(Slice(ptr, n)); - if (s.ok()) { - s = queue_->Flush(); - } + s = queue_->Flush(); } - block_offset_ += static_cast(kHeaderSize + n); + } + block_offset_ += static_cast(kHeaderSize + n); - *temp_pro_offset += kHeaderSize + n; - return s; + *temp_pro_offset += static_cast(kHeaderSize + n); + return s; } -Status Binlog::Produce(const Slice &item, int *temp_pro_offset) { +Status Binlog::Produce(const pstd::Slice& item, int* temp_pro_offset) { Status s; - const char *ptr = item.data(); + const char* ptr = item.data(); size_t left = item.size(); bool begin = true; - *temp_pro_offset = version_->pro_offset_; + *temp_pro_offset = static_cast(version_->pro_offset_); do { const int leftover = static_cast(kBlockSize) - block_offset_; assert(leftover >= 0); if (static_cast(leftover) < kHeaderSize) { if (leftover > 0) { - s = queue_->Append(Slice("\x00\x00\x00\x00\x00\x00\x00", leftover)); + s = queue_->Append(pstd::Slice("\x00\x00\x00\x00\x00\x00\x00", leftover)); if (!s.ok()) { return s; } @@ -273,8 +316,8 @@ Status Binlog::Produce(const Slice &item, int *temp_pro_offset) { return s; } - -Status Binlog::AppendPadding(slash::WritableFile* file, uint64_t* len) { + +Status Binlog::AppendPadding(pstd::WritableFile* file, uint64_t* len) { if (*len < kHeaderSize) { return Status::OK(); } @@ -283,7 +326,7 @@ Status Binlog::AppendPadding(slash::WritableFile* file, uint64_t* len) { char buf[kBlockSize]; uint64_t now; struct timeval tv; - gettimeofday(&tv, NULL); + gettimeofday(&tv, nullptr); now = tv.tv_sec; uint64_t left = *len; @@ -293,11 +336,7 @@ Status Binlog::AppendPadding(slash::WritableFile* file, uint64_t* len) { break; } else { uint32_t bsize = size - kHeaderSize; - std::string binlog = PikaBinlogTransverter::ConstructPaddingBinlog( - BinlogType::TypeFirst, bsize); - if (binlog.empty()) { - break; - } + std::string binlog(bsize, '*'); buf[0] = static_cast(bsize & 0xff); buf[1] = static_cast((bsize & 0xff00) >> 8); buf[2] = static_cast(bsize >> 16); @@ -305,10 +344,11 @@ buf[4] = static_cast((now & 0xff00) >> 8); buf[5] = static_cast((now & 0xff0000) >> 16); buf[6] = static_cast((now & 0xff000000) >> 24); - buf[7] = static_cast(kFullType); - s = file->Append(Slice(buf, kHeaderSize)); + // kBadRecord here + buf[7] = static_cast(kBadRecord); + s = file->Append(pstd::Slice(buf, kHeaderSize)); if (s.ok()) { - s = file->Append(Slice(binlog.data(), binlog.size())); + s = file->Append(pstd::Slice(binlog.data(), binlog.size())); if (s.ok()) { s = file->Flush(); left -= size; @@ -317,41 +357,81 @@ } } *len -= left; + if (left != 0) { + LOG(WARNING) << "AppendPadding left bytes: " << left << " is less than kHeaderSize"; + } return s; } -Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset) 
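// --- Editor's sketch (not part of the patch) --------------------------------
// EmitPhysicalRecord() above packs an 8-byte record header: bytes 0-2 hold the
// payload length (24-bit little-endian), bytes 3-6 the timestamp (32-bit
// little-endian) and byte 7 the RecordType. A minimal decoder for that layout,
// with illustrative names, would look like this:
#include <cstdint>

struct RecordHeader {
  uint32_t length;  // payload bytes following the header
  uint32_t time;    // tv.tv_sec as written by the producer
  uint8_t type;     // kFullType, kFirstType, kMiddleType, kLastType, ...
};

inline RecordHeader DecodeRecordHeader(const unsigned char* buf /* kHeaderSize bytes */) {
  RecordHeader h;
  h.length = static_cast<uint32_t>(buf[0]) | (static_cast<uint32_t>(buf[1]) << 8) |
             (static_cast<uint32_t>(buf[2]) << 16);
  h.time = static_cast<uint32_t>(buf[3]) | (static_cast<uint32_t>(buf[4]) << 8) |
           (static_cast<uint32_t>(buf[5]) << 16) | (static_cast<uint32_t>(buf[6]) << 24);
  h.type = buf[7];
  return h;
}
// ----------------------------------------------------------------------------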
{ - slash::MutexLock l(&mutex_); +Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset, uint32_t term, uint64_t index) { + if (!opened_.load()) { + return Status::Busy("Binlog is not open yet"); + } + + std::lock_guard l(mutex_); // offset smaller than the first header if (pro_offset < 4) { pro_offset = 0; } - delete queue_; + queue_.reset(); - std::string init_profile = NewFileName(filename, 0); - if (slash::FileExists(init_profile)) { - slash::DeleteFile(init_profile); + std::string init_profile = NewFileName(filename_, 0); + if (pstd::FileExists(init_profile)) { + pstd::DeleteFile(init_profile); } - std::string profile = NewFileName(filename, pro_num); - if (slash::FileExists(profile)) { - slash::DeleteFile(profile); + std::string profile = NewFileName(filename_, pro_num); + if (pstd::FileExists(profile)) { + pstd::DeleteFile(profile); } - slash::NewWritableFile(profile, &queue_); - Binlog::AppendPadding(queue_, &pro_offset); + pstd::NewWritableFile(profile, queue_); + Binlog::AppendPadding(queue_.get(), &pro_offset); pro_num_ = pro_num; { - slash::RWLock(&(version_->rwlock_), true); + std::lock_guard l(version_->rwlock_); version_->pro_num_ = pro_num; version_->pro_offset_ = pro_offset; + version_->term_ = term; + version_->logic_id_ = index; version_->StableSave(); } InitLogFile(); return Status::OK(); } + +Status Binlog::Truncate(uint32_t pro_num, uint64_t pro_offset, uint64_t index) { + queue_.reset(); + std::string profile = NewFileName(filename_, pro_num); + const int fd = open(profile.c_str(), O_RDWR | O_CLOEXEC, 0644); + if (fd < 0) { + return Status::IOError("fd open failed"); + } + if (ftruncate(fd, static_cast(pro_offset)) != 0) { + return Status::IOError("ftruncate failed"); + } + close(fd); + + pro_num_ = pro_num; + { + std::lock_guard l(version_->rwlock_); + version_->pro_num_ = pro_num; + version_->pro_offset_ = pro_offset; + version_->logic_id_ = index; + version_->StableSave(); + } + + Status s = pstd::AppendWritableFile(profile, queue_, version_->pro_offset_); + if (!s.ok()) { + return s; + } + + InitLogFile(); + + return Status::OK(); +} diff --git a/tools/pika_migrate/src/pika_binlog_reader.cc b/tools/pika_migrate/src/pika_binlog_reader.cc index 46c7e8c604..b825d8864d 100644 --- a/tools/pika_migrate/src/pika_binlog_reader.cc +++ b/tools/pika_migrate/src/pika_binlog_reader.cc @@ -7,38 +7,22 @@ #include -PikaBinlogReader::PikaBinlogReader(uint32_t cur_filenum, - uint64_t cur_offset) +using pstd::Status; + +PikaBinlogReader::PikaBinlogReader(uint32_t cur_filenum, uint64_t cur_offset) : cur_filenum_(cur_filenum), cur_offset_(cur_offset), - logger_(nullptr), - queue_(nullptr), - backing_store_(new char[kBlockSize]), + backing_store_(std::make_unique(kBlockSize)), buffer_() { last_record_offset_ = cur_offset % kBlockSize; - pthread_rwlock_init(&rwlock_, NULL); } -PikaBinlogReader::PikaBinlogReader() - : cur_filenum_(0), - cur_offset_(0), - logger_(nullptr), - queue_(nullptr), - backing_store_(new char[kBlockSize]), - buffer_() { +PikaBinlogReader::PikaBinlogReader() : backing_store_(std::make_unique(kBlockSize)), buffer_() { last_record_offset_ = 0 % kBlockSize; - pthread_rwlock_init(&rwlock_, NULL); -} - - -PikaBinlogReader::~PikaBinlogReader() { - delete[] backing_store_; - delete queue_; - pthread_rwlock_destroy(&rwlock_); } void PikaBinlogReader::GetReaderStatus(uint32_t* cur_filenum, uint64_t* cur_offset) { - slash::RWLock(&(rwlock_), false); + std::shared_lock l(rwlock_); *cur_filenum = cur_filenum_; *cur_offset = cur_offset_; } @@ -47,31 +31,33 @@ 
bool PikaBinlogReader::ReadToTheEnd() { uint32_t pro_num; uint64_t pro_offset; logger_->GetProducerStatus(&pro_num, &pro_offset); - slash::RWLock(&(rwlock_), false); + std::shared_lock l(rwlock_); return (pro_num == cur_filenum_ && pro_offset == cur_offset_); } -int PikaBinlogReader::Seek(std::shared_ptr logger, uint32_t filenum, uint64_t offset) { - std::string confile = NewFileName(logger->filename, filenum); - if (!slash::FileExists(confile)) { +int PikaBinlogReader::Seek(const std::shared_ptr& logger, uint32_t filenum, uint64_t offset) { + std::string confile = NewFileName(logger->filename(), filenum); + if (!pstd::FileExists(confile)) { + LOG(WARNING) << confile << " does not exist"; return -1; } - slash::SequentialFile* readfile; - if (!slash::NewSequentialFile(confile, &readfile).ok()) { + std::unique_ptr readfile; + if (!pstd::NewSequentialFile(confile, readfile).ok()) { + LOG(WARNING) << "NewSequentialFile " << confile << " failed"; return -1; } if (queue_) { - delete queue_; + queue_.reset(); } - queue_ = readfile; + queue_ = std::move(readfile); logger_ = logger; - slash::RWLock(&(rwlock_), true); + std::lock_guard l(rwlock_); cur_filenum_ = filenum; cur_offset_ = offset; last_record_offset_ = cur_filenum_ % kBlockSize; - slash::Status s; + pstd::Status s; uint64_t start_block = (cur_offset_ / kBlockSize) * kBlockSize; s = queue_->Skip((cur_offset_ / kBlockSize) * kBlockSize); uint64_t block_offset = cur_offset_ % kBlockSize; @@ -86,7 +72,7 @@ int PikaBinlogReader::Seek(std::shared_ptr logger, uint32_t filenum, uin } ret = 0; is_error = GetNext(&ret); - if (is_error == true) { + if (is_error) { return -1; } res += ret; @@ -97,12 +83,12 @@ int PikaBinlogReader::Seek(std::shared_ptr logger, uint32_t filenum, uin bool PikaBinlogReader::GetNext(uint64_t* size) { uint64_t offset = 0; - slash::Status s; + pstd::Status s; bool is_error = false; while (true) { buffer_.clear(); - s = queue_->Read(kHeaderSize, &buffer_, backing_store_); + s = queue_->Read(kHeaderSize, &buffer_, backing_store_.get()); if (!s.ok()) { is_error = true; return is_error; @@ -115,18 +101,26 @@ bool PikaBinlogReader::GetNext(uint64_t* size) { const unsigned int type = header[7]; const uint32_t length = a | (b << 8) | (c << 16); + if (length > (kBlockSize - kHeaderSize)) { + return true; + } + if (type == kFullType) { - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); offset += kHeaderSize + length; break; } else if (type == kFirstType) { - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); offset += kHeaderSize + length; } else if (type == kMiddleType) { - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); offset += kHeaderSize + length; } else if (type == kLastType) { - s = queue_->Read(length, &buffer_, backing_store_); + s = queue_->Read(length, &buffer_, backing_store_.get()); + offset += kHeaderSize + length; + break; + } else if (type == kBadRecord) { + s = queue_->Read(length, &buffer_, backing_store_.get()); offset += kHeaderSize + length; break; } else { @@ -138,16 +132,16 @@ bool PikaBinlogReader::GetNext(uint64_t* size) { return is_error; } -unsigned int PikaBinlogReader::ReadPhysicalRecord(slash::Slice *result, uint32_t* filenum, uint64_t* offset) { - slash::Status s; +unsigned int PikaBinlogReader::ReadPhysicalRecord(pstd::Slice* result, uint32_t* filenum, uint64_t* offset) { + pstd::Status s; if (kBlockSize - 
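// --- Editor's sketch (not part of the patch) --------------------------------
// GetNext()/Consume() above reassemble one logical binlog entry from physical
// records: a kFullType record stands alone, while an entry too large for the
// remaining block space is split by the writer into kFirstType + kMiddleType...
// + kLastType fragments that never cross a kBlockSize boundary. The mapping,
// restated with illustrative names:
enum SketchFragmentType { kSketchFull, kSketchFirst, kSketchMiddle, kSketchLast };

inline SketchFragmentType ClassifyFragment(bool is_first_fragment, bool is_last_fragment) {
  if (is_first_fragment && is_last_fragment) return kSketchFull;  // fits in the current block
  if (is_first_fragment) return kSketchFirst;                     // reader starts a new entry
  if (is_last_fragment) return kSketchLast;                       // reader returns the entry
  return kSketchMiddle;                                           // reader keeps appending
}
// ----------------------------------------------------------------------------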
last_record_offset_ <= kHeaderSize) { queue_->Skip(kBlockSize - last_record_offset_); - slash::RWLock(&(rwlock_), true); + std::lock_guard l(rwlock_); cur_offset_ += (kBlockSize - last_record_offset_); last_record_offset_ = 0; } buffer_.clear(); - s = queue_->Read(kHeaderSize, &buffer_, backing_store_); + s = queue_->Read(kHeaderSize, &buffer_, backing_store_.get()); if (s.IsEndFile()) { return kEof; } else if (!s.ok()) { @@ -160,17 +154,22 @@ unsigned int PikaBinlogReader::ReadPhysicalRecord(slash::Slice *result, uint32_t const uint32_t c = static_cast(header[2]) & 0xff; const unsigned int type = header[7]; const uint32_t length = a | (b << 8) | (c << 16); + + if (length > (kBlockSize - kHeaderSize)) { + return kBadRecord; + } + if (type == kZeroType || length == 0) { buffer_.clear(); return kOldRecord; } buffer_.clear(); - s = queue_->Read(length, &buffer_, backing_store_); - *result = slash::Slice(buffer_.data(), buffer_.size()); + s = queue_->Read(length, &buffer_, backing_store_.get()); + *result = pstd::Slice(buffer_.data(), buffer_.size()); last_record_offset_ += kHeaderSize + length; if (s.ok()) { - slash::RWLock(&(rwlock_), true); + std::lock_guard l(rwlock_); *filenum = cur_filenum_; cur_offset_ += (kHeaderSize + length); *offset = cur_offset_; @@ -181,7 +180,7 @@ unsigned int PikaBinlogReader::ReadPhysicalRecord(slash::Slice *result, uint32_t Status PikaBinlogReader::Consume(std::string* scratch, uint32_t* filenum, uint64_t* offset) { Status s; - slash::Slice fragment; + pstd::Slice fragment; while (true) { const unsigned int record_type = ReadPhysicalRecord(&fragment, filenum, offset); @@ -205,6 +204,8 @@ Status PikaBinlogReader::Consume(std::string* scratch, uint32_t* filenum, uint64 case kEof: return Status::EndFile("Eof"); case kBadRecord: + LOG(WARNING) + << "Read a BadRecord; decoding would fail. This may be a padding record written by dbsync and is not processed here"; return Status::IOError("Data Corruption"); case kOldRecord: return Status::EndFile("Eof"); @@ -223,7 +224,7 @@ Status PikaBinlogReader::Consume(std::string* scratch, uint32_t* filenum, uint64 // Append to scratch; // the status will be OK, IOError or Corruption, EndFile; Status PikaBinlogReader::Get(std::string* scratch, uint32_t* filenum, uint64_t* offset) { - if (logger_ == nullptr || queue_ == NULL) { + if (!logger_ || !queue_) { return Status::Corruption("Not seek"); } scratch->clear(); @@ -235,20 +236,20 @@ Status PikaBinlogReader::Get(std::string* scratch, uint32_t* filenum, uint64_t* } s = Consume(scratch, filenum, offset); if (s.IsEndFile()) { - std::string confile = NewFileName(logger_->filename, cur_filenum_ + 1); + std::string confile = NewFileName(logger_->filename(), cur_filenum_ + 1); // sleep 10ms to wait for the produce thread to generate the new binlog usleep(10000); // Rolling to the next file needs a retry; - if (slash::FileExists(confile)) { + if (pstd::FileExists(confile)) { DLOG(INFO) << "BinlogSender roll to new binlog " << confile; - delete queue_; - queue_ = NULL; + queue_.reset(); + queue_ = nullptr; - slash::NewSequentialFile(confile, &(queue_)); + pstd::NewSequentialFile(confile, queue_); { - slash::RWLock(&(rwlock_), true); + std::lock_guard l(rwlock_); cur_filenum_++; cur_offset_ = 0; } @@ -263,5 +264,3 @@ Status PikaBinlogReader::Get(std::string* scratch, uint32_t* filenum, uint64_t* return Status::OK(); } - - diff --git a/tools/pika_migrate/src/pika_binlog_transverter.cc b/tools/pika_migrate/src/pika_binlog_transverter.cc index 702fd6ca5d..a6f3d2b271 100644 --- a/tools/pika_migrate/src/pika_binlog_transverter.cc 
+++ b/tools/pika_migrate/src/pika_binlog_transverter.cc @@ -5,155 +5,132 @@ #include "include/pika_binlog_transverter.h" -#include -#include #include +#include +#include -#include "slash/include/slash_coding.h" +#include "pstd/include/pstd_coding.h" #include "include/pika_command.h" +#include "include/pika_define.h" +#include "storage/storage.h" -uint32_t BinlogItem::exec_time() const { - return exec_time_; -} -uint32_t BinlogItem::server_id() const { - return server_id_; -} +uint32_t BinlogItem::exec_time() const { return exec_time_; } -uint64_t BinlogItem::logic_id() const { - return logic_id_; -} +uint32_t BinlogItem::term_id() const { return term_id_; } -uint32_t BinlogItem::filenum() const { - return filenum_; -} +uint64_t BinlogItem::logic_id() const { return logic_id_; } -uint64_t BinlogItem::offset() const { - return offset_; -} +uint32_t BinlogItem::filenum() const { return filenum_; } -std::string BinlogItem::content() const { - return content_; -} +uint64_t BinlogItem::offset() const { return offset_; } -void BinlogItem::set_exec_time(uint32_t exec_time) { - exec_time_ = exec_time; -} +std::string BinlogItem::content() const { return content_; } -void BinlogItem::set_server_id(uint32_t server_id) { - server_id_ = server_id; -} +void BinlogItem::set_exec_time(uint32_t exec_time) { exec_time_ = exec_time; } -void BinlogItem::set_logic_id(uint64_t logic_id) { - logic_id_ = logic_id; -} +void BinlogItem::set_term_id(uint32_t term_id) { term_id_ = term_id; } -void BinlogItem::set_filenum(uint32_t filenum) { - filenum_ = filenum; -} +void BinlogItem::set_logic_id(uint64_t logic_id) { logic_id_ = logic_id; } -void BinlogItem::set_offset(uint64_t offset) { - offset_ = offset; -} +void BinlogItem::set_filenum(uint32_t filenum) { filenum_ = filenum; } + +void BinlogItem::set_offset(uint64_t offset) { offset_ = offset; } std::string BinlogItem::ToString() const { std::string str; - str.append("exec_time: " + std::to_string(exec_time_)); - str.append(",server_id: " + std::to_string(server_id_)); - str.append(",logic_id: " + std::to_string(logic_id_)); - str.append(",filenum: " + std::to_string(filenum_)); - str.append(",offset: " + std::to_string(offset_)); + str.append("exec_time: " + std::to_string(exec_time_)); + str.append(",term_id: " + std::to_string(term_id_)); + str.append(",logic_id: " + std::to_string(logic_id_)); + str.append(",filenum: " + std::to_string(filenum_)); + str.append(",offset: " + std::to_string(offset_)); str.append("\ncontent: "); - for (size_t idx = 0; idx < content_.size(); ++idx) { - if (content_[idx] == '\n') { + for (char idx : content_) { + if (idx == '\n') { str.append("\\n"); - } else if (content_[idx] == '\r') { + } else if (idx == '\r') { str.append("\\r"); } else { - str.append(1, content_[idx]); + str.append(1, idx); } } str.append("\n"); return str; } -std::string PikaBinlogTransverter::BinlogEncode(BinlogType type, - uint32_t exec_time, - uint32_t server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset, - const std::string& content, - const std::vector& extends) { +std::string PikaBinlogTransverter::BinlogEncode(BinlogType type, uint32_t exec_time, uint32_t term_id, + uint64_t logic_id, uint32_t filenum, uint64_t offset, + const std::string& content, const std::vector& extends) { std::string binlog; - slash::PutFixed16(&binlog, type); - slash::PutFixed32(&binlog, exec_time); - slash::PutFixed32(&binlog, server_id); - slash::PutFixed64(&binlog, logic_id); - slash::PutFixed32(&binlog, filenum); - slash::PutFixed64(&binlog, offset); + 
pstd::PutFixed16(&binlog, type); + pstd::PutFixed32(&binlog, exec_time); + pstd::PutFixed32(&binlog, term_id); + pstd::PutFixed64(&binlog, logic_id); + pstd::PutFixed32(&binlog, filenum); + pstd::PutFixed64(&binlog, offset); uint32_t content_length = content.size(); - slash::PutFixed32(&binlog, content_length); + pstd::PutFixed32(&binlog, content_length); binlog.append(content); return binlog; } -bool PikaBinlogTransverter::BinlogDecode(BinlogType type, - const std::string& binlog, - BinlogItem* binlog_item) { +bool PikaBinlogTransverter::BinlogDecode(BinlogType type, const std::string& binlog, BinlogItem* binlog_item) { uint16_t binlog_type = 0; uint32_t content_length = 0; - std::string binlog_str = binlog; - slash::GetFixed16(&binlog_str, &binlog_type); + pstd::Slice binlog_str = binlog; + pstd::GetFixed16(&binlog_str, &binlog_type); if (binlog_type != type) { LOG(ERROR) << "Binlog Item type error, expect type:" << type << " actual type: " << binlog_type; return false; } - slash::GetFixed32(&binlog_str, &binlog_item->exec_time_); - slash::GetFixed32(&binlog_str, &binlog_item->server_id_); - slash::GetFixed64(&binlog_str, &binlog_item->logic_id_); - slash::GetFixed32(&binlog_str, &binlog_item->filenum_); - slash::GetFixed64(&binlog_str, &binlog_item->offset_); - slash::GetFixed32(&binlog_str, &content_length); + pstd::GetFixed32(&binlog_str, &binlog_item->exec_time_); + pstd::GetFixed32(&binlog_str, &binlog_item->term_id_); + pstd::GetFixed64(&binlog_str, &binlog_item->logic_id_); + pstd::GetFixed32(&binlog_str, &binlog_item->filenum_); + pstd::GetFixed64(&binlog_str, &binlog_item->offset_); + pstd::GetFixed32(&binlog_str, &content_length); if (binlog_str.size() == content_length) { binlog_item->content_.assign(binlog_str.data(), content_length); } else { - LOG(ERROR) << "Binlog Item get content error, expect length:" << content_length << " left length:" << binlog_str.size(); + LOG(ERROR) << "Binlog Item get content error, expect length:" << content_length + << " left length:" << binlog_str.size(); return false; } return true; } /* - * *************************************************Type First Binlog Item Format************************************************** - * | | | | | | | | | - * | 2 Bytes | 4 Bytes | 4 Bytes | 8 Bytes | 4 Bytes | 8 Bytes | 4 Bytes | content length Bytes | - * |---------------------------------------------- 34 Bytes -----------------------------------------------| +******************* Type First Binlog Item Format ****************** + * +-----------------------------------------------------------------+ + * | Type (2 bytes) | Create Time (4 bytes) | Term Id (4 bytes) | + * |-----------------------------------------------------------------| + * | Logic Id (8 bytes) | File Num (4 bytes) | Offset (8 bytes) | + * |-----------------------------------------------------------------| + * | Content Length (4 bytes) | Content (content length bytes) | + * +-----------------------------------------------------------------+ + * |------------------------ 34 Bytes -------------------------------| * * content: *2\r\n$7\r\npadding\r\n$00001\r\n***\r\n * length of *** -> total_len - PADDING_BINLOG_PROTOCOL_SIZE - SPACE_STROE_PARAMETER_LENGTH; * * We allocate five bytes to store the length of the parameter */ -std::string PikaBinlogTransverter::ConstructPaddingBinlog(BinlogType type, - uint32_t size) { +std::string PikaBinlogTransverter::ConstructPaddingBinlog(BinlogType type, uint32_t size) { assert(size <= kBlockSize - kHeaderSize); - assert(BINLOG_ITEM_HEADER_SIZE + 
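// --- Editor's worked check (not part of the patch) ---------------------------
// The 34-byte figure in the diagram above is just the sum of the fixed-width
// fields that BinlogEncode() writes before the content: 2 (type) + 4 (exec
// time) + 4 (term id) + 8 (logic id) + 4 (file num) + 8 (offset) + 4 (content
// length) = 34, i.e. BINLOG_ITEM_HEADER_SIZE.
#include <cstdint>

static_assert(sizeof(uint16_t) +  // type, PutFixed16
              sizeof(uint32_t) +  // exec time, PutFixed32
              sizeof(uint32_t) +  // term id, PutFixed32
              sizeof(uint64_t) +  // logic id, PutFixed64
              sizeof(uint32_t) +  // file num, PutFixed32
              sizeof(uint64_t) +  // offset, PutFixed64
              sizeof(uint32_t)    // content length, PutFixed32
                  == 34,
              "binlog item header is 34 bytes");
// -----------------------------------------------------------------------------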
PADDING_BINLOG_PROTOCOL_SIZE - + SPACE_STROE_PARAMETER_LENGTH <= size); + assert(BINLOG_ITEM_HEADER_SIZE + PADDING_BINLOG_PROTOCOL_SIZE + SPACE_STROE_PARAMETER_LENGTH <= size); std::string binlog; - slash::PutFixed16(&binlog, type); - slash::PutFixed32(&binlog, 0); - slash::PutFixed32(&binlog, 0); - slash::PutFixed64(&binlog, 0); - slash::PutFixed32(&binlog, 0); - slash::PutFixed64(&binlog, 0); - int32_t content_len = size - BINLOG_ITEM_HEADER_SIZE; - int32_t parameter_len = content_len - PADDING_BINLOG_PROTOCOL_SIZE - - SPACE_STROE_PARAMETER_LENGTH; + pstd::PutFixed16(&binlog, type); + pstd::PutFixed32(&binlog, 0); + pstd::PutFixed32(&binlog, 0); + pstd::PutFixed64(&binlog, 0); + pstd::PutFixed32(&binlog, 0); + pstd::PutFixed64(&binlog, 0); + auto content_len = static_cast(size - BINLOG_ITEM_HEADER_SIZE); + int32_t parameter_len = content_len - PADDING_BINLOG_PROTOCOL_SIZE - SPACE_STROE_PARAMETER_LENGTH; if (parameter_len < 0) { - return std::string(); + return {}; } std::string content; @@ -167,7 +144,7 @@ std::string PikaBinlogTransverter::ConstructPaddingBinlog(BinlogType type, std::istringstream is(os.str()); is >> parameter_len_str; if (parameter_len_str.size() > SPACE_STROE_PARAMETER_LENGTH) { - return std::string(); + return {}; } content.append("$"); @@ -176,25 +153,24 @@ std::string PikaBinlogTransverter::ConstructPaddingBinlog(BinlogType type, content.append(kNewLine); RedisAppendContent(content, std::string(parameter_len, '*')); - slash::PutFixed32(&binlog, content_len); + pstd::PutFixed32(&binlog, content_len); binlog.append(content); return binlog; } -bool PikaBinlogTransverter::BinlogItemWithoutContentDecode(BinlogType type, - const std::string& binlog, - BinlogItem* binlog_item) { +bool PikaBinlogTransverter::BinlogItemWithoutContentDecode(BinlogType type, const std::string& binlog, + BinlogItem* binlog_item) { uint16_t binlog_type = 0; - std::string binlog_str = binlog; - slash::GetFixed16(&binlog_str, &binlog_type); + pstd::Slice binlog_str = binlog; + pstd::GetFixed16(&binlog_str, &binlog_type); if (binlog_type != type) { LOG(ERROR) << "Binlog Item type error, expect type:" << type << " actual type: " << binlog_type; return false; } - slash::GetFixed32(&binlog_str, &binlog_item->exec_time_); - slash::GetFixed32(&binlog_str, &binlog_item->server_id_); - slash::GetFixed64(&binlog_str, &binlog_item->logic_id_); - slash::GetFixed32(&binlog_str, &binlog_item->filenum_); - slash::GetFixed64(&binlog_str, &binlog_item->offset_); + pstd::GetFixed32(&binlog_str, &binlog_item->exec_time_); + pstd::GetFixed32(&binlog_str, &binlog_item->term_id_); + pstd::GetFixed64(&binlog_str, &binlog_item->logic_id_); + pstd::GetFixed32(&binlog_str, &binlog_item->filenum_); + pstd::GetFixed64(&binlog_str, &binlog_item->offset_); return true; } diff --git a/tools/pika_migrate/src/pika_bit.cc b/tools/pika_migrate/src/pika_bit.cc index 0815acb040..ee48d0ba5f 100644 --- a/tools/pika_migrate/src/pika_bit.cc +++ b/tools/pika_migrate/src/pika_bit.cc @@ -5,8 +5,14 @@ #include "include/pika_bit.h" -#include "slash/include/slash_string.h" +#include "pstd/include/pstd_string.h" +#include "include/pika_db.h" + +#include "include/pika_define.h" +#include "include/pika_slot_command.h" +#include "include/pika_cache.h" +#include "pstd/include/pstd_string.h" #include "include/pika_define.h" void BitSetCmd::DoInitial() { @@ -15,11 +21,11 @@ void BitSetCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &bit_offset_)) { + if (pstd::string2int(argv_[2].data(), 
argv_[2].size(), &bit_offset_) == 0) { res_.SetRes(CmdRes::kInvalidBitOffsetInt); return; } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &on_)) { + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &on_) == 0) { res_.SetRes(CmdRes::kInvalidBitInt); return; } @@ -28,35 +34,48 @@ void BitSetCmd::DoInitial() { return; } // value no bigger than 2^18 - if ( (bit_offset_ >> kMaxBitOpInputBit) > 0) { + if ((bit_offset_ >> kMaxBitOpInputBit) > 0) { res_.SetRes(CmdRes::kInvalidBitOffsetInt); return; } - if (on_ & ~1) { + if ((on_ & ~1) != 0) { res_.SetRes(CmdRes::kInvalidBitInt); return; } - return; } -void BitSetCmd::Do(std::shared_ptr partition) { +void BitSetCmd::Do() { std::string value; int32_t bit_val = 0; - rocksdb::Status s = partition->db()->SetBit(key_, bit_offset_, on_, &bit_val); - if (s.ok()){ - res_.AppendInteger((int)bit_val); + s_ = db_->storage()->SetBit(key_, bit_offset_, static_cast(on_), &bit_val); + if (s_.ok()) { + res_.AppendInteger(static_cast(bit_val)); + AddSlotKey("k", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } } +void BitSetCmd::DoThroughDB() { + Do(); +} + +void BitSetCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->SetBitIfKeyExist(key_, bit_offset_, on_); + } +} + + void BitGetCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameBitGet); return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &bit_offset_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &bit_offset_) == 0) { res_.SetRes(CmdRes::kInvalidBitOffsetInt); return; } @@ -64,19 +83,43 @@ void BitGetCmd::DoInitial() { res_.SetRes(CmdRes::kInvalidBitOffsetInt); return; } - return; } -void BitGetCmd::Do(std::shared_ptr partition) { +void BitGetCmd::Do() { int32_t bit_val = 0; - rocksdb::Status s = partition->db()->GetBit(key_, bit_offset_, &bit_val); + s_ = db_->storage()->GetBit(key_, bit_offset_, &bit_val); + if (s_.ok()) { + res_.AppendInteger(static_cast(bit_val)); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void BitGetCmd::ReadCache() { + int64_t bit_val = 0; + auto s = db_->cache()->GetBit(key_, bit_offset_, &bit_val); if (s.ok()) { - res_.AppendInteger((int)bit_val); + res_.AppendInteger(bit_val); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void BitGetCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void BitGetCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_KV, key_, db_); + } +} + void BitCountCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameBitCount); @@ -85,11 +128,11 @@ void BitCountCmd::DoInitial() { key_ = argv_[1]; if (argv_.size() == 4) { count_all_ = false; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &start_offset_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &start_offset_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &end_offset_)) { + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &end_offset_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -98,36 +141,66 @@ void BitCountCmd::DoInitial() { } else { res_.SetRes(CmdRes::kSyntaxErr, kCmdNameBitCount); } - return; } -void 
BitCountCmd::Do(std::shared_ptr partition) { +void BitCountCmd::Do() { int32_t count = 0; - rocksdb::Status s; if (count_all_) { - s = partition->db()->BitCount(key_, start_offset_, end_offset_, &count, false); + s_ = db_->storage()->BitCount(key_, start_offset_, end_offset_, &count, false); } else { - s = partition->db()->BitCount(key_, start_offset_, end_offset_, &count, true); + s_ = db_->storage()->BitCount(key_, start_offset_, end_offset_, &count, true); } - if (s.ok() || s.IsNotFound()) { + if (s_.ok() || s_.IsNotFound()) { res_.AppendInteger(count); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void BitCountCmd::ReadCache() { + int64_t count = 0; + int64_t start = static_cast(start_offset_); + int64_t end = static_cast(end_offset_); + bool flag = true; + if (count_all_) { + flag = false; + } + rocksdb::Status s = db_->cache()->BitCount(key_, start, end, &count, flag); + + if (s.ok()) { + res_.AppendInteger(count); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void BitCountCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void BitCountCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_KV, key_, db_); + } +} + void BitPosCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameBitPos); return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &bit_val_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &bit_val_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - if (bit_val_ & ~1) { + if ((bit_val_ & ~1) != 0) { res_.SetRes(CmdRes::kInvalidBitPosArgument); return; } @@ -137,85 +210,146 @@ void BitPosCmd::DoInitial() { } else if (argv_.size() == 4) { pos_all_ = false; endoffset_set_ = false; - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &start_offset_)) { + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &start_offset_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; - } + } } else if (argv_.size() == 5) { pos_all_ = false; endoffset_set_ = true; - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &start_offset_)) { + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &start_offset_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; - } - if (!slash::string2l(argv_[4].data(), argv_[4].size(), &end_offset_)) { + } + if (pstd::string2int(argv_[4].data(), argv_[4].size(), &end_offset_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - } else + } else { res_.SetRes(CmdRes::kSyntaxErr, kCmdNameBitPos); - return; + } } -void BitPosCmd::Do(std::shared_ptr partition) { +void BitPosCmd::Do() { int64_t pos = 0; rocksdb::Status s; if (pos_all_) { - s = partition->db()->BitPos(key_, bit_val_, &pos); + s_ = db_->storage()->BitPos(key_, static_cast(bit_val_), &pos); } else if (!pos_all_ && !endoffset_set_) { - s = partition->db()->BitPos(key_, bit_val_, start_offset_, &pos); + s_ = db_->storage()->BitPos(key_, static_cast(bit_val_), start_offset_, &pos); } else if (!pos_all_ && endoffset_set_) { - s = partition->db()->BitPos(key_, bit_val_, start_offset_, end_offset_, &pos); + s_ = db_->storage()->BitPos(key_, static_cast(bit_val_), start_offset_, end_offset_, &pos); + } + if (s_.ok()) { + res_.AppendInteger(static_cast(pos)); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void 
BitPosCmd::ReadCache() { + int64_t pos = 0; + rocksdb::Status s; + int64_t bit = static_cast(bit_val_); + int64_t start = static_cast(start_offset_); + int64_t end = static_cast(end_offset_);\ + if (pos_all_) { + s = db_->cache()->BitPos(key_, bit, &pos); + } else if (!pos_all_ && !endoffset_set_) { + s = db_->cache()->BitPos(key_, bit, start, &pos); + } else if (!pos_all_ && endoffset_set_) { + s = db_->cache()->BitPos(key_, bit, start, end, &pos); } if (s.ok()) { - res_.AppendInteger((int)pos); + res_.AppendInteger(pos); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void BitPosCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void BitPosCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_KV, key_, db_); + } +} + void BitOpCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp); return; } std::string op_str = argv_[1]; - if (!strcasecmp(op_str.data(), "not")) { - op_ = blackwidow::kBitOpNot; - } else if (!strcasecmp(op_str.data(), "and")) { - op_ = blackwidow::kBitOpAnd; - } else if (!strcasecmp(op_str.data(), "or")) { - op_ = blackwidow::kBitOpOr; - } else if (!strcasecmp(op_str.data(), "xor")) { - op_ = blackwidow::kBitOpXor; + if (strcasecmp(op_str.data(), "not") == 0) { + op_ = storage::kBitOpNot; + } else if (strcasecmp(op_str.data(), "and") == 0) { + op_ = storage::kBitOpAnd; + } else if (strcasecmp(op_str.data(), "or") == 0) { + op_ = storage::kBitOpOr; + } else if (strcasecmp(op_str.data(), "xor") == 0) { + op_ = storage::kBitOpXor; } else { res_.SetRes(CmdRes::kSyntaxErr, kCmdNameBitOp); return; } - if (op_ == blackwidow::kBitOpNot && argv_.size() != 4) { - res_.SetRes(CmdRes::kWrongBitOpNotNum, kCmdNameBitOp); - return; - } else if (op_ != blackwidow::kBitOpNot && argv_.size() < 4) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp); - return; + if (op_ == storage::kBitOpNot && argv_.size() != 4) { + res_.SetRes(CmdRes::kWrongBitOpNotNum, kCmdNameBitOp); + return; + } else if (op_ != storage::kBitOpNot && argv_.size() < 4) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp); + return; } else if (argv_.size() >= kMaxBitOpInputKey) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp); - return; + res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp); + return; } - dest_key_ = argv_[2].data(); - for(unsigned int i = 3; i <= argv_.size() - 1; i++) { - src_keys_.push_back(argv_[i].data()); + dest_key_ = argv_[2]; + for (size_t i = 3; i <= argv_.size() - 1; i++) { + src_keys_.emplace_back(argv_[i].data()); } - return; } -void BitOpCmd::Do(std::shared_ptr partition) { - int64_t result_length; - rocksdb::Status s = partition->db()->BitOp(op_, dest_key_, src_keys_, &result_length); - if (s.ok()) { +void BitOpCmd::Do() { + int64_t result_length = 0; + s_ = db_->storage()->BitOp(op_, dest_key_, src_keys_, value_to_dest_, &result_length); + if (s_.ok()) { res_.AppendInteger(result_length); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void BitOpCmd::DoThroughDB() { + Do(); +} + +void BitOpCmd::DoUpdateCache() { + if (s_.ok()) { + std::vector v; + v.emplace_back(dest_key_); + db_->cache()->Del(v); } } + +void BitOpCmd::DoBinlog() { + PikaCmdArgsType set_args; + //used "set" instead of "SET" to distinguish the binlog of SetCmd + set_args.emplace_back("set"); + set_args.emplace_back(dest_key_); + 
set_args.emplace_back(value_to_dest_); + set_cmd_->Initial(set_args, db_name_); + set_cmd_->SetConn(GetConn()); + set_cmd_->SetResp(resp_.lock()); + //value of this binlog might be strange if you print it out (eg. set bitkey_out1 «Ѧ...), but it's valid + set_cmd_->DoBinlog(); +} diff --git a/tools/pika_migrate/src/pika_cache.cc b/tools/pika_migrate/src/pika_cache.cc new file mode 100644 index 0000000000..b7d1f45eb1 --- /dev/null +++ b/tools/pika_migrate/src/pika_cache.cc @@ -0,0 +1,1628 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include +#include +#include +#include + +#include "include/pika_cache.h" +#include "include/pika_cache_load_thread.h" +#include "include/pika_server.h" +#include "include/pika_slot_command.h" +#include "pstd/include/pika_codis_slot.h" +#include "cache/include/cache.h" +#include "cache/include/config.h" + +extern PikaServer* g_pika_server; +#define EXTEND_CACHE_SIZE(N) (N * 12 / 10) +using rocksdb::Status; + +PikaCache::PikaCache(int zset_cache_start_direction, int zset_cache_field_num_per_key) + : cache_status_(PIKA_CACHE_STATUS_NONE), + cache_num_(0), + zset_cache_start_direction_(zset_cache_start_direction), + zset_cache_field_num_per_key_(EXTEND_CACHE_SIZE(zset_cache_field_num_per_key)) { + cache_load_thread_ = std::make_unique (zset_cache_start_direction_, zset_cache_field_num_per_key_); + cache_load_thread_->StartThread(); +} + +PikaCache::~PikaCache() { + { + std::lock_guard l(rwlock_); + DestroyWithoutLock(); + } +} + +Status PikaCache::Init(uint32_t cache_num, cache::CacheConfig *cache_cfg) { + std::lock_guard l(rwlock_); + + if (nullptr == cache_cfg) { + return Status::Corruption("invalid arguments !!!"); + } + return InitWithoutLock(cache_num, cache_cfg); +} + +void PikaCache::ProcessCronTask(void) { + std::lock_guard l(rwlock_); + for (uint32_t i = 0; i < caches_.size(); ++i) { + std::unique_lock lm(*cache_mutexs_[i]); + caches_[i]->ActiveExpireCycle(); + } +} + +Status PikaCache::Reset(uint32_t cache_num, cache::CacheConfig *cache_cfg) { + std::lock_guard l(rwlock_); + + DestroyWithoutLock(); + return InitWithoutLock(cache_num, cache_cfg); +} + +void PikaCache::ResetConfig(cache::CacheConfig *cache_cfg) { + std::lock_guard l(rwlock_); + zset_cache_start_direction_ = cache_cfg->zset_cache_start_direction; + zset_cache_field_num_per_key_ = EXTEND_CACHE_SIZE(cache_cfg->zset_cache_field_num_per_key); + LOG(WARNING) << "zset-cache-start-direction: " << zset_cache_start_direction_ << ", zset_cache_field_num_per_key: " << zset_cache_field_num_per_key_; + cache::RedisCache::SetConfig(cache_cfg); +} + +void PikaCache::Destroy(void) { + std::lock_guard l(rwlock_); + DestroyWithoutLock(); +} + +void PikaCache::SetCacheStatus(int status) { cache_status_ = status; } + +int PikaCache::CacheStatus(void) { return cache_status_; } + +/*----------------------------------------------------------------------------- + * Normal Commands + *----------------------------------------------------------------------------*/ +void PikaCache::Info(CacheInfo &info) { + info.clear(); + std::unique_lock l(rwlock_); + info.status = cache_status_; + info.cache_num = cache_num_; + info.used_memory = cache::RedisCache::GetUsedMemory(); + info.async_load_keys_num = cache_load_thread_->AsyncLoadKeysNum(); + info.waitting_load_keys_num = 
cache_load_thread_->WaittingLoadKeysNum(); + cache::RedisCache::GetHitAndMissNum(&info.hits, &info.misses); + for (uint32_t i = 0; i < caches_.size(); ++i) { + std::lock_guard lm(*cache_mutexs_[i]); + info.keys_num += caches_[i]->DbSize(); + } +} + +bool PikaCache::Exists(std::string& key) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Exists(key); +} + +void PikaCache::FlushCache(void) { + std::lock_guard l(rwlock_); + for (uint32_t i = 0; i < caches_.size(); ++i) { + std::lock_guard lm(*cache_mutexs_[i]); + caches_[i]->FlushCache(); + } +} + +Status PikaCache::Del(const std::vector &keys) { + rocksdb::Status s; + for (const auto &key : keys) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + s = caches_[cache_index]->Del(key); + } + return s; +} + +Status PikaCache::Expire(std::string& key, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Expire(key, ttl); +} + +Status PikaCache::Expireat(std::string& key, int64_t ttl_sec) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Expireat(key, ttl_sec); +} + +Status PikaCache::TTL(std::string& key, int64_t *ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->TTL(key, ttl); +} + +int64_t PikaCache::TTL(std::string &key) { + int64_t ret = 0; + int64_t timestamp = 0; + + int cache_index = CacheIndex(key); + Status s = caches_[cache_index]->TTL(key, ×tamp); + if (s.ok() || s.IsNotFound()) { + ret = timestamp; + } else if (!s.IsNotFound()) { + ret = -3; + } + return ret; +} + +Status PikaCache::Persist(std::string &key) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Persist(key); +} + +Status PikaCache::Type(std::string& key, std::string *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Type(key, value); +} + +Status PikaCache::RandomKey(std::string *key) { + Status s; + srand((unsigned)time(nullptr)); + int cache_index = rand() % caches_.size(); + for (unsigned int i = 0; i < caches_.size(); ++i) { + cache_index = (cache_index + i) % caches_.size(); + + std::lock_guard lm(*cache_mutexs_[cache_index]); + s = caches_[cache_index]->RandomKey(key); + if (s.ok()) { + break; + } + } + return s; +} + +Status PikaCache::GetType(const std::string& key, bool single, std::vector& types) { + types.clear(); + + Status s; + std::string value; + int cache_indexk = CacheIndex(key); + s = caches_[cache_indexk]->Get(key, &value); + if (s.ok()) { + types.emplace_back("string"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && !types.empty()) { + return s; + } + + uint64_t hashes_len = 0; + int cache_indexh = CacheIndex(key); + s = caches_[cache_indexh]->HLen(key, &hashes_len); + if (s.ok() && hashes_len != 0) { + types.emplace_back("hash"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && !types.empty()) { + return s; + } + + uint64_t lists_len = 0; + int cache_indexl = CacheIndex(key); + s = caches_[cache_indexl]->LLen(key, &lists_len); + if (s.ok() && lists_len != 0) { + types.emplace_back("list"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && !types.empty()) { + return s; + } + + uint64_t zsets_size = 0; + int cache_indexz = 
CacheIndex(key); + s = caches_[cache_indexz]->ZCard(key, &zsets_size); + if (s.ok() && zsets_size != 0) { + types.emplace_back("zset"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && !types.empty()) { + return s; + } + + uint64_t sets_size = 0; + int cache_indexs = CacheIndex(key); + s = caches_[cache_indexs]->SCard(key, &sets_size); + if (s.ok() && sets_size != 0) { + types.emplace_back("set"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && types.empty()) { + types.emplace_back("none"); + } + return Status::OK(); +} + +/*----------------------------------------------------------------------------- + * String Commands + *----------------------------------------------------------------------------*/ +Status PikaCache::Set(std::string& key, std::string &value, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Set(key, value, ttl); +} + +Status PikaCache::Setnx(std::string& key, std::string &value, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Setnx(key, value, ttl); +} + +Status PikaCache::SetnxWithoutTTL(std::string& key, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SetnxWithoutTTL(key, value); +} + +Status PikaCache::Setxx(std::string& key, std::string &value, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Setxx(key, value, ttl); +} + +Status PikaCache::SetxxWithoutTTL(std::string& key, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SetxxWithoutTTL(key, value); +} + +Status PikaCache::Get(std::string& key, std::string *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Get(key, value); +} + +Status PikaCache::MSet(const std::vector &kvs) { + for (const auto &item : kvs) { + auto [key, value] = item; + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SetxxWithoutTTL(key, value); + } + return Status::OK(); +} + +Status PikaCache::MGet(const std::vector &keys, std::vector *vss) { + vss->resize(keys.size()); + rocksdb::Status ret; + for (int i = 0; i < keys.size(); ++i) { + int cache_index = CacheIndex(keys[i]); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto s = caches_[cache_index]->Get(keys[i], &(*vss)[i].value); + (*vss)[i].status = s; + if (!s.ok()) { + ret = s; + } + } + return ret; +} + +Status PikaCache::Incrxx(std::string& key) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->Incr(key); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::Decrxx(std::string& key) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->Decr(key); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::IncrByxx(std::string& key, uint64_t incr) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->IncrBy(key, incr); + 
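// --- Editor's sketch (not part of the patch) ---------------------------------
// Every PikaCache method above follows the same dispatch pattern: map the key
// to one of the cache shards, lock only that shard's mutex, then delegate to
// its cache::RedisCache instance. CacheIndex() itself is not shown in this
// hunk, so the hash below is an assumption; the per-shard locking structure is
// the point being illustrated.
#include <cstddef>
#include <functional>
#include <mutex>
#include <string>
#include <vector>

template <typename Shard>
class ShardedCacheSketch {
 public:
  explicit ShardedCacheSketch(std::size_t shard_num) : mutexes_(shard_num), shards_(shard_num) {}

  // Runs fn against the shard owning key while holding only that shard's lock,
  // so operations on keys in different shards never contend with each other.
  template <typename Fn>
  auto WithShard(const std::string& key, Fn fn) {
    std::size_t i = std::hash<std::string>{}(key) % shards_.size();
    std::lock_guard<std::mutex> guard(mutexes_[i]);
    return fn(shards_[i]);
  }

 private:
  std::vector<std::mutex> mutexes_;
  std::vector<Shard> shards_;
};
// -----------------------------------------------------------------------------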
} + return Status::NotFound("key not exist"); +} + +Status PikaCache::DecrByxx(std::string& key, uint64_t incr) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->DecrBy(key, incr); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::Incrbyfloatxx(std::string& key, long double incr) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->Incrbyfloat(key, incr); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::Appendxx(std::string& key, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->Append(key, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::GetRange(std::string& key, int64_t start, int64_t end, std::string *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->GetRange(key, start, end, value); +} + +Status PikaCache::SetRangexx(std::string& key, int64_t start, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->SetRange(key, start, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::Strlen(std::string& key, int32_t *len) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Strlen(key, len); +} + +/*----------------------------------------------------------------------------- + * Hash Commands + *----------------------------------------------------------------------------*/ +Status PikaCache::HDel(std::string& key, std::vector &fields) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HDel(key, fields); +} + +Status PikaCache::HSet(std::string& key, std::string &field, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HSet(key, field, value); +} + +Status PikaCache::HSetIfKeyExist(std::string& key, std::string &field, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HSet(key, field, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::HSetIfKeyExistAndFieldNotExist(std::string& key, std::string &field, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HSetnx(key, field, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::HMSet(std::string& key, std::vector &fvs) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HMSet(key, fvs); +} + +Status PikaCache::HMSetnx(std::string& key, std::vector &fvs, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->HMSet(key, fvs); + caches_[cache_index]->Expire(key, ttl); + 
return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +Status PikaCache::HMSetnxWithoutTTL(std::string& key, std::vector &fvs) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->HMSet(key, fvs); + return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +Status PikaCache::HMSetxx(std::string& key, std::vector &fvs) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HMSet(key, fvs); + } else { + return Status::NotFound("key not exist"); + } +} + +Status PikaCache::HGet(std::string& key, std::string &field, std::string *value) { + + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HGet(key, field, value); +} + +Status PikaCache::HMGet(std::string& key, std::vector &fields, std::vector *vss) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HMGet(key, fields, vss); +} + +Status PikaCache::HGetall(std::string& key, std::vector *fvs) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HGetall(key, fvs); +} + +Status PikaCache::HKeys(std::string& key, std::vector *fields) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HKeys(key, fields); +} + +Status PikaCache::HVals(std::string& key, std::vector *values) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HVals(key, values); +} + +Status PikaCache::HExists(std::string& key, std::string &field) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HExists(key, field); +} + +Status PikaCache::HIncrbyxx(std::string& key, std::string &field, int64_t value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HIncrby(key, field, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::HIncrbyfloatxx(std::string& key, std::string &field, long double value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HIncrbyfloat(key, field, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::HLen(std::string& key, uint64_t *len) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HLen(key, len); +} + +Status PikaCache::HStrlen(std::string& key, std::string &field, uint64_t *len) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HStrlen(key, field, len); +} + +/*----------------------------------------------------------------------------- + * List Commands + *----------------------------------------------------------------------------*/ +Status PikaCache::LIndex(std::string& key, int64_t index, std::string *element) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LIndex(key, index, element); +} + +Status 
PikaCache::LInsert(std::string& key, storage::BeforeOrAfter &before_or_after, std::string &pivot,
+                         std::string &value) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->LInsert(key, before_or_after, pivot, value);
+}
+
+Status PikaCache::LLen(std::string& key, uint64_t *len) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->LLen(key, len);
+}
+
+Status PikaCache::LPop(std::string& key, std::string *element) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->LPop(key, element);
+}
+
+Status PikaCache::LPush(std::string& key, std::vector<std::string> &values) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->LPush(key, values);
+}
+
+Status PikaCache::LPushx(std::string& key, std::vector<std::string> &values) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->LPushx(key, values);
+}
+
+Status PikaCache::LRange(std::string& key, int64_t start, int64_t stop, std::vector<std::string> *values) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->LRange(key, start, stop, values);
+}
+
+Status PikaCache::LRem(std::string& key, int64_t count, std::string &value) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->LRem(key, count, value);
+}
+
+Status PikaCache::LSet(std::string& key, int64_t index, std::string &value) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->LSet(key, index, value);
+}
+
+Status PikaCache::LTrim(std::string& key, int64_t start, int64_t stop) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->LTrim(key, start, stop);
+}
+
+Status PikaCache::RPop(std::string& key, std::string *element) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->RPop(key, element);
+}
+
+Status PikaCache::RPush(std::string& key, std::vector<std::string> &values) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->RPush(key, values);
+}
+
+Status PikaCache::RPushx(std::string& key, std::vector<std::string> &values) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->RPushx(key, values);
+}
+
+Status PikaCache::RPushnx(std::string& key, std::vector<std::string> &values, int64_t ttl) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  if (!caches_[cache_index]->Exists(key)) {
+    caches_[cache_index]->RPush(key, values);
+    caches_[cache_index]->Expire(key, ttl);
+    return Status::OK();
+  } else {
+    return Status::NotFound("key exists");
+  }
+}
+
+Status PikaCache::RPushnxWithoutTTL(std::string& key, std::vector<std::string> &values) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  if (!caches_[cache_index]->Exists(key)) {
+    caches_[cache_index]->RPush(key, values);
+    return Status::OK();
+  } else {
+    return Status::NotFound("key exists");
+  }
+}
+
+/*-----------------------------------------------------------------------------
+ * Set Commands
+ *----------------------------------------------------------------------------*/
+Status PikaCache::SAdd(std::string& key, std::vector<std::string> &members) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->SAdd(key, members);
+}
+
+Status PikaCache::SAddIfKeyExist(std::string& key, std::vector<std::string> &members) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  if (caches_[cache_index]->Exists(key)) {
+    return caches_[cache_index]->SAdd(key, members);
+  }
+  return Status::NotFound("key not exist");
+}
+
+Status PikaCache::SAddnx(std::string& key, std::vector<std::string> &members, int64_t ttl) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  if (!caches_[cache_index]->Exists(key)) {
+    caches_[cache_index]->SAdd(key, members);
+    caches_[cache_index]->Expire(key, ttl);
+    return Status::OK();
+  } else {
+    return Status::NotFound("key exists");
+  }
+}
+
+Status PikaCache::SAddnxWithoutTTL(std::string& key, std::vector<std::string> &members) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  if (!caches_[cache_index]->Exists(key)) {
+    caches_[cache_index]->SAdd(key, members);
+    return Status::OK();
+  } else {
+    return Status::NotFound("key exists");
+  }
+}
+
+Status PikaCache::SCard(std::string& key, uint64_t *len) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->SCard(key, len);
+}
+
+Status PikaCache::SIsmember(std::string& key, std::string& member) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->SIsmember(key, member);
+}
+
+Status PikaCache::SMembers(std::string& key, std::vector<std::string> *members) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->SMembers(key, members);
+}
+
+Status PikaCache::SRem(std::string& key, std::vector<std::string> &members) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->SRem(key, members);
+}
+
+Status PikaCache::SRandmember(std::string& key, int64_t count, std::vector<std::string> *members) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->SRandmember(key, count, members);
+}
+
+/*-----------------------------------------------------------------------------
+ * ZSet Commands
+ *----------------------------------------------------------------------------*/
+Status PikaCache::ZAdd(std::string& key, std::vector<storage::ScoreMember> &score_members) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->ZAdd(key, score_members);
+}
+
+void PikaCache::GetMinMaxScore(std::vector<storage::ScoreMember> &score_members, double &min, double &max) {
+  if (score_members.empty()) {
+    return;
+  }
+  min = max = score_members.front().score;
+  for (auto &item : score_members) {
+    if (item.score < min) {
+      min = item.score;
+    }
+    if (item.score > max) {
+      max = item.score;
+    }
+  }
+}
+
+bool PikaCache::GetCacheMinMaxSM(cache::RedisCache *cache_obj, std::string& key, storage::ScoreMember &min_m,
+                                 storage::ScoreMember &max_m) {
+  if (cache_obj) {
+    std::vector<storage::ScoreMember> score_members;
+    auto s = cache_obj->ZRange(key, 0, 0, &score_members);
+    if (!s.ok() || score_members.empty()) {
+      return false;
+    }
+    min_m = score_members.front();
+    score_members.clear();
+
+    s = cache_obj->ZRange(key, -1, -1, &score_members);
+    if (!s.ok() || score_members.empty()) {
+      return false;
+    }
+    max_m = score_members.front();
+    return true;
+  }
+  return false;
+}
+
+Status PikaCache::ZAddIfKeyExist(std::string& key, std::vector<storage::ScoreMember> &score_members) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  auto cache_obj = caches_[cache_index];
+  Status s;
+  if (cache_obj->Exists(key)) {
+    std::unordered_set<std::string> unique;
+    std::list<storage::ScoreMember> filtered_score_members;
+    for (auto it = score_members.rbegin(); it != score_members.rend(); ++it) {
+      if (unique.find(it->member) == unique.end()) {
+        unique.insert(it->member);
+        filtered_score_members.push_front(*it);
+      }
+    }
+    std::vector<storage::ScoreMember> new_score_members;
+    for (auto &item : filtered_score_members) {
+      new_score_members.push_back(std::move(item));
+    }
+
+    double min_score = storage::ZSET_SCORE_MIN;
+    double max_score = storage::ZSET_SCORE_MAX;
+    GetMinMaxScore(new_score_members, min_score, max_score);
+
+    storage::ScoreMember cache_min_sm;
+    storage::ScoreMember cache_max_sm;
+    if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) {
+      return Status::NotFound("key not exist");
+    }
+    auto cache_min_score = cache_min_sm.score;
+    auto cache_max_score = cache_max_sm.score;
+    if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) {
+      if (max_score < cache_max_score) {
+        cache_obj->ZAdd(key, new_score_members);
+      } else {
+        std::vector<storage::ScoreMember> score_members_can_add;
+        std::vector<std::string> members_need_remove;
+        bool left_close = false;
+        for (auto &item : new_score_members) {
+          if (item.score == cache_max_score) {
+            left_close = true;
+            score_members_can_add.push_back(item);
+            continue;
+          }
+          if (item.score < cache_max_score) {
+            score_members_can_add.push_back(item);
+          } else {
+            members_need_remove.push_back(item.member);
+          }
+        }
+        if (!score_members_can_add.empty()) {
+          cache_obj->ZAdd(key, score_members_can_add);
+          std::string cache_max_score_str = left_close ? "" : "(" + std::to_string(cache_max_score);
+          std::string max_str = "+inf";
+          cache_obj->ZRemrangebyscore(key, cache_max_score_str, max_str);
+        }
+        if (!members_need_remove.empty()) {
+          cache_obj->ZRem(key, members_need_remove);
+        }
+      }
+    } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) {
+      if (min_score > cache_min_score) {
+        cache_obj->ZAdd(key, new_score_members);
+      } else {
+        std::vector<storage::ScoreMember> score_members_can_add;
+        std::vector<std::string> members_need_remove;
+        bool right_close = false;
+        for (auto &item : new_score_members) {
+          if (item.score == cache_min_score) {
+            right_close = true;
+            score_members_can_add.push_back(item);
+            continue;
+          }
+          if (item.score > cache_min_score) {
+            score_members_can_add.push_back(item);
+          } else {
+            members_need_remove.push_back(item.member);
+          }
+        }
+        if (!score_members_can_add.empty()) {
+          cache_obj->ZAdd(key, score_members_can_add);
+          std::string cache_min_score_str = right_close ? "" : "(" + std::to_string(cache_min_score);
+          std::string min_str = "-inf";
+          cache_obj->ZRemrangebyscore(key, min_str, cache_min_score_str);
+        }
+        if (!members_need_remove.empty()) {
+          cache_obj->ZRem(key, members_need_remove);
+        }
+      }
+    }
+
+    return CleanCacheKeyIfNeeded(cache_obj, key);
+  } else {
+    return Status::NotFound("key not exist");
+  }
+}
+
+Status PikaCache::CleanCacheKeyIfNeeded(cache::RedisCache *cache_obj, std::string& key) {
+  uint64_t cache_len = 0;
+  cache_obj->ZCard(key, &cache_len);
+  if (cache_len > (unsigned long)zset_cache_field_num_per_key_) {
+    long start = 0;
+    long stop = 0;
+    if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) {
+      start = -cache_len + zset_cache_field_num_per_key_;
+      stop = -1;
+    } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) {
+      start = 0;
+      stop = cache_len - zset_cache_field_num_per_key_ - 1;
+    }
+    auto min = std::to_string(start);
+    auto max = std::to_string(stop);
+    cache_obj->ZRemrangebyrank(key, min, max);
+  }
+  return Status::OK();
+}
+
+Status PikaCache::ZAddnx(std::string& key, std::vector<storage::ScoreMember> &score_members, int64_t ttl) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  if (!caches_[cache_index]->Exists(key)) {
+    caches_[cache_index]->ZAdd(key, score_members);
+    caches_[cache_index]->Expire(key, ttl);
+    return Status::OK();
+  } else {
+    return Status::NotFound("key exists");
+  }
+}
+
+Status PikaCache::ZAddnxWithoutTTL(std::string& key, std::vector<storage::ScoreMember> &score_members) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  if (!caches_[cache_index]->Exists(key)) {
+    caches_[cache_index]->ZAdd(key, score_members);
+    return Status::OK();
+  } else {
+    return Status::NotFound("key exists");
+  }
+}
+
+Status PikaCache::ZCard(std::string& key, uint32_t *len, const std::shared_ptr<DB>& db) {
+  int32_t db_len = 0;
+  db->storage()->ZCard(key, &db_len);
+  *len = db_len;
+  return Status::OK();
+}
+
+Status PikaCache::CacheZCard(std::string& key, uint64_t *len) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+
+  return caches_[cache_index]->ZCard(key, len);
+}
+
+RangeStatus PikaCache::CheckCacheRangeByScore(uint64_t cache_len, double cache_min, double cache_max, double min,
+                                              double max, bool left_close, bool right_close) {
+  bool cache_full = (cache_len == (unsigned long)zset_cache_field_num_per_key_);
+
+  if (cache_full) {
+    if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) {
+      bool ret = (max < cache_max);
+      if (ret) {
+        if (max < cache_min) {
+          return RangeStatus::RangeError;
+        } else {
+          return RangeStatus::RangeHit;
+        }
+      } else {
+        return RangeStatus::RangeMiss;
+      }
+    } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) {
+      bool ret = min > cache_min;
+      if (ret) {
+        if (min > cache_max) {
+          return RangeStatus::RangeError;
+        } else {
+          return RangeStatus::RangeHit;
+        }
+      } else {
+        return RangeStatus::RangeMiss;
+      }
+    } else {
+      return RangeStatus::RangeError;
+    }
+  } else {
+    if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) {
+      bool ret = right_close ? max < cache_max : max <= cache_max;
+      if (ret) {
+        if (max < cache_min) {
+          return RangeStatus::RangeError;
+        } else {
+          return RangeStatus::RangeHit;
+        }
+      } else {
+        return RangeStatus::RangeMiss;
+      }
+    } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) {
+      bool ret = left_close ? min > cache_min : min >= cache_min;
+      if (ret) {
+        if (min > cache_max) {
+          return RangeStatus::RangeError;
+        } else {
+          return RangeStatus::RangeHit;
+        }
+      } else {
+        return RangeStatus::RangeMiss;
+      }
+    } else {
+      return RangeStatus::RangeError;
+    }
+  }
+}
+
+Status PikaCache::ZCount(std::string& key, std::string &min, std::string &max, uint64_t *len, ZCountCmd *cmd) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  auto cache_obj = caches_[cache_index];
+  uint64_t cache_len = 0;
+  cache_obj->ZCard(key, &cache_len);
+  if (cache_len <= 0) {
+    return Status::NotFound("key not in cache");
+  } else {
+    storage::ScoreMember cache_min_sm;
+    storage::ScoreMember cache_max_sm;
+    if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) {
+      return Status::NotFound("key not exist");
+    }
+    auto cache_min_score = cache_min_sm.score;
+    auto cache_max_score = cache_max_sm.score;
+
+    if (RangeStatus::RangeHit == CheckCacheRangeByScore(cache_len, cache_min_score, cache_max_score, cmd->MinScore(),
+                                                        cmd->MaxScore(), cmd->LeftClose(), cmd->RightClose())) {
+      auto s = cache_obj->ZCount(key, min, max, len);
+      return s;
+    } else {
+      return Status::NotFound("key not in cache");
+    }
+  }
+}
+
+Status PikaCache::ZIncrby(std::string& key, std::string& member, double increment) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->ZIncrby(key, member, increment);
+}
+
+bool PikaCache::ReloadCacheKeyIfNeeded(cache::RedisCache *cache_obj, std::string& key, int mem_len, int db_len,
+                                       const std::shared_ptr<DB>& db) {
+  if (mem_len == -1) {
+    uint64_t cache_len = 0;
+    cache_obj->ZCard(key, &cache_len);
+    mem_len = cache_len;
+  }
+  if (db_len == -1) {
+    db_len = 0;
+    db->storage()->ZCard(key, &db_len);
+    if (!db_len) {
+      return false;
+    }
+  }
+  if (db_len < zset_cache_field_num_per_key_) {
+    if (mem_len * 2 < db_len) {
+      cache_obj->Del(key);
+      PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key, db);
+      return true;
+    } else {
+      return false;
+    }
+  } else {
+    if (zset_cache_field_num_per_key_ && mem_len * 2 < zset_cache_field_num_per_key_) {
+      cache_obj->Del(key);
+      PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key, db);
+      return true;
+    } else {
+      return false;
+    }
+  }
+}
+
+Status PikaCache::ZIncrbyIfKeyExist(std::string& key, std::string& member, double increment, ZIncrbyCmd *cmd, const std::shared_ptr<DB>& db) {
+  auto eps = std::numeric_limits<double>::epsilon();
+  if (-eps < increment && increment < eps) {
+    return Status::NotFound("increment is 0, nothing to be done");
+  }
+  if (!cmd->res().ok()) {
+    return Status::NotFound("key not exist");
+  }
+  std::lock_guard l(rwlock_);
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  auto cache_obj = caches_[cache_index];
+  uint64_t cache_len = 0;
+  cache_obj->ZCard(key, &cache_len);
+
+  storage::ScoreMember cache_min_sm;
+  storage::ScoreMember cache_max_sm;
+  if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) {
+    return Status::NotFound("key not exist");
+  }
+  auto cache_min_score = cache_min_sm.score;
+  auto cache_max_score = cache_max_sm.score;
+  auto RemCacheRangebyscoreAndCheck = [this, cache_obj, &key, cache_len, db](double score) {
+    auto score_rm = std::to_string(score);
+    auto s = cache_obj->ZRemrangebyscore(key, score_rm, score_rm);
+    ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, -1, db);
+    return s;
+  };
+  auto RemCacheKeyMember = [this, cache_obj, &key, cache_len, db](const std::string& member, bool check =
true) { + std::vector member_rm = {member}; + auto s = cache_obj->ZRem(key, member_rm); + if (check) { + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, -1, db); + } + return s; + }; + + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if (cmd->Score() > cache_max_score) { + return RemCacheKeyMember(member); + } else if (cmd->Score() == cache_max_score) { + RemCacheKeyMember(member, false); + return RemCacheRangebyscoreAndCheck(cache_max_score); + } else { + std::vector score_member = {{cmd->Score(), member}}; + auto s = cache_obj->ZAdd(key, score_member); + CleanCacheKeyIfNeeded(cache_obj, key); + return s; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if (cmd->Score() > cache_min_score) { + std::vector score_member = {{cmd->Score(), member}}; + auto s = cache_obj->ZAdd(key, score_member); + CleanCacheKeyIfNeeded(cache_obj, key); + return s; + } else if (cmd->Score() == cache_min_score) { + RemCacheKeyMember(member, false); + return RemCacheRangebyscoreAndCheck(cache_min_score); + } else { + std::vector member_rm = {member}; + return RemCacheKeyMember(member); + } + } + + return Status::NotFound("key not exist"); +} + +RangeStatus PikaCache::CheckCacheRange(int32_t cache_len, int32_t db_len, int64_t start, int64_t stop, int64_t &out_start, + int64_t &out_stop) { + out_start = start >= 0 ? start : db_len + start; + out_stop = stop >= 0 ? stop : db_len + stop; + out_start = out_start <= 0 ? 0 : out_start; + out_stop = out_stop >= db_len ? db_len - 1 : out_stop; + if (out_start > out_stop || out_start >= db_len || out_stop < 0) { + return RangeStatus::RangeError; + } else { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if (out_start < cache_len && out_stop < cache_len) { + return RangeStatus::RangeHit; + } else { + return RangeStatus::RangeMiss; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if (out_start >= db_len - cache_len && out_stop >= db_len - cache_len) { + out_start = out_start - (db_len - cache_len); + out_stop = out_stop - (db_len - cache_len); + return RangeStatus::RangeHit; + } else { + return RangeStatus::RangeMiss; + } + } else { + return RangeStatus::RangeError; + } + } +} + +RangeStatus PikaCache::CheckCacheRevRange(int32_t cache_len, int32_t db_len, int64_t start, int64_t stop, int64_t &out_start, + int64_t &out_stop) { + int64_t start_index = stop >= 0 ? db_len - stop - 1 : -stop - 1; + int64_t stop_index = start >= 0 ? db_len - start - 1 : -start - 1; + start_index = start_index <= 0 ? 0 : start_index; + stop_index = stop_index >= db_len ? 
db_len - 1 : stop_index; + if (start_index > stop_index || start_index >= db_len || stop_index < 0) { + return RangeStatus::RangeError; + } else { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if (start_index < cache_len && stop_index < cache_len) { + // cache reverse index + out_start = cache_len - stop_index - 1; + out_stop = cache_len - start_index - 1; + + return RangeStatus::RangeHit; + } else { + return RangeStatus::RangeMiss; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if (start_index >= db_len - cache_len && stop_index >= db_len - cache_len) { + int cache_start = start_index - (db_len - cache_len); + int cache_stop = stop_index - (db_len - cache_len); + out_start = cache_len - cache_stop - 1; + out_stop = cache_len - cache_start - 1; + return RangeStatus::RangeHit; + } else { + return RangeStatus::RangeMiss; + } + } else { + return RangeStatus::RangeError; + } + } +} + +Status PikaCache::ZRange(std::string& key, int64_t start, int64_t stop, std::vector *score_members, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + auto db_obj = db->storage(); + Status s; + if (cache_obj->Exists(key)) { + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + int32_t db_len = 0; + db_obj->ZCard(key, &db_len); + int64_t out_start = 0; + int64_t out_stop = 0; + RangeStatus rs = CheckCacheRange(cache_len, db_len, start, stop, out_start, out_stop); + if (rs == RangeStatus::RangeHit) { + return cache_obj->ZRange(key, out_start, out_stop, score_members); + } else if (rs == RangeStatus::RangeMiss) { + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, db_len, db); + return Status::NotFound("key not in cache"); + } else if (rs == RangeStatus::RangeError) { + return Status::NotFound("error range"); + } else { + return Status::Corruption("unknown error"); + } + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZRangebyscore(std::string& key, std::string &min, std::string &max, + std::vector *score_members, ZRangebyscoreCmd *cmd) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + storage::ScoreMember cache_min_sm; + storage::ScoreMember cache_max_sm; + if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) { + return Status::NotFound("key not exist"); + } + + if (RangeStatus::RangeHit == CheckCacheRangeByScore(cache_len, cache_min_sm.score, cache_max_sm.score, + cmd->MinScore(), cmd->MaxScore(), cmd->LeftClose(), + cmd->RightClose())) { + return cache_obj->ZRangebyscore(key, min, max, score_members, cmd->Offset(), cmd->Count()); + } else { + return Status::NotFound("key not in cache"); + } + } +} + +Status PikaCache::ZRank(std::string& key, std::string& member, int64_t *rank, const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + auto s = cache_obj->ZRank(key, member, rank); + if (s.ok()) { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + int32_t db_len = 0; + db->storage()->ZCard(key, 
&db_len); + *rank = db_len - cache_len + *rank; + } + return s; + } else { + return Status::NotFound("key not in cache"); + } + } +} + +Status PikaCache::ZRem(std::string& key, std::vector &members, std::shared_ptr db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto s = caches_[cache_index]->ZRem(key, members); + ReloadCacheKeyIfNeeded(caches_[cache_index], key, -1, -1, db); + return s; +} + +Status PikaCache::ZRemrangebyrank(std::string& key, std::string &min, std::string &max, int32_t ele_deleted, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + auto db_obj = db->storage(); + int32_t db_len = 0; + db_obj->ZCard(key, &db_len); + db_len += ele_deleted; + auto start = std::stol(min); + auto stop = std::stol(max); + + int32_t start_index = start >= 0 ? start : db_len + start; + int32_t stop_index = stop >= 0 ? stop : db_len + stop; + start_index = start_index <= 0 ? 0 : start_index; + stop_index = stop_index >= db_len ? db_len - 1 : stop_index; + if (start_index > stop_index) { + return Status::NotFound("error range"); + } + + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if ((uint32_t)start_index <= cache_len) { + auto cache_min_str = std::to_string(start_index); + auto cache_max_str = std::to_string(stop_index); + auto s = cache_obj->ZRemrangebyrank(key, cache_min_str, cache_max_str); + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, db_len - ele_deleted, db); + return s; + } else { + return Status::NotFound("error range"); + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if ((uint32_t)stop_index >= db_len - cache_len) { + int32_t cache_min = start_index - (db_len - cache_len); + int32_t cache_max = stop_index - (db_len - cache_len); + cache_min = cache_min <= 0 ? 0 : cache_min; + cache_max = cache_max >= (int32_t)cache_len ? 
cache_len - 1 : cache_max; + + auto cache_min_str = std::to_string(cache_min); + auto cache_max_str = std::to_string(cache_max); + auto s = cache_obj->ZRemrangebyrank(key, cache_min_str, cache_max_str); + + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, db_len - ele_deleted, db); + return s; + } else { + return Status::NotFound("error range"); + } + } else { + return Status::NotFound("error range"); + } + } +} + +Status PikaCache::ZRemrangebyscore(std::string& key, std::string &min, std::string &max, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto s = caches_[cache_index]->ZRemrangebyscore(key, min, max); + ReloadCacheKeyIfNeeded(caches_[cache_index], key, -1, -1, db); + return s; +} + +Status PikaCache::ZRevrange(std::string& key, int64_t start, int64_t stop, std::vector *score_members, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + auto db_obj = db->storage(); + Status s; + if (cache_obj->Exists(key)) { + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + int32_t db_len = 0; + db_obj->ZCard(key, &db_len); + int64_t out_start = 0; + int64_t out_stop = 0; + RangeStatus rs = CheckCacheRevRange(cache_len, db_len, start, stop, out_start, out_stop); + if (rs == RangeStatus::RangeHit) { + return cache_obj->ZRevrange(key, out_start, out_stop, score_members); + } else if (rs == RangeStatus::RangeMiss) { + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, db_len, db); + return Status::NotFound("key not in cache"); + } else if (rs == RangeStatus::RangeError) { + return Status::NotFound("error revrange"); + } else { + return Status::Corruption("unknown error"); + } + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZRevrangebyscore(std::string& key, std::string &min, std::string &max, + std::vector *score_members, ZRevrangebyscoreCmd *cmd, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + storage::ScoreMember cache_min_sm; + storage::ScoreMember cache_max_sm; + if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) { + return Status::NotFound("key not exist"); + } + auto cache_min_score = cache_min_sm.score; + auto cache_max_score = cache_max_sm.score; + + auto rs = CheckCacheRangeByScore(cache_len, cache_min_score, cache_max_score, cmd->MinScore(), cmd->MaxScore(), + cmd->LeftClose(), cmd->RightClose()); + if (RangeStatus::RangeHit == rs) { + return cache_obj->ZRevrangebyscore(key, min, max, score_members, cmd->Offset(), cmd->Count()); + } else if (RangeStatus::RangeMiss == rs) { + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, -1, db); + return Status::NotFound("score range miss"); + } else { + return Status::NotFound("score range error"); + } + } +} + +bool PikaCache::CacheSizeEqsDB(std::string& key, const std::shared_ptr& db) { + int32_t db_len = 0; + db->storage()->ZCard(key, &db_len); + + std::lock_guard l(rwlock_); + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + uint64_t cache_len = 0; + caches_[cache_index]->ZCard(key, &cache_len); + return (db_len == (int32_t)cache_len) && cache_len; +} + +Status PikaCache::ZRevrangebylex(std::string& key, 
std::string &min, std::string &max, + std::vector *members, const std::shared_ptr& db) { + if (CacheSizeEqsDB(key, db)) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->ZRevrangebylex(key, min, max, members); + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZRevrank(std::string& key, std::string& member, int64_t *rank, const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + auto s = cache_obj->ZRevrank(key, member, rank); + if (s.ok()) { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + int32_t db_len = 0; + db->storage()->ZCard(key, &db_len); + *rank = db_len - cache_len + *rank; + } + return s; + } else { + return Status::NotFound("member not in cache"); + } + } +} +Status PikaCache::ZScore(std::string& key, std::string& member, double *score, const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto s = caches_[cache_index]->ZScore(key, member, score); + if (!s.ok()) { + return Status::NotFound("key or member not in cache"); + } + return s; +} + +Status PikaCache::ZRangebylex(std::string& key, std::string &min, std::string &max, std::vector *members, + const std::shared_ptr& db) { + if (CacheSizeEqsDB(key, db)) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->ZRangebylex(key, min, max, members); + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZLexcount(std::string& key, std::string &min, std::string &max, uint64_t *len, + const std::shared_ptr& db) { + if (CacheSizeEqsDB(key, db)) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + return caches_[cache_index]->ZLexcount(key, min, max, len); + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZRemrangebylex(std::string& key, std::string &min, std::string &max, + const std::shared_ptr& db) { + if (CacheSizeEqsDB(key, db)) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + return caches_[cache_index]->ZRemrangebylex(key, min, max); + } else { + return Status::NotFound("key not in cache"); + } +} + +/*----------------------------------------------------------------------------- + * Bit Commands + *----------------------------------------------------------------------------*/ +Status PikaCache::SetBit(std::string& key, size_t offset, int64_t value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SetBit(key, offset, value); +} + +Status PikaCache::SetBitIfKeyExist(std::string& key, size_t offset, int64_t value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->SetBit(key, offset, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::GetBit(std::string& key, size_t offset, int64_t *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->GetBit(key, offset, value); +} + +Status 
PikaCache::BitCount(std::string& key, int64_t start, int64_t end, int64_t *value, bool have_offset) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->BitCount(key, start, end, value, have_offset);
+}
+
+Status PikaCache::BitPos(std::string& key, int64_t bit, int64_t *value) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->BitPos(key, bit, value);
+}
+
+Status PikaCache::BitPos(std::string& key, int64_t bit, int64_t start, int64_t *value) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->BitPos(key, bit, start, value);
+}
+
+Status PikaCache::BitPos(std::string& key, int64_t bit, int64_t start, int64_t end, int64_t *value) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->BitPos(key, bit, start, end, value);
+}
+
+Status PikaCache::InitWithoutLock(uint32_t cache_num, cache::CacheConfig *cache_cfg) {
+  cache_status_ = PIKA_CACHE_STATUS_INIT;
+
+  cache_num_ = cache_num;
+  if (cache_cfg != nullptr) {
+    cache::RedisCache::SetConfig(cache_cfg);
+  }
+
+  for (uint32_t i = 0; i < cache_num; ++i) {
+    auto *cache = new cache::RedisCache();
+    rocksdb::Status s = cache->Open();
+    if (!s.ok()) {
+      LOG(ERROR) << "PikaCache::InitWithoutLock Open cache failed";
+      DestroyWithoutLock();
+      cache_status_ = PIKA_CACHE_STATUS_NONE;
+      return Status::Corruption("create redis cache failed");
+    }
+    caches_.push_back(cache);
+    cache_mutexs_.push_back(std::make_shared<std::mutex>());
+  }
+  cache_status_ = PIKA_CACHE_STATUS_OK;
+  return Status::OK();
+}
+
+void PikaCache::DestroyWithoutLock(void) {
+  cache_status_ = PIKA_CACHE_STATUS_DESTROY;
+
+  for (auto iter = caches_.begin(); iter != caches_.end(); ++iter) {
+    delete *iter;
+  }
+  caches_.clear();
+  cache_mutexs_.clear();
+}
+
+int PikaCache::CacheIndex(const std::string& key) {
+  auto crc = crc32(0L, (const Bytef*)key.data(), (int)key.size());
+  return (int)(crc % caches_.size());
+}
+
+// All Write*ToCache helpers below share one TTL convention: ttl > 0 caches the
+// value with an expiry, ttl == PIKA_TTL_NONE caches it without one, and any
+// other ttl <= 0 means the key has already expired, so it is deleted instead.
+Status PikaCache::WriteKVToCache(std::string& key, std::string &value, int64_t ttl) {
+  if (0 >= ttl) {
+    if (PIKA_TTL_NONE == ttl) {
+      return SetnxWithoutTTL(key, value);
+    } else {
+      return Del({key});
+    }
+  } else {
+    return Setnx(key, value, ttl);
+  }
+  return Status::OK();
+}
+
+Status PikaCache::WriteHashToCache(std::string& key, std::vector<storage::FieldValue> &fvs, int64_t ttl) {
+  if (0 >= ttl) {
+    if (PIKA_TTL_NONE == ttl) {
+      return HMSetnxWithoutTTL(key, fvs);
+    } else {
+      return Del({key});
+    }
+  } else {
+    return HMSetnx(key, fvs, ttl);
+  }
+  return Status::OK();
+}
+
+Status PikaCache::WriteListToCache(std::string& key, std::vector<std::string> &values, int64_t ttl) {
+  if (0 >= ttl) {
+    if (PIKA_TTL_NONE == ttl) {
+      return RPushnxWithoutTTL(key, values);
+    } else {
+      return Del({key});
+    }
+  } else {
+    return RPushnx(key, values, ttl);
+  }
+  return Status::OK();
+}
+
+Status PikaCache::WriteSetToCache(std::string& key, std::vector<std::string> &members, int64_t ttl) {
+  if (0 >= ttl) {
+    if (PIKA_TTL_NONE == ttl) {
+      return SAddnxWithoutTTL(key, members);
+    } else {
+      return Del({key});
+    }
+  } else {
+    return SAddnx(key, members, ttl);
+  }
+  return Status::OK();
+}
+
+Status PikaCache::WriteZSetToCache(std::string& key, std::vector<storage::ScoreMember> &score_members, int64_t ttl) {
+  if (0 >= ttl) {
+    if (PIKA_TTL_NONE == ttl) {
+      return ZAddnxWithoutTTL(key, score_members);
+    } else {
+      return Del({key});
+    }
+  } else {
+    return ZAddnx(key, score_members, ttl);
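+    // A minimal usage sketch of the TTL convention documented above; the key,
+    // value, and TTL arguments here are hypothetical placeholders, not code
+    // from this change:
+    //   cache->WriteKVToCache(key, value, 10);            // cached, expires in 10s
+    //   cache->WriteKVToCache(key, value, PIKA_TTL_NONE); // cached, no expiry
+    //   cache->WriteKVToCache(key, value, -2);            // treated as expired -> Del(key)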
+ } + return Status::OK(); +} + +void PikaCache::PushKeyToAsyncLoadQueue(const char key_type, std::string& key, const std::shared_ptr& db) { + cache_load_thread_->Push(key_type, key, db); +} + +void PikaCache::ClearHitRatio(void) { + std::unique_lock l(rwlock_); + cache::RedisCache::ResetHitAndMissNum(); +} diff --git a/tools/pika_migrate/src/pika_cache_load_thread.cc b/tools/pika_migrate/src/pika_cache_load_thread.cc new file mode 100644 index 0000000000..f9bb040a40 --- /dev/null +++ b/tools/pika_migrate/src/pika_cache_load_thread.cc @@ -0,0 +1,214 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#include + +#include "include/pika_cache_load_thread.h" +#include "include/pika_server.h" +#include "include/pika_cache.h" +#include "pstd/include/scope_record_lock.h" + +extern PikaServer* g_pika_server; + +PikaCacheLoadThread::PikaCacheLoadThread(int zset_cache_start_direction, int zset_cache_field_num_per_key) + : should_exit_(false) + , loadkeys_cond_() + , async_load_keys_num_(0) + , waitting_load_keys_num_(0) + , zset_cache_start_direction_(zset_cache_start_direction) + , zset_cache_field_num_per_key_(zset_cache_field_num_per_key) +{ + set_thread_name("PikaCacheLoadThread"); +} + +PikaCacheLoadThread::~PikaCacheLoadThread() { + { + std::lock_guard lq(loadkeys_mutex_); + should_exit_ = true; + loadkeys_cond_.notify_all(); + } + + StopThread(); +} + +void PikaCacheLoadThread::Push(const char key_type, std::string& key, const std::shared_ptr& db) { + std::unique_lock lq(loadkeys_mutex_); + std::unique_lock lm(loadkeys_map_mutex_); + if (CACHE_LOAD_QUEUE_MAX_SIZE < loadkeys_queue_.size()) { + // 5s to print logs once + static uint64_t last_log_time_us = 0; + if (pstd::NowMicros() - last_log_time_us > 5000000) { + LOG(WARNING) << "PikaCacheLoadThread::Push waiting..."; + last_log_time_us = pstd::NowMicros(); + } + return; + } + + if (loadkeys_map_.find(key) == loadkeys_map_.end()) { + std::tuple> ktuple = std::make_tuple(key_type, key, db); + loadkeys_queue_.push_back(ktuple); + loadkeys_map_[key] = std::string(""); + loadkeys_cond_.notify_all(); + } +} + +bool PikaCacheLoadThread::LoadKV(std::string& key, const std::shared_ptr& db) { + std::string value; + int64_t ttl = -1; + rocksdb::Status s = db->storage()->GetWithTTL(key, &value, &ttl); + if (!s.ok()) { + LOG(WARNING) << "load kv failed, key=" << key; + return false; + } + db->cache()->WriteKVToCache(key, value, ttl); + return true; +} + +bool PikaCacheLoadThread::LoadHash(std::string& key, const std::shared_ptr& db) { + int32_t len = 0; + db->storage()->HLen(key, &len); + if (0 >= len || CACHE_VALUE_ITEM_MAX_SIZE < len) { + return false; + } + + std::vector fvs; + int64_t ttl = -1; + rocksdb::Status s = db->storage()->HGetallWithTTL(key, &fvs, &ttl); + if (!s.ok()) { + LOG(WARNING) << "load hash failed, key=" << key; + return false; + } + db->cache()->WriteHashToCache(key, fvs, ttl); + return true; +} + +bool PikaCacheLoadThread::LoadList(std::string& key, const std::shared_ptr& db) { + uint64_t len = 0; + db->storage()->LLen(key, &len); + if (len <= 0 || CACHE_VALUE_ITEM_MAX_SIZE < len) { + LOG(WARNING) << "can not load key, because item size:" << len + << " beyond max item size:" << CACHE_VALUE_ITEM_MAX_SIZE; + return false; + } + + std::vector values; + int64_t ttl = -1; + rocksdb::Status s 
= db->storage()->LRangeWithTTL(key, 0, -1, &values, &ttl); + if (!s.ok()) { + LOG(WARNING) << "load list failed, key=" << key; + return false; + } + db->cache()->WriteListToCache(key, values, ttl); + return true; +} + +bool PikaCacheLoadThread::LoadSet(std::string& key, const std::shared_ptr& db) { + int32_t len = 0; + db->storage()->SCard(key, &len); + if (0 >= len || CACHE_VALUE_ITEM_MAX_SIZE < len) { + LOG(WARNING) << "can not load key, because item size:" << len + << " beyond max item size:" << CACHE_VALUE_ITEM_MAX_SIZE; + return false; + } + + std::vector values; + int64_t ttl_millsec = -1; + rocksdb::Status s = db->storage()->SMembersWithTTL(key, &values, &ttl_millsec); + if (!s.ok()) { + LOG(WARNING) << "load set failed, key=" << key; + return false; + } + db->cache()->WriteSetToCache(key, values, ttl_millsec > 0 ? ttl_millsec / 1000 : ttl_millsec); + return true; +} + +bool PikaCacheLoadThread::LoadZset(std::string& key, const std::shared_ptr& db) { + int32_t len = 0; + int start_index = 0; + int stop_index = -1; + db->storage()->ZCard(key, &len); + if (0 >= len) { + return false; + } + + uint64_t cache_len = 0; + db->cache()->CacheZCard(key, &cache_len); + if (cache_len != 0) { + return true; + } + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if (zset_cache_field_num_per_key_ <= len) { + stop_index = zset_cache_field_num_per_key_ - 1; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if (zset_cache_field_num_per_key_ <= len) { + start_index = len - zset_cache_field_num_per_key_; + } + } + + std::vector score_members; + int64_t ttl = -1; + rocksdb::Status s = db->storage()->ZRangeWithTTL(key, start_index, stop_index, &score_members, &ttl); + if (!s.ok()) { + LOG(WARNING) << "load zset failed, key=" << key; + return false; + } + db->cache()->WriteZSetToCache(key, score_members, ttl); + return true; +} + +bool PikaCacheLoadThread::LoadKey(const char key_type, std::string& key, const std::shared_ptr& db) { + pstd::lock::ScopeRecordLock record_lock(db->LockMgr(), key); + switch (key_type) { + case 'k': + return LoadKV(key, db); + case 'h': + return LoadHash(key, db); + case 'l': + return LoadList(key, db); + case 's': + return LoadSet(key, db); + case 'z': + return LoadZset(key, db); + default: + LOG(WARNING) << "PikaCacheLoadThread::LoadKey invalid key type : " << key_type; + return false; + } +} + +void *PikaCacheLoadThread::ThreadMain() { + LOG(INFO) << "PikaCacheLoadThread::ThreadMain Start"; + + while (!should_exit_) { + std::deque>> load_keys; + { + std::unique_lock lq(loadkeys_mutex_); + waitting_load_keys_num_ = loadkeys_queue_.size(); + while (!should_exit_ && loadkeys_queue_.size() <= 0) { + loadkeys_cond_.wait(lq); + } + + if (should_exit_) { + return nullptr; + } + + for (int i = 0; i < CACHE_LOAD_NUM_ONE_TIME; ++i) { + if (!loadkeys_queue_.empty()) { + load_keys.push_back(loadkeys_queue_.front()); + loadkeys_queue_.pop_front(); + } + } + } + for (auto & load_key : load_keys) { + if (LoadKey(std::get<0>(load_key), std::get<1>(load_key), std::get<2>(load_key))) { + ++async_load_keys_num_; + } + + std::unique_lock lm(loadkeys_map_mutex_); + loadkeys_map_.erase(std::get<1>(load_key)); + } + } + + return nullptr; +} diff --git a/tools/pika_migrate/src/pika_client_conn.cc b/tools/pika_migrate/src/pika_client_conn.cc index fd51331438..768cb6d5ad 100644 --- a/tools/pika_migrate/src/pika_client_conn.cc +++ b/tools/pika_migrate/src/pika_client_conn.cc @@ -3,50 +3,126 @@ // LICENSE file in the root directory of this source tree. 
An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "include/pika_client_conn.h" - -#include -#include - +#include #include +#include +#include +#include "include/pika_admin.h" +#include "include/pika_client_conn.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" #include "include/pika_conf.h" +#include "include/pika_define.h" +#include "include/pika_rm.h" #include "include/pika_server.h" -#include "include/pika_cmd_table_manager.h" +#include "net/src/dispatch_thread.h" +#include "net/src/worker_thread.h" +#include "src/pstd/include/scope_record_lock.h" -extern PikaConf* g_pika_conf; +extern std::unique_ptr g_pika_conf; extern PikaServer* g_pika_server; -extern PikaCmdTableManager* g_pika_cmd_table_manager; - -PikaClientConn::PikaClientConn(int fd, std::string ip_port, - pink::Thread* thread, - pink::PinkEpoll* pink_epoll, - const pink::HandleType& handle_type) - : RedisConn(fd, ip_port, thread, pink_epoll, handle_type), - server_thread_(reinterpret_cast(thread)), - current_table_(g_pika_conf->default_table()), - is_pubsub_(false) { - auth_stat_.Init(); +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +PikaClientConn::PikaClientConn(int fd, const std::string& ip_port, net::Thread* thread, net::NetMultiplexer* mpx, + const net::HandleType& handle_type, int max_conn_rbuf_size) + : RedisConn(fd, ip_port, thread, mpx, handle_type, max_conn_rbuf_size), + server_thread_(reinterpret_cast(thread)), + current_db_(g_pika_conf->default_db()) { + InitUser(); + time_stat_.reset(new TimeStat()); } -std::string PikaClientConn::DoCmd(const PikaCmdArgsType& argv, - const std::string& opt) { +std::shared_ptr PikaClientConn::DoCmd(const PikaCmdArgsType& argv, const std::string& opt, + const std::shared_ptr& resp_ptr, bool cache_miss_in_rtc) { // Get command info std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(opt); if (!c_ptr) { - return "-Err unknown or unsupported command \'" + opt + "\'\r\n"; + std::shared_ptr tmp_ptr = std::make_shared(DummyCmd()); + tmp_ptr->res().SetRes(CmdRes::kErrOther, "unknown command \"" + opt + "\""); + if (IsInTxn()) { + SetTxnInitFailState(true); + } + return tmp_ptr; } - c_ptr->SetConn(std::dynamic_pointer_cast(shared_from_this())); + c_ptr->SetCacheMissedInRtc(cache_miss_in_rtc); + c_ptr->SetConn(shared_from_this()); + c_ptr->SetResp(resp_ptr); // Check authed - // AuthCmd will set stat_ - if (!auth_stat_.IsAuthed(c_ptr)) { - return "-ERR NOAUTH Authentication required.\r\n"; + if (AuthRequired()) { // the user is not authed, need to do auth + if (!(c_ptr->flag() & kCmdFlagsNoAuth)) { + c_ptr->res().SetRes(CmdRes::kErrOther, "NOAUTH Authentication required."); + return c_ptr; + } + } + // Initial + c_ptr->Initial(argv, current_db_); + if (!c_ptr->res().ok()) { + if (IsInTxn()) { + SetTxnInitFailState(true); + } + return c_ptr; } - uint64_t start_us = 0; - if (g_pika_conf->slowlog_slower_than() >= 0) { - start_us = slash::NowMicros(); + int8_t subCmdIndex = -1; + std::string errKey; + auto checkRes = user_->CheckUserPermission(c_ptr, argv, subCmdIndex, &errKey); + std::string cmdName = c_ptr->name(); + if (subCmdIndex >= 0 && checkRes == AclDeniedCmd::CMD) { + cmdName += "|" + argv[1]; + } + + std::string object; + switch (checkRes) { + case AclDeniedCmd::CMD: + c_ptr->res().SetRes(CmdRes::kNone, fmt::format("-NOPERM this user has no permissions to run the '{}' command\r\n", + pstd::StringToLower(cmdName))); + object = cmdName; + break; + case 
AclDeniedCmd::KEY: + c_ptr->res().SetRes(CmdRes::kNone, + "-NOPERM this user has no permissions to access one of the keys used as arguments\r\n"); + object = errKey; + break; + case AclDeniedCmd::CHANNEL: + c_ptr->res().SetRes(CmdRes::kNone, + "-NOPERM this user has no permissions to access one of the channel used as arguments\r\n"); + object = errKey; + break; + case AclDeniedCmd::NO_SUB_CMD: + c_ptr->res().SetRes(CmdRes::kErrOther, fmt::format("unknown subcommand '{}' subcommand", argv[1])); + break; + case AclDeniedCmd::NO_AUTH: + c_ptr->res().AppendContent("-NOAUTH Authentication required."); + break; + default: + break; + } + + if (checkRes == AclDeniedCmd::CMD || checkRes == AclDeniedCmd::KEY || checkRes == AclDeniedCmd::CHANNEL) { + std::string cInfo; + ClientInfoToString(&cInfo, cmdName); + int32_t context = IsInTxn() ? static_cast(AclLogCtx::MULTI) : static_cast(AclLogCtx::TOPLEVEL); + + if (checkRes == AclDeniedCmd::CMD && IsInTxn() && cmdName == kCmdNameExec) { + object = kCmdNameMulti; + } + g_pika_server->Acl()->AddLogEntry(static_cast(checkRes), context, user_->Name(), object, cInfo); + + return c_ptr; + } + + if (IsInTxn() && opt != kCmdNameExec && opt != kCmdNameWatch && opt != kCmdNameDiscard && opt != kCmdNameMulti) { + if (c_ptr->is_write() && g_pika_server->readonly(current_db_)) { + SetTxnInitFailState(true); + c_ptr->res().SetRes(CmdRes::kErrOther, "READONLY You can't write against a read only replica."); + return c_ptr; + } + PushCmdToQue(c_ptr); + c_ptr->res().SetRes(CmdRes::kTxnQueued); + return c_ptr; } bool is_monitoring = g_pika_server->HasMonitorClients(); @@ -54,72 +130,104 @@ std::string PikaClientConn::DoCmd(const PikaCmdArgsType& argv, ProcessMonitor(argv); } - // Initial - c_ptr->Initial(argv, current_table_); - if (!c_ptr->res().ok()) { - return c_ptr->res().message(); - } + g_pika_server->UpdateQueryNumAndExecCountDB(current_db_, opt, c_ptr->is_write()); - g_pika_server->UpdateQueryNumAndExecCountTable(opt); - // PubSub connection // (P)SubscribeCmd will set is_pubsub_ if (this->IsPubSub()) { - if (opt != kCmdNameSubscribe && - opt != kCmdNameUnSubscribe && - opt != kCmdNamePing && - opt != kCmdNamePSubscribe && + if (opt != kCmdNameSubscribe && opt != kCmdNameUnSubscribe && opt != kCmdNamePing && opt != kCmdNamePSubscribe && opt != kCmdNamePUnSubscribe) { - return "-ERR only (P)SUBSCRIBE / (P)UNSUBSCRIBE / PING / QUIT allowed in this context\r\n"; + c_ptr->res().SetRes(CmdRes::kErrOther, + "only (P)SUBSCRIBE / (P)UNSUBSCRIBE / PING / QUIT allowed in this context"); + return c_ptr; } } - if (!g_pika_server->IsCommandSupport(opt)) { - return "-ERR This command only support in classic mode\r\n"; + // reject all the request before new master sync finished + if (g_pika_server->leader_protected_mode()) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Cannot process command before new leader sync finished"); + return c_ptr; } - if (!g_pika_server->IsTableExist(current_table_)) { - return "-ERR Table not found\r\n"; + if (!g_pika_server->IsDBExist(current_db_)) { + c_ptr->res().SetRes(CmdRes::kErrOther, "DB not found"); + return c_ptr; } - // TODO: Consider special commands, like flushall, flushdb? 
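  // Write commands are vetted below in this order: binlog IO health, presence
  // of at least one current key (EXEC is exempt), and the read-only replica
  // check; plain reads on a slave additionally require a completed full sync.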
if (c_ptr->is_write()) { - if (g_pika_server->IsTableBinlogIoError(current_table_)) { - return "-ERR Writing binlog failed, maybe no space left on device\r\n"; + if (g_pika_server->IsDBBinlogIoError(current_db_)) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Writing binlog failed, maybe no space left on device"); + return c_ptr; } std::vector cur_key = c_ptr->current_key(); - if (cur_key.empty()) { - return "-ERR Internal ERROR\r\n"; + if (cur_key.empty() && opt != kCmdNameExec) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Internal ERROR"); + return c_ptr; } - if (g_pika_server->readonly(current_table_, cur_key.front())) { - return "-ERR Server in read-only\r\n"; + if (g_pika_server->readonly(current_db_) && opt != kCmdNameExec) { + c_ptr->res().SetRes(CmdRes::kErrOther, "READONLY You can't write against a read only replica."); + return c_ptr; + } + } else if (c_ptr->is_read() && c_ptr->flag_ == 0) { + const auto& server_guard = std::lock_guard(g_pika_server->GetDBLock()); + int role = 0; + auto status = g_pika_rm->CheckDBRole(current_db_, &role); + if (!status.ok()) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Internal ERROR"); + return c_ptr; + } else if ((role & PIKA_ROLE_SLAVE) == PIKA_ROLE_SLAVE) { + const auto& slave_db = g_pika_rm->GetSyncSlaveDBByName(DBInfo(current_db_)); + if (!slave_db) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Internal ERROR"); + return c_ptr; + } else if (slave_db->State() != ReplState::kConnected) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Full sync not completed"); + return c_ptr; + } + } + } + + if (c_ptr->res().ok() && c_ptr->is_write() && name() != kCmdNameExec) { + if (c_ptr->name() == kCmdNameFlushdb) { + auto flushdb = std::dynamic_pointer_cast(c_ptr); + SetTxnFailedIfKeyExists(flushdb->GetFlushDBname()); + } else if (c_ptr->name() == kCmdNameFlushall) { + SetTxnFailedIfKeyExists(); + } else { + auto table_keys = c_ptr->current_key(); + for (auto& key : table_keys) { + key = c_ptr->db_name().append("_").append(key); + } + SetTxnFailedFromKeys(table_keys); } } // Process Command c_ptr->Execute(); + time_stat_->process_done_ts_ = pstd::NowMicros(); + auto cmdstat_map = g_pika_cmd_table_manager->GetCommandStatMap(); + (*cmdstat_map)[opt].cmd_count.fetch_add(1); + (*cmdstat_map)[opt].cmd_time_consuming.fetch_add(time_stat_->total_time()); if (g_pika_conf->slowlog_slower_than() >= 0) { - ProcessSlowlog(argv, start_us); + ProcessSlowlog(argv, c_ptr->GetDoDuration()); } - return c_ptr->res().message(); + return c_ptr; } -void PikaClientConn::ProcessSlowlog(const PikaCmdArgsType& argv, uint64_t start_us) { - int32_t start_time = start_us / 1000000; - int64_t duration = slash::NowMicros() - start_us; - if (duration > g_pika_conf->slowlog_slower_than()) { - g_pika_server->SlowlogPushEntry(argv, start_time, duration); +void PikaClientConn::ProcessSlowlog(const PikaCmdArgsType& argv, uint64_t do_duration) { + if (time_stat_->total_time() > g_pika_conf->slowlog_slower_than()) { + g_pika_server->SlowlogPushEntry(argv, time_stat_->start_ts() / 1000000, time_stat_->total_time()); if (g_pika_conf->slowlog_write_errorlog()) { bool trim = false; std::string slow_log; uint32_t cmd_size = 0; - for (unsigned int i = 0; i < argv.size(); i++) { - cmd_size += 1 + argv[i].size(); // blank space and argument length + for (const auto& i : argv) { + cmd_size += 1 + i.size(); // blank space and argument length if (!trim) { slow_log.append(" "); - slow_log.append(slash::ToRead(argv[i])); + slow_log.append(pstd::ToRead(i)); if (slow_log.size() >= 1000) { trim = true; 
slow_log.resize(1000); @@ -127,130 +235,355 @@ void PikaClientConn::ProcessSlowlog(const PikaCmdArgsType& argv, uint64_t start_ } } } - LOG(ERROR) << "ip_port: " << ip_port() << ", table: " << current_table_ - << ", command:" << slow_log << ", command_size: " << cmd_size - 1 - << ", arguments: " << argv.size() << ", start_time(s): " << start_time - << ", duration(us): " << duration; + LOG(ERROR) << "ip_port: " << ip_port() << ", db: " << current_db_ << ", command:" << slow_log + << ", command_size: " << cmd_size - 1 << ", arguments: " << argv.size() + << ", total_time(ms): " << time_stat_->total_time() / 1000 + << ", before_queue_time(ms): " << time_stat_->before_queue_time() / 1000 + << ", queue_time(ms): " << time_stat_->queue_time() / 1000 + << ", process_time(ms): " << time_stat_->process_time() / 1000 + << ", cmd_time(ms): " << do_duration / 1000; } } } void PikaClientConn::ProcessMonitor(const PikaCmdArgsType& argv) { std::string monitor_message; - std::string table_name = g_pika_conf->classic_mode() - ? current_table_.substr(2) : current_table_; - monitor_message = std::to_string(1.0*slash::NowMicros()/1000000) + - " [" + table_name + " " + this->ip_port() + "]"; - for (PikaCmdArgsType::const_iterator iter = argv.begin(); iter != argv.end(); iter++) { - monitor_message += " " + slash::ToRead(*iter); + std::string db_name = current_db_.substr(2); + monitor_message = std::to_string(1.0 * static_cast(pstd::NowMicros()) / 1000000) + " [" + db_name + " " + + this->ip_port() + "]"; + for (const auto& iter : argv) { + monitor_message += " " + pstd::ToRead(iter); } g_pika_server->AddMonitorMessage(monitor_message); } -void PikaClientConn::AsynProcessRedisCmds(const std::vector& argvs, std::string* response) { - BgTaskArg* arg = new BgTaskArg(); - arg->redis_cmds = argvs; - arg->response = response; - arg->pcc = std::dynamic_pointer_cast(shared_from_this()); - g_pika_server->Schedule(&DoBackgroundTask, arg); +bool PikaClientConn::IsInterceptedByRTC(std::string& opt) { + // currently we only Intercept: Get, HGet + if (opt == kCmdNameGet && g_pika_conf->GetCacheString()) { + return true; + } + if (opt == kCmdNameHGet && g_pika_conf->GetCacheHash()) { + return true; + } + return false; } -void PikaClientConn::BatchExecRedisCmd(const std::vector& argvs, std::string* response) { - bool success = true; - for (const auto& argv : argvs) { - if (DealMessage(argv, response) != 0) { - success = false; - break; +void PikaClientConn::ProcessRedisCmds(const std::vector& argvs, bool async, + std::string* response) { + time_stat_->Reset(); + if (async) { + auto arg = new BgTaskArg(); + arg->cache_miss_in_rtc_ = false; + arg->redis_cmds = argvs; + time_stat_->enqueue_ts_ = time_stat_->before_queue_ts_ = pstd::NowMicros(); + arg->conn_ptr = std::dynamic_pointer_cast(shared_from_this()); + /** + * If using the pipeline method to transmit batch commands to Pika, it is unable to + * correctly distinguish between fast and slow commands. + * However, if using the pipeline method for Codis, it can correctly distinguish between + * fast and slow commands, but it cannot guarantee sequential execution. 
+ */ + std::string opt = argvs[0][0]; + pstd::StringToLower(opt); + bool is_slow_cmd = g_pika_conf->is_slow_cmd(opt); + bool is_admin_cmd = g_pika_conf->is_admin_cmd(opt); + + // we don't intercept pipeline batch (argvs.size() > 1) + if (g_pika_conf->rtc_cache_read_enabled() && argvs.size() == 1 && IsInterceptedByRTC(opt) && + PIKA_CACHE_NONE != g_pika_conf->cache_mode() && !IsInTxn()) { + // read in cache + if (ReadCmdInCache(argvs[0], opt)) { + delete arg; + return; + } + arg->cache_miss_in_rtc_ = true; + time_stat_->before_queue_ts_ = pstd::NowMicros(); } + + g_pika_server->ScheduleClientPool(&DoBackgroundTask, arg, is_slow_cmd, is_admin_cmd); + return; } - if (!response->empty()) { - set_is_reply(true); - NotifyEpoll(success); + BatchExecRedisCmd(argvs, false); +} + +void PikaClientConn::DoBackgroundTask(void* arg) { + std::unique_ptr bg_arg(static_cast(arg)); + std::shared_ptr conn_ptr = bg_arg->conn_ptr; + conn_ptr->time_stat_->dequeue_ts_ = pstd::NowMicros(); + if (bg_arg->redis_cmds.empty()) { + conn_ptr->NotifyEpoll(false); + return; + } + for (const auto& argv : bg_arg->redis_cmds) { + if (argv.empty()) { + conn_ptr->NotifyEpoll(false); + return; + } } + + conn_ptr->BatchExecRedisCmd(bg_arg->redis_cmds, bg_arg->cache_miss_in_rtc_); } -int PikaClientConn::DealMessage(const PikaCmdArgsType& argv, std::string* response) { +void PikaClientConn::BatchExecRedisCmd(const std::vector& argvs, bool cache_miss_in_rtc) { + resp_num.store(static_cast(argvs.size())); + for (const auto& argv : argvs) { + std::shared_ptr resp_ptr = std::make_shared(); + resp_array.push_back(resp_ptr); + ExecRedisCmd(argv, resp_ptr, cache_miss_in_rtc); + } + time_stat_->process_done_ts_ = pstd::NowMicros(); + TryWriteResp(); +} - if (argv.empty()) return -2; - std::string opt = argv[0]; - if (opt == kClusterPrefix) { - if (argv.size() >=2 ) { - opt += argv[1]; +bool PikaClientConn::ReadCmdInCache(const net::RedisCmdArgsType& argv, const std::string& opt) { + resp_num.store(1); + std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(opt); + if (!c_ptr) { + return false; + } + // Check authed + if (AuthRequired()) { // the user is not authed, need to do auth + if (!(c_ptr->flag() & kCmdFlagsNoAuth)) { + return false; } } - slash::StringToLower(opt); + // Initial + c_ptr->Initial(argv, current_db_); + // dont store cmd with too large key(only Get/HGet cmd can reach here) + // the cmd with large key should be non-exist in cache, except for pre-stored + if (c_ptr->IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) { + resp_num--; + return false; + } + // acl check + int8_t subCmdIndex = -1; + std::string errKey; + auto checkRes = user_->CheckUserPermission(c_ptr, argv, subCmdIndex, &errKey); + std::string object; + if (checkRes == AclDeniedCmd::CMD || checkRes == AclDeniedCmd::KEY || checkRes == AclDeniedCmd::CHANNEL || + checkRes == AclDeniedCmd::NO_SUB_CMD || checkRes == AclDeniedCmd::NO_AUTH) { + // acl check failed + return false; + } + // only read command(Get, HGet) will reach here, no need of record lock + bool read_status = c_ptr->DoReadCommandInCache(); + auto cmdstat_map = g_pika_cmd_table_manager->GetCommandStatMap(); + resp_num--; + if (read_status) { + time_stat_->process_done_ts_ = pstd::NowMicros(); + (*cmdstat_map)[argv[0]].cmd_count.fetch_add(1); + (*cmdstat_map)[argv[0]].cmd_time_consuming.fetch_add(time_stat_->total_time()); + resp_array.emplace_back(std::make_shared(std::move(c_ptr->res().message()))); + TryWriteResp(); + } + return read_status; +} - if (response->empty()) { - // Avoid memory 
copy - *response = std::move(DoCmd(argv, opt)); - } else { - // Maybe pipeline - response->append(DoCmd(argv, opt)); +void PikaClientConn::TryWriteResp() { + int expected = 0; + if (resp_num.compare_exchange_strong(expected, -1)) { + for (auto& resp : resp_array) { + WriteResp(*resp); + } + if (write_completed_cb_) { + write_completed_cb_(); + write_completed_cb_ = nullptr; + } + resp_array.clear(); + NotifyEpoll(true); } - return 0; } -void PikaClientConn::DoBackgroundTask(void* arg) { - BgTaskArg* bg_arg = reinterpret_cast(arg); - bg_arg->pcc->BatchExecRedisCmd(bg_arg->redis_cmds, bg_arg->response); - delete bg_arg; +void PikaClientConn::PushCmdToQue(std::shared_ptr cmd) { txn_cmd_que_.push(cmd); } + +bool PikaClientConn::IsInTxn() { + std::lock_guard lg(txn_state_mu_); + return txn_state_[TxnStateBitMask::Start]; +} + +bool PikaClientConn::IsTxnInitFailed() { + std::lock_guard lg(txn_state_mu_); + return txn_state_[TxnStateBitMask::InitCmdFailed]; +} + +bool PikaClientConn::IsTxnWatchFailed() { + std::lock_guard lg(txn_state_mu_); + return txn_state_[TxnStateBitMask::WatchFailed]; +} + +bool PikaClientConn::IsTxnExecing() { + std::lock_guard lg(txn_state_mu_); + return txn_state_[TxnStateBitMask::Execing] && txn_state_[TxnStateBitMask::Start]; +} + +void PikaClientConn::SetTxnWatchFailState(bool is_failed) { + std::lock_guard lg(txn_state_mu_); + txn_state_[TxnStateBitMask::WatchFailed] = is_failed; +} + +void PikaClientConn::SetTxnInitFailState(bool is_failed) { + std::lock_guard lg(txn_state_mu_); + txn_state_[TxnStateBitMask::InitCmdFailed] = is_failed; +} + +void PikaClientConn::SetTxnStartState(bool is_start) { + std::lock_guard lg(txn_state_mu_); + txn_state_[TxnStateBitMask::Start] = is_start; } -// Initial permission status -void PikaClientConn::AuthStat::Init() { - // Check auth required - stat_ = g_pika_conf->userpass() == "" ? 
-          kLimitAuthed : kNoAuthed;
-  if (stat_ == kLimitAuthed
-      && g_pika_conf->requirepass() == "") {
-    stat_ = kAdminAuthed;
+void PikaClientConn::ClearTxnCmdQue() { txn_cmd_que_ = std::queue<std::shared_ptr<Cmd>>{}; }
+
+void PikaClientConn::AddKeysToWatch(const std::vector<std::string>& db_keys) {
+  for (const auto& it : db_keys) {
+    watched_db_keys_.emplace(it);
+  }
+
+  auto dispatcher = dynamic_cast<net::DispatchThread*>(server_thread());
+  if (dispatcher != nullptr) {
+    dispatcher->AddWatchKeys(watched_db_keys_, shared_from_this());
   }
 }
 
-// Check permission for current command
-bool PikaClientConn::AuthStat::IsAuthed(const std::shared_ptr<Cmd> cmd_ptr) {
-  std::string opt = cmd_ptr->name();
-  if (opt == kCmdNameAuth) {
-    return true;
+void PikaClientConn::RemoveWatchedKeys() {
+  auto dispatcher = dynamic_cast<net::DispatchThread*>(server_thread());
+  if (dispatcher != nullptr) {
+    watched_db_keys_.clear();
+    dispatcher->RemoveWatchKeys(shared_from_this());
   }
-  const std::vector<std::string>& blacklist = g_pika_conf->vuser_blacklist();
-  switch (stat_) {
-    case kNoAuthed:
-      return false;
-    case kAdminAuthed:
-      break;
-    case kLimitAuthed:
-      if (cmd_ptr->is_admin_require()
-        || find(blacklist.begin(), blacklist.end(), opt) != blacklist.end()) {
-        return false;
+}
+
+void PikaClientConn::SetTxnFailedFromKeys(const std::vector<std::string>& db_keys) {
+  auto dispatcher = dynamic_cast<net::DispatchThread*>(server_thread());
+  if (dispatcher != nullptr) {
+    auto involved_conns = std::vector<std::shared_ptr<NetConn>>{};
+    involved_conns = dispatcher->GetInvolvedTxn(db_keys);
+    for (auto& conn : involved_conns) {
+      if (auto c = std::dynamic_pointer_cast<PikaClientConn>(conn); c != nullptr) {
+        c->SetTxnWatchFailState(true);
       }
-      break;
-    default:
-      LOG(WARNING) << "Invalid auth stat : " << static_cast<unsigned>(stat_);
-      return false;
+    }
   }
-  return true;
 }
 
-// Update permission status
-bool PikaClientConn::AuthStat::ChecknUpdate(const std::string& message) {
-  // Situations to change auth status
-  if (message == "USER") {
-    stat_ = kLimitAuthed;
-  } else if (message == "ROOT") {
-    stat_ = kAdminAuthed;
+// If a key watched by an open MULTI still exists in target_db, that client's WATCH is marked as failed
+void PikaClientConn::SetTxnFailedIfKeyExists(std::string target_db_name) {
+  auto dispatcher = dynamic_cast<net::DispatchThread*>(server_thread());
+  if (dispatcher == nullptr) {
+    return;
+  }
+  auto involved_conns = dispatcher->GetAllTxns();
+  for (auto& conn : involved_conns) {
+    std::shared_ptr<PikaClientConn> c;
+    if (c = std::dynamic_pointer_cast<PikaClientConn>(conn); c == nullptr) {
+      continue;
+    }
+
+    for (const auto& db_key : c->watched_db_keys_) {
+      size_t pos = db_key.find('_');
+      if (pos == std::string::npos) {
+        continue;
+      }
+
+      auto db_name = db_key.substr(0, pos);
+      auto key = db_key.substr(pos + 1);
+
+      if (target_db_name == "" || target_db_name == "all" || target_db_name == db_name) {
+        auto db = g_pika_server->GetDB(db_name);
+        // if the watched key exists, set the watch state to failed
+        if (db->storage()->Exists({key}) > 0) {
+          c->SetTxnWatchFailState(true);
+          break;
+        }
+      }
+    }
+  }
+}
+
+void PikaClientConn::ExitTxn() {
+  if (IsInTxn()) {
+    RemoveWatchedKeys();
+    ClearTxnCmdQue();
+    std::lock_guard lg(txn_state_mu_);
+    txn_state_.reset();
+  }
+}
+
+void PikaClientConn::ExecRedisCmd(const PikaCmdArgsType& argv, std::shared_ptr<std::string>& resp_ptr,
+                                  bool cache_miss_in_rtc) {
+  // get opt
+  std::string opt = argv[0];
+  pstd::StringToLower(opt);
+  if (opt == kClusterPrefix) {
+    if (argv.size() >= 2) {
+      opt += argv[1];
+      pstd::StringToLower(opt);
+    }
+  }
+
+  std::shared_ptr<Cmd> cmd_ptr = DoCmd(argv, opt, resp_ptr, cache_miss_in_rtc);
+  *resp_ptr = std::move(cmd_ptr->res().message());
+  resp_num--;
+}
+
+std::queue<std::shared_ptr<Cmd>> PikaClientConn::GetTxnCmdQue() { return txn_cmd_que_; }
+
+void PikaClientConn::DoAuth(const std::shared_ptr<User>& user) {
+  user_ = user;
+  authenticated_ = true;
+}
+
+void PikaClientConn::UnAuth(const std::shared_ptr<User>& user) {
+  user_ = user;
+  // If the user has no password and is not disabled, the connection needs no further authentication
+  authenticated_ = user_->HasFlags(static_cast<uint32_t>(AclUserFlag::NO_PASS)) &&
+                   !user_->HasFlags(static_cast<uint32_t>(AclUserFlag::DISABLED));
+}
+
+bool PikaClientConn::IsAuthed() const { return authenticated_; }
+void PikaClientConn::InitUser() {
+  if (!g_pika_conf->GetUserBlackList().empty()) {
+    user_ = g_pika_server->Acl()->GetUserLock(Acl::DefaultLimitUser);
   } else {
+    user_ = g_pika_server->Acl()->GetUserLock(Acl::DefaultUser);
+  }
+  authenticated_ = user_->HasFlags(static_cast<uint32_t>(AclUserFlag::NO_PASS)) &&
+                   !user_->HasFlags(static_cast<uint32_t>(AclUserFlag::DISABLED));
+}
+bool PikaClientConn::AuthRequired() const {
+  // If the user has no password and is not disabled, no authentication is required;
+  // otherwise, check whether the connection has already been authenticated
+  if (IsAuthed()) {
+    return false;
+  }
+  if (user_->HasFlags(static_cast<uint32_t>(AclUserFlag::DISABLED))) {
+    return true;
+  }
+  if (user_->HasFlags(static_cast<uint32_t>(AclUserFlag::NO_PASS))) {
     return false;
   }
   return true;
 }
+std::string PikaClientConn::UserName() const { return user_->Name(); }
 
-// compare addr in ClientInfo
-bool AddrCompare(const ClientInfo& lhs, const ClientInfo& rhs) {
-  return rhs.ip_port < lhs.ip_port;
-}
+void PikaClientConn::ClientInfoToString(std::string* info, const std::string& cmdName) {
+  uint64_t age = pstd::NowMicros() - last_interaction().tv_usec;
 
-bool IdleCompare(const ClientInfo& lhs, const ClientInfo& rhs) {
-  return lhs.last_interaction < rhs.last_interaction;
+  std::string flags;
+  g_pika_server->ClientIsMonitor(std::dynamic_pointer_cast<PikaClientConn>(shared_from_this())) ? flags.append("O")
+                                                                                                : flags.append("S");
+  if (IsPubSub()) {
+    flags.append("P");
+  }
+
+  info->append(fmt::format(
+      "id={} addr={} name={} age={} idle={} flags={} db={} sub={} psub={} multi={} "
+      "cmd={} user={} resp=2",
+      fd(), ip_port(), name(), age, age / 1000000, flags, GetCurrentTable(),
+      IsPubSub() ? g_pika_server->ClientPubSubChannelSize(shared_from_this()) : 0,
+      IsPubSub() ? g_pika_server->ClientPubSubChannelPatternSize(shared_from_this()) : 0, -1, cmdName, user_->Name()));
 }
+// compare addr in ClientInfo
+bool AddrCompare(const ClientInfo& lhs, const ClientInfo& rhs) { return rhs.ip_port < lhs.ip_port; }
+
+bool IdleCompare(const ClientInfo& lhs, const ClientInfo& rhs) { return lhs.last_interaction < rhs.last_interaction; }
diff --git a/tools/pika_migrate/src/pika_client_processor.cc b/tools/pika_migrate/src/pika_client_processor.cc
new file mode 100644
index 0000000000..5a1c60cee0
--- /dev/null
+++ b/tools/pika_migrate/src/pika_client_processor.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
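
Editorial note before the new pika_client_processor.cc below, concerning the PikaClientConn changes above: they are the client half of MULTI/WATCH support. A connection records the keys it WATCHes as composite `<db>_<key>` strings and registers them with the dispatch thread; two paths then flip its txn-fail flag so a later EXEC aborts: SetTxnFailedFromKeys (a write touched a watched key) and SetTxnFailedIfKeyExists (after a flush, any watched key that still exists in the target db is suspect). The following standalone sketch models that reverse index; `Client` and `WatchRegistry` are hypothetical stand-ins for PikaClientConn and net::DispatchThread, not Pika's actual API.

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>

// Toy stand-in for PikaClientConn: watched keys plus the txn-fail flag.
struct Client {
  std::set<std::string> watched_keys;
  bool txn_watch_failed = false;
};

// Toy stand-in for the dispatcher's key -> watchers reverse index.
class WatchRegistry {
 public:
  void AddWatchKeys(const std::set<std::string>& keys, const std::shared_ptr<Client>& c) {
    for (const auto& k : keys) watchers_[k].insert(c);  // shared_ptr converts to weak_ptr
  }
  void RemoveWatchKeys(const std::shared_ptr<Client>& c) {
    for (auto& [key, conns] : watchers_) conns.erase(c);
  }
  // Mirrors SetTxnFailedFromKeys: a write to `key` fails every watcher's MULTI.
  void OnKeyWritten(const std::string& key) {
    auto it = watchers_.find(key);
    if (it == watchers_.end()) return;
    for (const auto& w : it->second) {
      if (auto c = w.lock()) c->txn_watch_failed = true;
    }
  }

 private:
  using WeakClient = std::weak_ptr<Client>;
  std::map<std::string, std::set<WeakClient, std::owner_less<WeakClient>>> watchers_;
};

int main() {
  WatchRegistry registry;
  auto conn = std::make_shared<Client>();
  conn->watched_keys = {"db0_balance"};             // "<db>_<key>" encoding, as in the diff
  registry.AddWatchKeys(conn->watched_keys, conn);  // WATCH balance

  registry.OnKeyWritten("db0_balance");             // another client writes the key
  std::cout << std::boolalpha << conn->txn_watch_failed << '\n';  // true -> EXEC must abort
}
```

Holding watchers weakly mirrors why the real code has to dynamic_cast connections back to PikaClientConn: the dispatcher only keeps generic connection handles, and marking a WATCH as failed must not extend a connection's lifetime. Note also that SetTxnFailedIfKeyExists recovers the db name by splitting the composite key at the first '_', which is exactly why watched keys are stored with the db prefix.
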
+ +#include "include/pika_client_processor.h" + +#include + +PikaClientProcessor::PikaClientProcessor(size_t worker_num, size_t max_queue_size, const std::string& name_prefix) { + pool_ = std::make_unique(worker_num, max_queue_size, name_prefix + "Pool"); +} + +PikaClientProcessor::~PikaClientProcessor() { + LOG(INFO) << "PikaClientProcessor exit!!!"; +} + +int PikaClientProcessor::Start() { + int res = pool_->start_thread_pool(); + if (res != net::kSuccess) { + return res; + } + return res; +} + +void PikaClientProcessor::Stop() { + pool_->stop_thread_pool(); +} + +void PikaClientProcessor::SchedulePool(net::TaskFunc func, void* arg) { pool_->Schedule(func, arg); } + +size_t PikaClientProcessor::ThreadPoolCurQueueSize() { + size_t cur_size = 0; + if (pool_) { + pool_->cur_queue_size(&cur_size); + } + return cur_size; +} + +size_t PikaClientProcessor::ThreadPoolMaxQueueSize() { + size_t cur_size = 0; + if (pool_) { + cur_size = pool_->max_queue_size(); + } + return cur_size; +} diff --git a/tools/pika_migrate/src/pika_cluster.cc b/tools/pika_migrate/src/pika_cluster.cc deleted file mode 100644 index 34d5b1630d..0000000000 --- a/tools/pika_migrate/src/pika_cluster.cc +++ /dev/null @@ -1,495 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_rm.h" -#include "include/pika_table.h" -#include "include/pika_server.h" -#include "include/pika_cluster.h" -#include "include/pika_cmd_table_manager.h" - -extern PikaReplicaManager* g_pika_rm; -extern PikaServer* g_pika_server; -extern PikaConf* g_pika_conf; - -const std::string PkClusterInfoCmd::kSlotSection = "slot"; - -// pkcluster info slot table:slot -// pkcluster info table -// pkcluster info node -// pkcluster info cluster -void PkClusterInfoCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePkClusterInfo); - return; - } - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "PkClusterInfo only support on sharding mode"); - return; - } - if (!strcasecmp(argv_[2].data(), kSlotSection.data())) { - info_section_ = kInfoSlot; - if (!ParseInfoSlotSubCmd()) { - return; - } - } else { - info_section_ = kInfoErr; - } - return; -} - -void PkClusterInfoCmd::Do(std::shared_ptr partition) { - std::string info; - switch (info_section_) { - case kInfoSlot: - if (info_range_ == kAll) { - ClusterInfoSlotAll(&info); - } else if (info_range_ == kSingle) { - // doesn't process error, if error return nothing - GetSlotInfo(table_name_, partition_id_, &info); - } - break; - default: - break; - } - res_.AppendStringLen(info.size()); - res_.AppendContent(info); - return; -} - -bool PkClusterInfoCmd::ParseInfoSlotSubCmd() { - if (argv_.size() > 3) { - if (argv_.size() == 4) { - info_range_ = kSingle; - std::string tmp(argv_[3]); - size_t pos = tmp.find(':'); - std::string slot_num_str; - if (pos == std::string::npos) { - table_name_ = g_pika_conf->default_table(); - slot_num_str = tmp; - } else { - table_name_ = tmp.substr(0, pos); - slot_num_str = tmp.substr(pos + 1); - } - unsigned long partition_id; - if (!slash::string2ul(slot_num_str.c_str(), slot_num_str.size(), &partition_id)) { - res_.SetRes(CmdRes::kInvalidParameter, kCmdNamePkClusterInfo); - return false; - } - partition_id_ = partition_id; - } else { - 
res_.SetRes(CmdRes::kWrongNum, kCmdNamePkClusterInfo); - return false; - } - } - return true; -} - -void PkClusterInfoCmd::ClusterInfoSlotAll(std::string* info) { - std::stringstream tmp_stream; - for (const auto& table_item : g_pika_server->tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& partition_item : table_item.second->partitions_) { - std::string table_name = table_item.second->GetTableName(); - uint32_t partition_id = partition_item.second->GetPartitionId(); - std::string p_info; - Status s = GetSlotInfo(table_name, partition_id, &p_info); - if (!s.ok()) { - continue; - } - tmp_stream << p_info; - } - } - info->append(tmp_stream.str()); -} - -Status PkClusterInfoCmd::GetSlotInfo(const std::string table_name, - uint32_t partition_id, - std::string* info) { - std::shared_ptr partition = - g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition) { - return Status::NotFound("not found"); - } - Status s; - std::stringstream tmp_stream; - - // binlog offset section - uint32_t filenum = 0; - uint64_t offset = 0; - partition->logger()->GetProducerStatus(&filenum, &offset); - tmp_stream << partition->GetPartitionName() << " binlog_offset=" - << filenum << " " << offset; - - // safety purge section - std::string safety_purge; - s = g_pika_rm->GetSafetyPurgeBinlogFromSMP(table_name, partition_id, &safety_purge); - tmp_stream << ",safety_purge=" << (s.ok() ? safety_purge : "error") << "\r\n"; - - // partition info section - std::string p_info; - s = g_pika_rm->GetPartitionInfo(table_name, partition_id, &p_info); - if (!s.ok()) { - return s; - } - tmp_stream << p_info; - info->append(tmp_stream.str()); - return Status::OK(); -} - -Status ParseSlotGroup(const std::string& slot_group, - std::set* slots) { - std::set tmp_slots; - int64_t slot_idx, start_idx, end_idx; - std::string::size_type pos; - std::vector elems; - slash::StringSplit(slot_group, COMMA, elems); - for (const auto& elem : elems) { - if ((pos = elem.find("-")) == std::string::npos) { - if (!slash::string2l(elem.data(), elem.size(), &slot_idx) - || slot_idx < 0) { - return Status::Corruption("syntax error"); - } else { - tmp_slots.insert(static_cast(slot_idx)); - } - } else { - if (pos == 0 || pos == (elem.size() - 1)) { - return Status::Corruption("syntax error"); - } else { - std::string start_pos = elem.substr(0, pos); - std::string end_pos = elem.substr(pos + 1, elem.size() - pos); - if (!slash::string2l(start_pos.data(), start_pos.size(), &start_idx) - || !slash::string2l(end_pos.data(), end_pos.size(), &end_idx) - || start_idx < 0 || end_idx < 0 || start_idx > end_idx) { - return Status::Corruption("syntax error"); - } - for (int64_t idx = start_idx; idx <= end_idx; ++idx) { - tmp_slots.insert(static_cast(idx)); - } - } - } - } - slots->swap(tmp_slots); - return Status::OK(); -} - -void SlotParentCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "PkClusterAddSlots/PkClusterDelSlots only support on sharding mode"); - return; - } - - Status s = ParseSlotGroup(argv_[2], &slots_); - if (!s.ok()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - - std::string table_name = g_pika_conf->default_table(); - for (const auto& slot_id : slots_) { - p_infos_.insert(PartitionInfo(table_name, slot_id)); - } -} - -/* - * pkcluster addslots 0-3,8-11 - * pkcluster addslots 0-3,8,9,10,11 - * pkcluster addslots 0,2,4,6,8,10,12,14 - */ -void 
PkClusterAddSlotsCmd::DoInitial() { - SlotParentCmd::DoInitial(); - if (!res_.ok()) { - return; - } -} - -void PkClusterAddSlotsCmd::Do(std::shared_ptr partition) { - std::string table_name = g_pika_conf->default_table(); - std::shared_ptr
table_ptr = g_pika_server->GetTable(table_name); - if (!table_ptr) { - res_.SetRes(CmdRes::kErrOther, "Internal error: table not found!"); - return; - } - - SlotState expected = INFREE; - if (!std::atomic_compare_exchange_strong(&g_pika_server->slot_state_, - &expected, INBUSY)) { - res_.SetRes(CmdRes::kErrOther, - "Slot in syncing or a change operation is under way, retry later"); - return; - } - - bool pre_success = true; - Status s = AddSlotsSanityCheck(table_name); - if (!s.ok()) { - LOG(WARNING) << "Addslots sanity check failed: " << s.ToString(); - pre_success = false; - } - if (pre_success) { - s = g_pika_conf->AddTablePartitions(table_name, slots_); - if (!s.ok()) { - LOG(WARNING) << "Addslots add to pika conf failed: " << s.ToString(); - pre_success = false; - } - } - if (pre_success) { - s = table_ptr->AddPartitions(slots_); - if (!s.ok()) { - LOG(WARNING) << "Addslots add to table partition failed: " << s.ToString(); - pre_success = false; - } - } - if (pre_success) { - s = g_pika_rm->AddSyncPartition(p_infos_); - if (!s.ok()) { - LOG(WARNING) << "Addslots add to sync partition failed: " << s.ToString(); - pre_success = false; - } - } - - g_pika_server->slot_state_.store(INFREE); - - if (!pre_success) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - - res_.SetRes(CmdRes::kOk); - LOG(INFO) << "Pika meta file overwrite success"; -} - -Status PkClusterAddSlotsCmd::AddSlotsSanityCheck(const std::string& table_name) { - Status s = g_pika_conf->TablePartitionsSanityCheck(table_name, slots_, true); - if (!s.ok()) { - return s; - } - - std::shared_ptr
table_ptr = g_pika_server->GetTable(table_name); - if (!table_ptr) { - return Status::NotFound("table not found!"); - } - - for (uint32_t id : slots_) { - std::shared_ptr partition_ptr = table_ptr->GetPartitionById(id); - if (partition_ptr != nullptr) { - return Status::Corruption("partition " + std::to_string(id) + " already exist"); - } - } - s = g_pika_rm->AddSyncPartitionSanityCheck(p_infos_); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -/* pkcluster delslots 0-3,8-11 - * pkcluster delslots 0-3,8,9,10,11 - * pkcluster delslots 0,2,4,6,8,10,12,14 - */ -void PkClusterDelSlotsCmd::DoInitial() { - SlotParentCmd::DoInitial(); - if (!res_.ok()) { - return; - } -} - -void PkClusterDelSlotsCmd::Do(std::shared_ptr partition) { - std::string table_name = g_pika_conf->default_table(); - std::shared_ptr
table_ptr = g_pika_server->GetTable(table_name); - if (!table_ptr) { - res_.SetRes(CmdRes::kErrOther, "Internal error: default table not found!"); - return; - } - - SlotState expected = INFREE; - if (!std::atomic_compare_exchange_strong(&g_pika_server->slot_state_, - &expected, INBUSY)) { - res_.SetRes(CmdRes::kErrOther, - "Slot in syncing or a change operation is under way, retry later"); - return; - } - - bool pre_success = true; - Status s = RemoveSlotsSanityCheck(table_name); - if (!s.ok()) { - LOG(WARNING) << "Removeslots sanity check failed: " << s.ToString(); - pre_success = false; - } - if (pre_success) { - s = g_pika_conf->RemoveTablePartitions(table_name, slots_); - if (!s.ok()) { - LOG(WARNING) << "Removeslots remove from pika conf failed: " << s.ToString(); - pre_success = false; - } - } - if (pre_success) { - s = table_ptr->RemovePartitions(slots_); - if (!s.ok()) { - LOG(WARNING) << "Removeslots remove from table partition failed: " << s.ToString(); - pre_success = false; - } - } - if (pre_success) { - s = g_pika_rm->RemoveSyncPartition(p_infos_); - if (!s.ok()) { - LOG(WARNING) << "Remvoeslots remove from sync partition failed: " << s.ToString(); - pre_success = false; - } - } - - g_pika_server->slot_state_.store(INFREE); - - if (!pre_success) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - res_.SetRes(CmdRes::kOk); - LOG(INFO) << "Pika meta file overwrite success"; -} - -Status PkClusterDelSlotsCmd::RemoveSlotsSanityCheck(const std::string& table_name) { - Status s = g_pika_conf->TablePartitionsSanityCheck(table_name, slots_, false); - if (!s.ok()) { - return s; - } - - std::shared_ptr
table_ptr = g_pika_server->GetTable(table_name); - if (!table_ptr) { - return Status::NotFound("table not found"); - } - - for (uint32_t id : slots_) { - std::shared_ptr partition_ptr = table_ptr->GetPartitionById(id); - if (partition_ptr == nullptr) { - return Status::Corruption("partition " + std::to_string(id) + " not found"); - } - } - s = g_pika_rm->RemoveSyncPartitionSanityCheck(p_infos_); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -/* pkcluster slotsslaveof no one [0-3,8-11 | all] - * pkcluster slotsslaveof ip port [0-3,8,9,10,11 | all] - * pkcluster slotsslaveof ip port [0,2,4,6,7,8,9 | all] force - */ -void PkClusterSlotsSlaveofCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePkClusterSlotsSlaveof); - return; - } - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "PkClusterSlotSync only support on sharding mode"); - return; - } - - if (!strcasecmp(argv_[2].data(), "no") - && !strcasecmp(argv_[3].data(), "one")) { - is_noone_ = true; - } else { - ip_ = argv_[2]; - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &port_) - || port_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - - if ((ip_ == "127.0.0.1" || ip_ == g_pika_server->host()) - && port_ == g_pika_server->port()) { - res_.SetRes(CmdRes::kErrOther, "You fucked up"); - return; - } - } - - if (!strcasecmp(argv_[4].data(), "all")) { - std::string table_name = g_pika_conf->default_table(); - slots_ = g_pika_server->GetTablePartitionIds(table_name); - } else { - Status s = ParseSlotGroup(argv_[4], &slots_); - if (!s.ok()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - } - - if (slots_.empty()) { - res_.SetRes(CmdRes::kErrOther, "Slots set empty"); - } - - if (argv_.size() == 5) { - // do nothing - } else if (argv_.size() == 6 - && !strcasecmp(argv_[5].data(), "force")) { - force_sync_ = true; - } else { - res_.SetRes(CmdRes::kSyntaxErr); - } -} - -void PkClusterSlotsSlaveofCmd::Do(std::shared_ptr partition) { - std::string table_name = g_pika_conf->default_table(); - std::vector to_del_slots; - for (const auto& slot : slots_) { - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_name, slot)); - if (!slave_partition) { - res_.SetRes(CmdRes::kErrOther, "Slot " + std::to_string(slot) + " not found!"); - return; - } - if (is_noone_) { - // check okay - } else if (slave_partition->State() == ReplState::kConnected - && slave_partition->MasterIp() == ip_ && slave_partition->MasterPort() == port_) { - to_del_slots.push_back(slot); - } - } - - for (auto to_del : to_del_slots) { - slots_.erase(to_del); - } - - Status s = Status::OK(); - ReplState state = force_sync_ - ? 
ReplState::kTryDBSync : ReplState::kTryConnect; - for (const auto& slot : slots_) { - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_name, slot)); - if (slave_partition->State() == ReplState::kConnected) { - s = g_pika_rm->SendRemoveSlaveNodeRequest(table_name, slot); - } - if (!s.ok()) { - break; - } - if (slave_partition->State() != ReplState::kNoConnect) { - // reset state - s = g_pika_rm->SetSlaveReplState( - PartitionInfo(table_name, slot), ReplState::kNoConnect); - if (!s.ok()) { - break; - } - } - if (is_noone_) { - } else { - s = g_pika_rm->ActivateSyncSlavePartition( - RmNode(ip_, port_, table_name, slot), state); - if (!s.ok()) { - break; - } - } - } - - if (s.ok()) { - res_.SetRes(CmdRes::kOk); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - diff --git a/tools/pika_migrate/src/pika_cmd_table_manager.cc b/tools/pika_migrate/src/pika_cmd_table_manager.cc index b046de878f..974fceb0ee 100644 --- a/tools/pika_migrate/src/pika_cmd_table_manager.cc +++ b/tools/pika_migrate/src/pika_cmd_table_manager.cc @@ -5,84 +5,106 @@ #include "include/pika_cmd_table_manager.h" -#include #include +#include +#include "include/acl.h" #include "include/pika_conf.h" -#include "slash/include/slash_mutex.h" - +#include "pstd/include/pstd_mutex.h" -#define gettid() syscall(__NR_gettid) - -extern PikaConf* g_pika_conf; +extern std::unique_ptr g_pika_conf; PikaCmdTableManager::PikaCmdTableManager() { - pthread_rwlock_init(&map_protector_, NULL); - cmds_ = new CmdTable(); + cmds_ = std::make_unique(); cmds_->reserve(300); - InitCmdTable(cmds_); } -PikaCmdTableManager::~PikaCmdTableManager() { - pthread_rwlock_destroy(&map_protector_); - for (const auto&item : thread_distribution_map_) { - delete item.second; +void PikaCmdTableManager::InitCmdTable(void) { + ::InitCmdTable(cmds_.get()); + for (const auto& cmd : *cmds_) { + if (cmd.second->flag() & kCmdFlagsWrite) { + cmd.second->AddAclCategory(static_cast(AclCategory::WRITE)); + } + if (cmd.second->flag() & kCmdFlagsRead && + !(cmd.second->AclCategory() & static_cast(AclCategory::SCRIPTING))) { + cmd.second->AddAclCategory(static_cast(AclCategory::READ)); + } + if (cmd.second->flag() & kCmdFlagsAdmin) { + cmd.second->AddAclCategory(static_cast(AclCategory::ADMIN) | + static_cast(AclCategory::DANGEROUS)); + } + if (cmd.second->flag() & kCmdFlagsPubSub) { + cmd.second->AddAclCategory(static_cast(AclCategory::PUBSUB)); + } + if (cmd.second->flag() & kCmdFlagsFast) { + cmd.second->AddAclCategory(static_cast(AclCategory::FAST)); + } + if (cmd.second->flag() & kCmdFlagsSlow) { + cmd.second->AddAclCategory(static_cast(AclCategory::SLOW)); + } + } + + CommandStatistics statistics; + for (auto& iter : *cmds_) { + cmdstat_map_.emplace(iter.first, statistics); + iter.second->SetCmdId(cmdId_++); } - DestoryCmdTable(cmds_); - delete cmds_; } -std::shared_ptr PikaCmdTableManager::GetCmd(const std::string& opt) { - std::string internal_opt = opt; - if (!g_pika_conf->classic_mode()) { - TryChangeToAlias(&internal_opt); +void PikaCmdTableManager::RenameCommand(const std::string before, const std::string after) { + auto it = cmds_->find(before); + if (it != cmds_->end()) { + if (after.length() > 0) { + cmds_->insert(std::pair>(after, std::move(it->second))); + } else { + LOG(ERROR) << "The value of rename-command is null"; + } + cmds_->erase(it); } +} + +std::unordered_map* PikaCmdTableManager::GetCommandStatMap() { + return &cmdstat_map_; +} + +std::shared_ptr PikaCmdTableManager::GetCmd(const std::string& 
opt) { + const std::string& internal_opt = opt; return NewCommand(internal_opt); } std::shared_ptr PikaCmdTableManager::NewCommand(const std::string& opt) { - Cmd* cmd = GetCmdFromTable(opt, *cmds_); + Cmd* cmd = GetCmdFromDB(opt, *cmds_); if (cmd) { return std::shared_ptr(cmd->Clone()); } return nullptr; } -void PikaCmdTableManager::TryChangeToAlias(std::string *internal_opt) { - if (!strcasecmp(internal_opt->c_str(), kCmdNameSlaveof.c_str())) { - *internal_opt = kCmdNamePkClusterSlotsSlaveof; - } -} +CmdTable* PikaCmdTableManager::GetCmdTable() { return cmds_.get(); } -bool PikaCmdTableManager::CheckCurrentThreadDistributionMapExist(const pid_t& tid) { - slash::RWLock l(&map_protector_, false); - if (thread_distribution_map_.find(tid) == thread_distribution_map_.end()) { - return false; - } - return true; +uint32_t PikaCmdTableManager::GetMaxCmdId() { return cmdId_; } + +bool PikaCmdTableManager::CheckCurrentThreadDistributionMapExist(const std::thread::id& tid) { + std::shared_lock l(map_protector_); + return thread_distribution_map_.find(tid) != thread_distribution_map_.end(); } void PikaCmdTableManager::InsertCurrentThreadDistributionMap() { - pid_t tid = gettid(); - PikaDataDistribution* distribution = nullptr; - if (g_pika_conf->classic_mode()) { - distribution = new HashModulo(); - } else { - distribution = new Crc32(); - } + auto tid = std::this_thread::get_id(); + std::unique_ptr distribution = std::make_unique(); distribution->Init(); - slash::RWLock l(&map_protector_, true); - thread_distribution_map_.insert(std::make_pair(tid, distribution)); + std::lock_guard l(map_protector_); + thread_distribution_map_.emplace(tid, std::move(distribution)); } -uint32_t PikaCmdTableManager::DistributeKey(const std::string& key, uint32_t partition_num) { - pid_t tid = gettid(); - PikaDataDistribution* data_dist = nullptr; - if (!CheckCurrentThreadDistributionMapExist(tid)) { - InsertCurrentThreadDistributionMap(); - } +bool PikaCmdTableManager::CmdExist(const std::string& cmd) const { return cmds_->find(cmd) != cmds_->end(); } - slash::RWLock l(&map_protector_, false); - data_dist = thread_distribution_map_[tid]; - return data_dist->Distribute(key, partition_num); +std::vector PikaCmdTableManager::GetAclCategoryCmdNames(uint32_t flag) { + std::vector result; + for (const auto& item : (*cmds_)) { + if (item.second->AclCategory() & flag) { + result.emplace_back(item.first); + } + } + return result; } diff --git a/tools/pika_migrate/src/pika_command.cc b/tools/pika_migrate/src/pika_command.cc index 5e40cf6416..63199c3481 100644 --- a/tools/pika_migrate/src/pika_command.cc +++ b/tools/pika_migrate/src/pika_command.cc @@ -3,746 +3,1055 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
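
Editorial note on the command-table changes (the pika_cmd_table_manager.cc hunk above and the pika_command.cc hunks below): every command now carries kCmdFlags* bits, and PikaCmdTableManager::InitCmdTable derives its ACL categories (WRITE, READ, ADMIN plus DANGEROUS, PUBSUB, FAST, SLOW) mechanically from those bits instead of tagging each command by hand. The compilable toy below shows that derivation; the bit values are illustrative only, not Pika's real constants.

```cpp
#include <cstdint>
#include <iostream>

// Illustrative flag/category bits (hypothetical values, not Pika's).
enum CmdFlags : uint32_t {
  kFlagWrite = 1 << 0, kFlagRead = 1 << 1, kFlagAdmin = 1 << 2,
  kFlagPubSub = 1 << 3, kFlagFast = 1 << 4, kFlagSlow = 1 << 5,
};
enum AclCategory : uint32_t {
  kCatWrite = 1 << 0, kCatRead = 1 << 1, kCatAdmin = 1 << 2, kCatDangerous = 1 << 3,
  kCatPubSub = 1 << 4, kCatFast = 1 << 5, kCatSlow = 1 << 6, kCatScripting = 1 << 7,
};

// Mirrors the InitCmdTable loop above: categories fall out of the flags,
// with ADMIN also implying DANGEROUS and scripting commands excluded from READ.
uint32_t DeriveAclCategories(uint32_t flags, uint32_t preset) {
  uint32_t cat = preset;
  if (flags & kFlagWrite) cat |= kCatWrite;
  if ((flags & kFlagRead) && !(preset & kCatScripting)) cat |= kCatRead;
  if (flags & kFlagAdmin) cat |= kCatAdmin | kCatDangerous;
  if (flags & kFlagPubSub) cat |= kCatPubSub;
  if (flags & kFlagFast) cat |= kCatFast;
  if (flags & kFlagSlow) cat |= kCatSlow;
  return cat;
}

int main() {
  // e.g. a FLUSHDB-like command: write | admin | slow
  uint32_t cat = DeriveAclCategories(kFlagWrite | kFlagAdmin | kFlagSlow, 0);
  std::cout << std::hex << cat << '\n';  // 0x4d: write|admin|dangerous|slow
}
```

Deriving categories this way keeps the hundreds of registrations below declarative: each std::make_unique call states a command's flags once, and its ACL classification follows without a second hand-maintained table.
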
-#include "include/pika_command.h" +#include +#include -#include "include/pika_kv.h" +#include +#include "include/pika_acl.h" +#include "include/pika_admin.h" #include "include/pika_bit.h" -#include "include/pika_set.h" +#include "include/pika_command.h" #include "include/pika_geo.h" -#include "include/pika_list.h" -#include "include/pika_zset.h" #include "include/pika_hash.h" -#include "include/pika_admin.h" +#include "include/pika_hyperloglog.h" +#include "include/pika_kv.h" +#include "include/pika_list.h" #include "include/pika_pubsub.h" +#include "include/pika_rm.h" #include "include/pika_server.h" -#include "include/pika_hyperloglog.h" -#include "include/pika_slot.h" -#include "include/pika_cluster.h" +#include "include/pika_set.h" +#include "include/pika_slot_command.h" +#include "include/pika_stream.h" +#include "include/pika_transaction.h" +#include "include/pika_zset.h" +#include "pstd_defer.h" +#include "src/pstd/include/scope_record_lock.h" + +using pstd::Status; extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; -void InitCmdTable(std::unordered_map *cmd_table) { - //Admin +void InitCmdTable(CmdTable* cmd_table) { + // Admin ////Slaveof - Cmd* slaveofptr = new SlaveofCmd(kCmdNameSlaveof, -3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlaveof, slaveofptr)); - Cmd* dbslaveofptr = new DbSlaveofCmd(kCmdNameDbSlaveof, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameDbSlaveof, dbslaveofptr)); - Cmd* authptr = new AuthCmd(kCmdNameAuth, 2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameAuth, authptr)); - Cmd* bgsaveptr = new BgsaveCmd(kCmdNameBgsave, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend); - cmd_table->insert(std::pair(kCmdNameBgsave, bgsaveptr)); - Cmd* compactptr = new CompactCmd(kCmdNameCompact, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameCompact, compactptr)); - Cmd* purgelogsto = new PurgelogstoCmd(kCmdNamePurgelogsto, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePurgelogsto, purgelogsto)); - Cmd* pingptr = new PingCmd(kCmdNamePing, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePing, pingptr)); - Cmd* selectptr = new SelectCmd(kCmdNameSelect, 2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSelect, selectptr)); - Cmd* flushallptr = new FlushallCmd(kCmdNameFlushall, 1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameFlushall, flushallptr)); - Cmd* flushdbptr = new FlushdbCmd(kCmdNameFlushdb, -1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameFlushdb, flushdbptr)); - Cmd* clientptr = new ClientCmd(kCmdNameClient, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameClient, clientptr)); - Cmd* shutdownptr = new ShutdownCmd(kCmdNameShutdown, 1, kCmdFlagsRead | kCmdFlagsLocal | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameShutdown, shutdownptr)); - Cmd* infoptr = new InfoCmd(kCmdNameInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameInfo, infoptr)); - Cmd* configptr = new ConfigCmd(kCmdNameConfig, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameConfig, configptr)); - Cmd* monitorptr = new MonitorCmd(kCmdNameMonitor, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameMonitor, monitorptr)); - Cmd* dbsizeptr = new 
DbsizeCmd(kCmdNameDbsize, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameDbsize, dbsizeptr)); - Cmd* timeptr = new TimeCmd(kCmdNameTime, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameTime, timeptr)); - Cmd* delbackupptr = new DelbackupCmd(kCmdNameDelbackup, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameDelbackup, delbackupptr)); - Cmd* echoptr = new EchoCmd(kCmdNameEcho, 2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameEcho, echoptr)); - Cmd* scandbptr = new ScandbCmd(kCmdNameScandb, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameScandb, scandbptr)); - Cmd* slowlogptr = new SlowlogCmd(kCmdNameSlowlog, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlowlog, slowlogptr)); - Cmd* paddingptr = new PaddingCmd(kCmdNamePadding, 2, kCmdFlagsWrite | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePadding, paddingptr)); - Cmd* pkpatternmatchdelptr = new PKPatternMatchDelCmd(kCmdNamePKPatternMatchDel, 3, kCmdFlagsWrite | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePKPatternMatchDel, pkpatternmatchdelptr)); + std::unique_ptr slaveofptr = + std::make_unique(kCmdNameSlaveof, -3, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlaveof, std::move(slaveofptr))); - // Slots related - Cmd* slotsinfoptr = new SlotsInfoCmd(kCmdNameSlotsInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsInfo, slotsinfoptr)); - Cmd* slotshashkeyptr = new SlotsHashKeyCmd(kCmdNameSlotsHashKey, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsHashKey, slotshashkeyptr)); - Cmd* slotmgrtslotasyncptr = new SlotsMgrtSlotAsyncCmd(kCmdNameSlotsMgrtSlotAsync, 8, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtSlotAsync, slotmgrtslotasyncptr)); - Cmd* slotmgrttagslotasyncptr = new SlotsMgrtTagSlotAsyncCmd(kCmdNameSlotsMgrtTagSlotAsync, 8, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtTagSlotAsync, slotmgrttagslotasyncptr)); - Cmd* slotsdelptr = new SlotsDelCmd(kCmdNameSlotsDel, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsDel, slotsdelptr)); - Cmd* slotsscanptr = new SlotsScanCmd(kCmdNameSlotsScan, -3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsScan, slotsscanptr)); - Cmd* slotmgrtexecwrapper = new SlotsMgrtExecWrapperCmd(kCmdNameSlotsMgrtExecWrapper, -3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtExecWrapper, slotmgrtexecwrapper)); - Cmd* slotmgrtasyncstatus = new SlotsMgrtAsyncStatusCmd(kCmdNameSlotsMgrtAsyncStatus, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtAsyncStatus, slotmgrtasyncstatus)); - Cmd* slotmgrtasynccancel = new SlotsMgrtAsyncCancelCmd(kCmdNameSlotsMgrtAsyncCancel, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtAsyncCancel, slotmgrtasynccancel)); - Cmd* slotmgrtslotptr = new SlotsMgrtSlotCmd(kCmdNameSlotsMgrtSlot, 5, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtSlot, slotmgrtslotptr)); - Cmd* slotmgrttagslotptr = new SlotsMgrtTagSlotCmd(kCmdNameSlotsMgrtTagSlot, 5, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtTagSlot, slotmgrttagslotptr)); - Cmd* slotmgrtoneptr = new SlotsMgrtOneCmd(kCmdNameSlotsMgrtOne, 5, kCmdFlagsRead | 
kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtOne, slotmgrtoneptr)); - Cmd* slotmgrttagoneptr = new SlotsMgrtTagOneCmd(kCmdNameSlotsMgrtTagOne, 5, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtTagOne, slotmgrttagoneptr)); - - // Cluster related - Cmd* pkclusterinfoptr = new PkClusterInfoCmd(kCmdNamePkClusterInfo, -3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePkClusterInfo, pkclusterinfoptr)); - Cmd* pkclusteraddslotsptr = new PkClusterAddSlotsCmd(kCmdNamePkClusterAddSlots, 3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePkClusterAddSlots, pkclusteraddslotsptr)); - Cmd* pkclusterdelslotsptr = new PkClusterDelSlotsCmd(kCmdNamePkClusterDelSlots, 3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePkClusterDelSlots, pkclusterdelslotsptr)); - Cmd* pkclusterslotsslaveofptr = new PkClusterSlotsSlaveofCmd(kCmdNamePkClusterSlotsSlaveof, -5, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePkClusterSlotsSlaveof, pkclusterslotsslaveofptr)); - -#ifdef TCMALLOC_EXTENSION - Cmd* tcmallocptr = new TcmallocCmd(kCmdNameTcmalloc, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameTcmalloc, tcmallocptr)); + std::unique_ptr dbslaveofptr = + std::make_unique(kCmdNameDbSlaveof, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameDbSlaveof, std::move(dbslaveofptr))); + + std::unique_ptr authptr = + std::make_unique(kCmdNameAuth, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsNoAuth | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameAuth, std::move(authptr))); + + std::unique_ptr bgsaveptr = std::make_unique( + kCmdNameBgsave, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameBgsave, std::move(bgsaveptr))); + + std::unique_ptr compactptr = + std::make_unique(kCmdNameCompact, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow | kCmdFlagsSuspend); + cmd_table->insert(std::pair>(kCmdNameCompact, std::move(compactptr))); + + std::unique_ptr compactrangeptr = std::make_unique(kCmdNameCompactRange, 4, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend); + cmd_table->insert(std::pair>(kCmdNameCompactRange, std::move(compactrangeptr))); + std::unique_ptr purgelogsto = + std::make_unique(kCmdNamePurgelogsto, -2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNamePurgelogsto, std::move(purgelogsto))); + + std::unique_ptr pingptr = + std::make_unique(kCmdNamePing, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePing, std::move(pingptr))); + + std::unique_ptr helloptr = + std::make_unique(kCmdNameHello, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsNoAuth | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHello, std::move(helloptr))); + + std::unique_ptr selectptr = + std::make_unique(kCmdNameSelect, 2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSelect, std::move(selectptr))); + + std::unique_ptr flushallptr = std::make_unique( + kCmdNameFlushall, 1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameFlushall, std::move(flushallptr))); + + std::unique_ptr flushdbptr = std::make_unique( + kCmdNameFlushdb, -1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsSlow); + 
cmd_table->insert(std::pair>(kCmdNameFlushdb, std::move(flushdbptr))); + + std::unique_ptr clientptr = + std::make_unique(kCmdNameClient, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameClient, std::move(clientptr))); + + std::unique_ptr shutdownptr = std::make_unique( + kCmdNameShutdown, 1, kCmdFlagsRead | kCmdFlagsLocal | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameShutdown, std::move(shutdownptr))); + + std::unique_ptr infoptr = + std::make_unique(kCmdNameInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameInfo, std::move(infoptr))); + + std::unique_ptr configptr = + std::make_unique(kCmdNameConfig, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameConfig, std::move(configptr))); + + std::unique_ptr monitorptr = + std::make_unique(kCmdNameMonitor, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameMonitor, std::move(monitorptr))); + + std::unique_ptr dbsizeptr = + std::make_unique(kCmdNameDbsize, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameDbsize, std::move(dbsizeptr))); + + std::unique_ptr timeptr = + std::make_unique(kCmdNameTime, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameTime, std::move(timeptr))); + + std::unique_ptr delbackupptr = + std::make_unique(kCmdNameDelbackup, 1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameDelbackup, std::move(delbackupptr))); + + std::unique_ptr echoptr = + std::make_unique(kCmdNameEcho, 2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameEcho, std::move(echoptr))); + + std::unique_ptr scandbptr = + std::make_unique(kCmdNameScandb, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameScandb, std::move(scandbptr))); + + std::unique_ptr slowlogptr = + std::make_unique(kCmdNameSlowlog, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlowlog, std::move(slowlogptr))); + + std::unique_ptr paddingptr = std::make_unique(kCmdNamePadding, 2, kCmdFlagsWrite | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNamePadding, std::move(paddingptr))); + + std::unique_ptr pkpatternmatchdelptr = + std::make_unique(kCmdNamePKPatternMatchDel, -2, kCmdFlagsWrite | kCmdFlagsAdmin); + cmd_table->insert( + std::pair>(kCmdNamePKPatternMatchDel, std::move(pkpatternmatchdelptr))); + std::unique_ptr dummyptr = std::make_unique(kCmdDummy, 0, kCmdFlagsWrite); + cmd_table->insert(std::pair>(kCmdDummy, std::move(dummyptr))); + + std::unique_ptr quitptr = + std::make_unique(kCmdNameQuit, 1, kCmdFlagsRead | kCmdFlagsNoAuth | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameQuit, std::move(quitptr))); + + std::unique_ptr diskrecoveryptr = + std::make_unique(kCmdNameDiskRecovery, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameDiskRecovery, std::move(diskrecoveryptr))); + + std::unique_ptr clearreplicationidptr = std::make_unique( + kCmdNameClearReplicationID, 1, kCmdFlagsWrite | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameClearReplicationID, std::move(clearreplicationidptr))); + std::unique_ptr disablewalptr = std::make_unique(kCmdNameDisableWal, 2, kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameDisableWal, std::move(disablewalptr))); + std::unique_ptr cacheptr = 
std::make_unique(kCmdNameCache, -2, kCmdFlagsAdmin | kCmdFlagsRead); + cmd_table->insert(std::pair>(kCmdNameCache, std::move(cacheptr))); + std::unique_ptr clearcacheptr = std::make_unique(kCmdNameClearCache, 1, kCmdFlagsAdmin | kCmdFlagsWrite); + cmd_table->insert(std::pair>(kCmdNameClearCache, std::move(clearcacheptr))); + std::unique_ptr lastsaveptr = std::make_unique(kCmdNameLastSave, 1, kCmdFlagsAdmin | kCmdFlagsRead | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameLastSave, std::move(lastsaveptr))); + +#ifdef WITH_COMMAND_DOCS + std::unique_ptr commandptr = + std::make_unique(kCmdNameCommand, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameCommand, std::move(commandptr))); #endif - //Kv + // Slots related + std::unique_ptr slotsinfoptr = + std::make_unique(kCmdNameSlotsInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsInfo, std::move(slotsinfoptr))); + + std::unique_ptr slotmgrttagslotasyncptr = std::make_unique( + kCmdNameSlotsMgrtTagSlotAsync, 8, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtTagSlotAsync, std::move(slotmgrttagslotasyncptr))); + + std::unique_ptr slotmgrtasyncstatus = std::make_unique( + kCmdNameSlotsMgrtAsyncStatus, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtAsyncStatus, std::move(slotmgrtasyncstatus))); + + std::unique_ptr slotmgrtasynccancel = std::make_unique( + kCmdNameSlotsMgrtAsyncCancel, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtAsyncCancel, std::move(slotmgrtasynccancel))); + + std::unique_ptr slotmgrttagoneptr = + std::make_unique(kCmdNameSlotsMgrtTagOne, 5, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtTagOne, std::move(slotmgrttagoneptr))); + + std::unique_ptr slotmgrtoneptr = + std::make_unique(kCmdNameSlotsMgrtOne, 5, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsMgrtOne, std::move(slotmgrtoneptr))); + + std::unique_ptr slotmgrttagslotptr = std::make_unique( + kCmdNameSlotsMgrtTagSlot, 5, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtTagSlot, std::move(slotmgrttagslotptr))); + + std::unique_ptr slotmgrttagslottagptr = + std::make_unique(kCmdNameSlotsMgrtSlot, 5, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtSlot, std::move(slotmgrttagslottagptr))); + + std::unique_ptr slotsdelptr = + std::make_unique(kCmdNameSlotsDel, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsDel, std::move(slotsdelptr))); + + std::unique_ptr slotshashkeyptr = + std::make_unique(kCmdNameSlotsHashKey, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsHashKey, std::move(slotshashkeyptr))); + + std::unique_ptr slotsscanptr = + std::make_unique(kCmdNameSlotsScan, -3, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsScan, std::move(slotsscanptr))); + + std::unique_ptr slotsmgrtexecwrapper = std::make_unique( + kCmdNameSlotsMgrtExecWrapper, -3, kCmdFlagsWrite | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtExecWrapper, std::move(slotsmgrtexecwrapper))); + + std::unique_ptr slotsreloadptr = + std::make_unique(kCmdNameSlotsReload, 1, 
kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsReload, std::move(slotsreloadptr))); + + std::unique_ptr slotsreloadoffptr = + std::make_unique(kCmdNameSlotsReloadOff, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsReloadOff, std::move(slotsreloadoffptr))); + + std::unique_ptr slotscleanupptr = + std::make_unique(kCmdNameSlotsCleanup, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsCleanup, std::move(slotscleanupptr))); + + std::unique_ptr slotscleanupoffptr = + std::make_unique(kCmdNameSlotsCleanupOff, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsCleanupOff, std::move(slotscleanupoffptr))); + + // Kv ////SetCmd - Cmd* setptr = new SetCmd(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSet, setptr)); + std::unique_ptr setptr = + std::make_unique(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSet, std::move(setptr))); ////GetCmd - Cmd* getptr = new GetCmd(kCmdNameGet, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameGet, getptr)); + std::unique_ptr getptr = + std::make_unique(kCmdNameGet, 2, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameGet, std::move(getptr))); ////DelCmd - Cmd* delptr = new DelCmd(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDel, delptr)); + std::unique_ptr delptr = + std::make_unique(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameDel, std::move(delptr))); + std::unique_ptr Unlinkptr = + std::make_unique(kCmdNameUnlink, -2, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameUnlink, std::move(Unlinkptr))); ////IncrCmd - Cmd* incrptr = new IncrCmd(kCmdNameIncr, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameIncr, incrptr)); + std::unique_ptr incrptr = + std::make_unique(kCmdNameIncr, 2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameIncr, std::move(incrptr))); ////IncrbyCmd - Cmd* incrbyptr = new IncrbyCmd(kCmdNameIncrby, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameIncrby, incrbyptr)); + std::unique_ptr incrbyptr = std::make_unique( + kCmdNameIncrby, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameIncrby, std::move(incrbyptr))); ////IncrbyfloatCmd - Cmd* incrbyfloatptr = new IncrbyfloatCmd(kCmdNameIncrbyfloat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameIncrbyfloat, incrbyfloatptr)); + std::unique_ptr incrbyfloatptr = std::make_unique( + kCmdNameIncrbyfloat, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameIncrbyfloat, std::move(incrbyfloatptr))); ////DecrCmd - Cmd* decrptr = new DecrCmd(kCmdNameDecr, 2, kCmdFlagsWrite | 
kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDecr, decrptr)); + std::unique_ptr decrptr = + std::make_unique(kCmdNameDecr, 2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameDecr, std::move(decrptr))); ////DecrbyCmd - Cmd* decrbyptr = new DecrbyCmd(kCmdNameDecrby, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDecrby, decrbyptr)); + std::unique_ptr decrbyptr = std::make_unique( + kCmdNameDecrby, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameDecrby, std::move(decrbyptr))); ////GetsetCmd - Cmd* getsetptr = new GetsetCmd(kCmdNameGetset, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameGetset, getsetptr)); + std::unique_ptr getsetptr = std::make_unique( + kCmdNameGetset, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameGetset, std::move(getsetptr))); ////AppendCmd - Cmd* appendptr = new AppendCmd(kCmdNameAppend, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameAppend, appendptr)); + std::unique_ptr appendptr = std::make_unique( + kCmdNameAppend, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameAppend, std::move(appendptr))); ////MgetCmd - Cmd* mgetptr = new MgetCmd(kCmdNameMget, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameMget, mgetptr)); + std::unique_ptr mgetptr = + std::make_unique(kCmdNameMget, -2, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameMget, std::move(mgetptr))); ////KeysCmd - Cmd* keysptr = new KeysCmd(kCmdNameKeys, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameKeys, keysptr)); + std::unique_ptr keysptr = + std::make_unique(kCmdNameKeys, -2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameKeys, std::move(keysptr))); ////SetnxCmd - Cmd* setnxptr = new SetnxCmd(kCmdNameSetnx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSetnx, setnxptr)); + std::unique_ptr setnxptr = + std::make_unique(kCmdNameSetnx, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSetnx, std::move(setnxptr))); ////SetexCmd - Cmd* setexptr = new SetexCmd(kCmdNameSetex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSetex, setexptr)); + std::unique_ptr setexptr = + std::make_unique(kCmdNameSetex, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSetex, std::move(setexptr))); ////PsetexCmd - Cmd* psetexptr = new PsetexCmd(kCmdNamePsetex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePsetex, psetexptr)); + std::unique_ptr psetexptr = + std::make_unique(kCmdNamePsetex, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePsetex, std::move(psetexptr))); ////DelvxCmd - Cmd* delvxptr = new 
DelvxCmd(kCmdNameDelvx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDelvx, delvxptr)); + std::unique_ptr delvxptr = + std::make_unique(kCmdNameDelvx, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameDelvx, std::move(delvxptr))); ////MSetCmd - Cmd* msetptr = new MsetCmd(kCmdNameMset, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameMset, msetptr)); + std::unique_ptr msetptr = + std::make_unique(kCmdNameMset, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameMset, std::move(msetptr))); ////MSetnxCmd - Cmd* msetnxptr = new MsetnxCmd(kCmdNameMsetnx, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameMsetnx, msetnxptr)); + std::unique_ptr msetnxptr = std::make_unique( + kCmdNameMsetnx, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameMsetnx, std::move(msetnxptr))); ////GetrangeCmd - Cmd* getrangeptr = new GetrangeCmd(kCmdNameGetrange, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameGetrange, getrangeptr)); + std::unique_ptr getrangeptr = std::make_unique( + kCmdNameGetrange, 4, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameGetrange, std::move(getrangeptr))); ////SetrangeCmd - Cmd* setrangeptr = new SetrangeCmd(kCmdNameSetrange, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSetrange, setrangeptr)); + std::unique_ptr setrangeptr = std::make_unique( + kCmdNameSetrange, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSetrange, std::move(setrangeptr))); ////StrlenCmd - Cmd* strlenptr = new StrlenCmd(kCmdNameStrlen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameStrlen, strlenptr)); + std::unique_ptr strlenptr = + std::make_unique(kCmdNameStrlen, 2, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameStrlen, std::move(strlenptr))); ////ExistsCmd - Cmd* existsptr = new ExistsCmd(kCmdNameExists, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameExists, existsptr)); + std::unique_ptr existsptr = + std::make_unique(kCmdNameExists, -2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameExists, std::move(existsptr))); ////ExpireCmd - Cmd* expireptr = new ExpireCmd(kCmdNameExpire, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameExpire, expireptr)); + std::unique_ptr expireptr = std::make_unique( + kCmdNameExpire, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameExpire, std::move(expireptr))); ////PexpireCmd - Cmd* pexpireptr = new PexpireCmd(kCmdNamePexpire, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePexpire, pexpireptr)); + std::unique_ptr pexpireptr = std::make_unique( + kCmdNamePexpire, 3, kCmdFlagsWrite | 
kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePexpire, std::move(pexpireptr))); ////ExpireatCmd - Cmd* expireatptr = new ExpireatCmd(kCmdNameExpireat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameExpireat, expireatptr)); + std::unique_ptr expireatptr = + std::make_unique(kCmdNameExpireat, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameExpireat, std::move(expireatptr))); ////PexpireatCmd - Cmd* pexpireatptr = new PexpireatCmd(kCmdNamePexpireat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePexpireat, pexpireatptr)); + std::unique_ptr pexpireatptr = + std::make_unique(kCmdNamePexpireat, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePexpireat, std::move(pexpireatptr))); ////TtlCmd - Cmd* ttlptr = new TtlCmd(kCmdNameTtl, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameTtl, ttlptr)); + std::unique_ptr ttlptr = + std::make_unique(kCmdNameTtl, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameTtl, std::move(ttlptr))); ////PttlCmd - Cmd* pttlptr = new PttlCmd(kCmdNamePttl, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePttl, pttlptr)); + std::unique_ptr pttlptr = + std::make_unique(kCmdNamePttl, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePttl, std::move(pttlptr))); ////PersistCmd - Cmd* persistptr = new PersistCmd(kCmdNamePersist, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePersist, persistptr)); + std::unique_ptr persistptr = + std::make_unique(kCmdNamePersist, 2, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePersist, std::move(persistptr))); ////TypeCmd - Cmd* typeptr = new TypeCmd(kCmdNameType, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameType, typeptr)); + std::unique_ptr typeptr = + std::make_unique(kCmdNameType, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameType, std::move(typeptr))); ////ScanCmd - Cmd* scanptr = new ScanCmd(kCmdNameScan, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameScan, scanptr)); + std::unique_ptr scanptr = + std::make_unique(kCmdNameScan, -2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameScan, std::move(scanptr))); ////ScanxCmd - Cmd* scanxptr = new ScanxCmd(kCmdNameScanx, -3, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameScanx, scanxptr)); + std::unique_ptr scanxptr = + std::make_unique(kCmdNameScanx, -3, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameScanx, std::move(scanxptr))); ////PKSetexAtCmd - Cmd* pksetexatptr = new PKSetexAtCmd(kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - 
cmd_table->insert(std::pair(kCmdNamePKSetexAt, pksetexatptr)); + std::unique_ptr pksetexatptr = std::make_unique( + kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKSetexAt, std::move(pksetexatptr))); ////PKScanRange - Cmd* pkscanrangeptr = new PKScanRangeCmd(kCmdNamePKScanRange, -4, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePKScanRange, pkscanrangeptr)); + std::unique_ptr pkscanrangeptr = std::make_unique( + kCmdNamePKScanRange, -4, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKScanRange, std::move(pkscanrangeptr))); ////PKRScanRange - Cmd* pkrscanrangeptr = new PKRScanRangeCmd(kCmdNamePKRScanRange, -4, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePKRScanRange, pkrscanrangeptr)); + std::unique_ptr pkrscanrangeptr = std::make_unique( + kCmdNamePKRScanRange, -4, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKRScanRange, std::move(pkrscanrangeptr))); - //Hash + // Hash ////HDelCmd - Cmd* hdelptr = new HDelCmd(kCmdNameHDel, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHDel, hdelptr)); + std::unique_ptr hdelptr = + std::make_unique(kCmdNameHDel, -3, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHDel, std::move(hdelptr))); ////HSetCmd - Cmd* hsetptr = new HSetCmd(kCmdNameHSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHSet, hsetptr)); + std::unique_ptr hsetptr = + std::make_unique(kCmdNameHSet, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHSet, std::move(hsetptr))); ////HGetCmd - Cmd* hgetptr = new HGetCmd(kCmdNameHGet, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHGet, hgetptr)); + std::unique_ptr hgetptr = + std::make_unique(kCmdNameHGet, 3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache |kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHGet, std::move(hgetptr))); ////HGetallCmd - Cmd* hgetallptr = new HGetallCmd(kCmdNameHGetall, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHGetall, hgetallptr)); + std::unique_ptr hgetallptr = + std::make_unique(kCmdNameHGetall, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache); + cmd_table->insert(std::pair>(kCmdNameHGetall, std::move(hgetallptr))); ////HExistsCmd - Cmd* hexistsptr = new HExistsCmd(kCmdNameHExists, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHExists, hexistsptr)); + std::unique_ptr hexistsptr = + std::make_unique(kCmdNameHExists, 3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast ); + cmd_table->insert(std::pair>(kCmdNameHExists, std::move(hexistsptr))); ////HIncrbyCmd - Cmd* hincrbyptr = new HIncrbyCmd(kCmdNameHIncrby, 4, kCmdFlagsWrite |kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHIncrby, hincrbyptr)); + std::unique_ptr hincrbyptr = + 
std::make_unique(kCmdNameHIncrby, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHIncrby, std::move(hincrbyptr))); ////HIncrbyfloatCmd - Cmd* hincrbyfloatptr = new HIncrbyfloatCmd(kCmdNameHIncrbyfloat, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHIncrbyfloat, hincrbyfloatptr)); + std::unique_ptr hincrbyfloatptr = + std::make_unique(kCmdNameHIncrbyfloat, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHIncrbyfloat, std::move(hincrbyfloatptr))); ////HKeysCmd - Cmd* hkeysptr = new HKeysCmd(kCmdNameHKeys, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHKeys, hkeysptr)); + std::unique_ptr hkeysptr = + std::make_unique(kCmdNameHKeys, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHKeys, std::move(hkeysptr))); ////HLenCmd - Cmd* hlenptr = new HLenCmd(kCmdNameHLen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHLen, hlenptr)); + std::unique_ptr hlenptr = + std::make_unique(kCmdNameHLen, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHLen, std::move(hlenptr))); ////HMgetCmd - Cmd* hmgetptr = new HMgetCmd(kCmdNameHMget, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHMget, hmgetptr)); + std::unique_ptr hmgetptr = + std::make_unique(kCmdNameHMget, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache |kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHMget, std::move(hmgetptr))); ////HMsetCmd - Cmd* hmsetptr = new HMsetCmd(kCmdNameHMset, -4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHMset, hmsetptr)); + std::unique_ptr hmsetptr = + std::make_unique(kCmdNameHMset, -4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHMset, std::move(hmsetptr))); ////HSetnxCmd - Cmd* hsetnxptr = new HSetnxCmd(kCmdNameHSetnx, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHSetnx, hsetnxptr)); + std::unique_ptr hsetnxptr = + std::make_unique(kCmdNameHSetnx, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHSetnx, std::move(hsetnxptr))); ////HStrlenCmd - Cmd* hstrlenptr = new HStrlenCmd(kCmdNameHStrlen, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHStrlen, hstrlenptr)); + std::unique_ptr hstrlenptr = + std::make_unique(kCmdNameHStrlen, 3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHStrlen, std::move(hstrlenptr))); ////HValsCmd - Cmd* hvalsptr = new HValsCmd(kCmdNameHVals, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHVals, hvalsptr)); + std::unique_ptr hvalsptr = + std::make_unique(kCmdNameHVals, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow | kCmdFlagsUpdateCache | 
kCmdFlagsDoThroughDB | kCmdFlagsReadCache); + cmd_table->insert(std::pair>(kCmdNameHVals, std::move(hvalsptr))); ////HScanCmd - Cmd* hscanptr = new HScanCmd(kCmdNameHScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHScan, hscanptr)); + std::unique_ptr hscanptr = std::make_unique( + kCmdNameHScan, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameHScan, std::move(hscanptr))); ////HScanxCmd - Cmd* hscanxptr = new HScanxCmd(kCmdNameHScanx, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHScanx, hscanxptr)); + std::unique_ptr hscanxptr = std::make_unique( + kCmdNameHScanx, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameHScanx, std::move(hscanxptr))); ////PKHScanRange - Cmd* pkhscanrangeptr = new PKHScanRangeCmd(kCmdNamePKHScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNamePKHScanRange, pkhscanrangeptr)); + std::unique_ptr pkhscanrangeptr = std::make_unique( + kCmdNamePKHScanRange, -4, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKHScanRange, std::move(pkhscanrangeptr))); ////PKHRScanRange - Cmd* pkhrscanrangeptr = new PKHRScanRangeCmd(kCmdNamePKHRScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNamePKHRScanRange, pkhrscanrangeptr)); - - //List - Cmd* lindexptr = new LIndexCmd(kCmdNameLIndex, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLIndex, lindexptr)); - Cmd* linsertptr = new LInsertCmd(kCmdNameLInsert, 5, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLInsert, linsertptr)); - Cmd* llenptr = new LLenCmd(kCmdNameLLen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLLen, llenptr)); - Cmd* lpopptr = new LPopCmd(kCmdNameLPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLPop, lpopptr)); - Cmd* lpushptr = new LPushCmd(kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLPush, lpushptr)); - Cmd* lpushxptr = new LPushxCmd(kCmdNameLPushx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLPushx, lpushxptr)); - Cmd* lrangeptr = new LRangeCmd(kCmdNameLRange, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLRange, lrangeptr)); - Cmd* lremptr = new LRemCmd(kCmdNameLRem, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLRem, lremptr)); - Cmd* lsetptr = new LSetCmd(kCmdNameLSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLSet, lsetptr)); - Cmd* ltrimptr = new LTrimCmd(kCmdNameLTrim, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLTrim, ltrimptr)); - Cmd* rpopptr = new RPopCmd(kCmdNameRPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPop, rpopptr)); - Cmd* rpoplpushptr = new RPopLPushCmd(kCmdNameRPopLPush, 3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPopLPush, rpoplpushptr)); - Cmd* rpushptr = new 
RPushCmd(kCmdNameRPush, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPush, rpushptr)); - Cmd* rpushxptr = new RPushxCmd(kCmdNameRPushx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPushx, rpushxptr)); - - //Zset + std::unique_ptr pkhrscanrangeptr = std::make_unique( + kCmdNamePKHRScanRange, -4, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKHRScanRange, std::move(pkhrscanrangeptr))); + + // List + std::unique_ptr lindexptr = + std::make_unique(kCmdNameLIndex, 3, kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameLIndex, std::move(lindexptr))); + std::unique_ptr linsertptr = + std::make_unique(kCmdNameLInsert, 5, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameLInsert, std::move(linsertptr))); + + std::unique_ptr llenptr = + std::make_unique(kCmdNameLLen, 2, kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameLLen, std::move(llenptr))); + std::unique_ptr blpopptr = std::make_unique( + kCmdNameBLPop, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameBLPop, std::move(blpopptr))); + + std::unique_ptr lpopptr = + std::make_unique(kCmdNameLPop, -2, kCmdFlagsWrite | kCmdFlagsList |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameLPop, std::move(lpopptr))); + + std::unique_ptr lpushptr = std::make_unique( + kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameLPush, std::move(lpushptr))); + + std::unique_ptr lpushxptr = std::make_unique(kCmdNameLPushx, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameLPushx, std::move(lpushxptr))); + + std::unique_ptr lrangeptr = std::make_unique( + kCmdNameLRange, 4, kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameLRange, std::move(lrangeptr))); + std::unique_ptr lremptr = + std::make_unique(kCmdNameLRem, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameLRem, std::move(lremptr))); + std::unique_ptr lsetptr = + std::make_unique(kCmdNameLSet, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameLSet, std::move(lsetptr))); + std::unique_ptr ltrimptr = + std::make_unique(kCmdNameLTrim, 4, kCmdFlagsWrite | kCmdFlagsList |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameLTrim, std::move(ltrimptr))); + + std::unique_ptr brpopptr = std::make_unique( + kCmdNameBRpop, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameBRpop, std::move(brpopptr))); + std::unique_ptr rpopptr = + std::make_unique(kCmdNameRPop, -2, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameRPop, std::move(rpopptr))); + std::unique_ptr 
rpoplpushptr = std::make_unique( + kCmdNameRPopLPush, 3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameRPopLPush, std::move(rpoplpushptr))); + std::unique_ptr rpushptr = + std::make_unique(kCmdNameRPush, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameRPush, std::move(rpushptr))); + std::unique_ptr rpushxptr = + std::make_unique(kCmdNameRPushx, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameRPushx, std::move(rpushxptr))); + + // Zset ////ZAddCmd - Cmd* zaddptr = new ZAddCmd(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZAdd, zaddptr)); + std::unique_ptr zaddptr = + std::make_unique(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZAdd, std::move(zaddptr))); ////ZCardCmd - Cmd* zcardptr = new ZCardCmd(kCmdNameZCard, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZCard, zcardptr)); + std::unique_ptr zcardptr = + std::make_unique(kCmdNameZCard, 2, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZCard, std::move(zcardptr))); ////ZScanCmd - Cmd* zscanptr = new ZScanCmd(kCmdNameZScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZScan, zscanptr)); + std::unique_ptr zscanptr = std::make_unique( + kCmdNameZScan, -3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameZScan, std::move(zscanptr))); ////ZIncrbyCmd - Cmd* zincrbyptr = new ZIncrbyCmd(kCmdNameZIncrby, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZIncrby, zincrbyptr)); + std::unique_ptr zincrbyptr = + std::make_unique(kCmdNameZIncrby, 4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast) ; + cmd_table->insert(std::pair>(kCmdNameZIncrby, std::move(zincrbyptr))); ////ZRangeCmd - Cmd* zrangeptr = new ZRangeCmd(kCmdNameZRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRange, zrangeptr)); + std::unique_ptr zrangeptr = + std::make_unique(kCmdNameZRange, -4, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameZRange, std::move(zrangeptr))); ////ZRevrangeCmd - Cmd* zrevrangeptr = new ZRevrangeCmd(kCmdNameZRevrange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrange, zrevrangeptr)); + std::unique_ptr zrevrangeptr = + std::make_unique(kCmdNameZRevrange, -4, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameZRevrange, std::move(zrevrangeptr))); ////ZRangebyscoreCmd - Cmd* zrangebyscoreptr = new ZRangebyscoreCmd(kCmdNameZRangebyscore, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRangebyscore, zrangebyscoreptr)); + std::unique_ptr zrangebyscoreptr = std::make_unique( + kCmdNameZRangebyscore, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + 
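// [Editor's note] Each registration ORs together one bit per property; the
// cache-related bits added by this patch steer Cmd::DoCommand() later in this
// file. Decoding ZRange above as an example:
//   kCmdFlagsRead        -> read path; the reply may be served from cache
//   kCmdFlagsReadCache   -> try ReadCache() before touching the DB
//   kCmdFlagsDoThroughDB -> on a cache miss, fall through to RocksDB
//   kCmdFlagsUpdateCache -> repopulate the cache after the DB access
//   kCmdFlagsSlow        -> Pika's fast/slow split for keeping long-running
//                           commands apart (editor's reading of the convention)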
cmd_table->insert(std::pair>(kCmdNameZRangebyscore, std::move(zrangebyscoreptr))); ////ZRevrangebyscoreCmd - Cmd* zrevrangebyscoreptr = new ZRevrangebyscoreCmd(kCmdNameZRevrangebyscore, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrangebyscore, zrevrangebyscoreptr)); + std::unique_ptr zrevrangebyscoreptr = std::make_unique( + kCmdNameZRevrangebyscore, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameZRevrangebyscore, std::move(zrevrangebyscoreptr))); ////ZCountCmd - Cmd* zcountptr = new ZCountCmd(kCmdNameZCount, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZCount, zcountptr)); + std::unique_ptr zcountptr = + std::make_unique(kCmdNameZCount, 4, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZCount, std::move(zcountptr))); ////ZRemCmd - Cmd* zremptr = new ZRemCmd(kCmdNameZRem, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRem, zremptr)); + std::unique_ptr zremptr = + std::make_unique(kCmdNameZRem, -3, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZRem, std::move(zremptr))); ////ZUnionstoreCmd - Cmd* zunionstoreptr = new ZUnionstoreCmd(kCmdNameZUnionstore, -4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZUnionstore, zunionstoreptr)); + std::unique_ptr zunionstoreptr = + std::make_unique(kCmdNameZUnionstore, -4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameZUnionstore, std::move(zunionstoreptr))); ////ZInterstoreCmd - Cmd* zinterstoreptr = new ZInterstoreCmd(kCmdNameZInterstore, -4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZInterstore, zinterstoreptr)); + std::unique_ptr zinterstoreptr = + std::make_unique(kCmdNameZInterstore, -4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameZInterstore, std::move(zinterstoreptr))); ////ZRankCmd - Cmd* zrankptr = new ZRankCmd(kCmdNameZRank, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRank, zrankptr)); + std::unique_ptr zrankptr = + std::make_unique(kCmdNameZRank, 3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZRank, std::move(zrankptr))); ////ZRevrankCmd - Cmd* zrevrankptr = new ZRevrankCmd(kCmdNameZRevrank, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrank, zrevrankptr)); + std::unique_ptr zrevrankptr = + std::make_unique(kCmdNameZRevrank, 3, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZRevrank, std::move(zrevrankptr))); ////ZScoreCmd - Cmd* zscoreptr = new ZScoreCmd(kCmdNameZScore, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZScore, zscoreptr)); + std::unique_ptr zscoreptr = + std::make_unique(kCmdNameZScore, 3, kCmdFlagsRead | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | 
kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZScore, std::move(zscoreptr))); ////ZRangebylexCmd - Cmd* zrangebylexptr = new ZRangebylexCmd(kCmdNameZRangebylex, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRangebylex, zrangebylexptr)); + std::unique_ptr zrangebylexptr = + std::make_unique(kCmdNameZRangebylex, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameZRangebylex, std::move(zrangebylexptr))); ////ZRevrangebylexCmd - Cmd* zrevrangebylexptr = new ZRevrangebylexCmd(kCmdNameZRevrangebylex, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrangebylex, zrevrangebylexptr)); + std::unique_ptr zrevrangebylexptr = std::make_unique( + kCmdNameZRevrangebylex, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameZRevrangebylex, std::move(zrevrangebylexptr))); ////ZLexcountCmd - Cmd* zlexcountptr = new ZLexcountCmd(kCmdNameZLexcount, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZLexcount, zlexcountptr)); + std::unique_ptr zlexcountptr = + std::make_unique(kCmdNameZLexcount, 4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZLexcount, std::move(zlexcountptr))); ////ZRemrangebyrankCmd - Cmd* zremrangebyrankptr = new ZRemrangebyrankCmd(kCmdNameZRemrangebyrank, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRemrangebyrank, zremrangebyrankptr)); + std::unique_ptr zremrangebyrankptr = std::make_unique( + kCmdNameZRemrangebyrank, 4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameZRemrangebyrank, std::move(zremrangebyrankptr))); ////ZRemrangebyscoreCmd - Cmd* zremrangebyscoreptr = new ZRemrangebyscoreCmd(kCmdNameZRemrangebyscore, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRemrangebyscore, zremrangebyscoreptr)); + std::unique_ptr zremrangebyscoreptr = std::make_unique( + kCmdNameZRemrangebyscore, 4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameZRemrangebyscore, std::move(zremrangebyscoreptr))); ////ZRemrangebylexCmd - Cmd* zremrangebylexptr = new ZRemrangebylexCmd(kCmdNameZRemrangebylex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRemrangebylex, zremrangebylexptr)); + std::unique_ptr zremrangebylexptr = std::make_unique( + kCmdNameZRemrangebylex, 4, kCmdFlagsWrite | kCmdFlagsZset |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameZRemrangebylex, std::move(zremrangebylexptr))); ////ZPopmax - Cmd* zpopmaxptr = new ZPopmaxCmd(kCmdNameZPopmax, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZPopmax, zpopmaxptr)); + std::unique_ptr zpopmaxptr = std::make_unique( + kCmdNameZPopmax, -2, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZPopmax, std::move(zpopmaxptr))); ////ZPopmin - Cmd* zpopminptr = new ZPopminCmd(kCmdNameZPopmin, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZPopmin, zpopminptr)); + std::unique_ptr zpopminptr = std::make_unique( + kCmdNameZPopmin, -2, 
kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameZPopmin, std::move(zpopminptr))); - //Set + // Set ////SAddCmd - Cmd* saddptr = new SAddCmd(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSAdd, saddptr)); + std::unique_ptr saddptr = + std::make_unique(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSAdd, std::move(saddptr))); ////SPopCmd - Cmd* spopptr = new SPopCmd(kCmdNameSPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSPop, spopptr)); + std::unique_ptr spopptr = + std::make_unique(kCmdNameSPop, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSPop, std::move(spopptr))); ////SCardCmd - Cmd* scardptr = new SCardCmd(kCmdNameSCard, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSCard, scardptr)); + std::unique_ptr scardptr = + std::make_unique(kCmdNameSCard, 2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSCard, std::move(scardptr))); ////SMembersCmd - Cmd* smembersptr = new SMembersCmd(kCmdNameSMembers, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSMembers, smembersptr)); + std::unique_ptr smembersptr = + std::make_unique(kCmdNameSMembers, 2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSMembers, std::move(smembersptr))); ////SScanCmd - Cmd* sscanptr = new SScanCmd(kCmdNameSScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSScan, sscanptr)); + std::unique_ptr sscanptr = + std::make_unique(kCmdNameSScan, -3, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSScan, std::move(sscanptr))); ////SRemCmd - Cmd* sremptr = new SRemCmd(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSRem, sremptr)); + std::unique_ptr sremptr = + std::make_unique(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSRem, std::move(sremptr))); ////SUnionCmd - Cmd* sunionptr = new SUnionCmd(kCmdNameSUnion, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSUnion, sunionptr)); + std::unique_ptr sunionptr = std::make_unique( + kCmdNameSUnion, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSUnion, std::move(sunionptr))); ////SUnionstoreCmd - Cmd* sunionstoreptr = new SUnionstoreCmd(kCmdNameSUnionstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSUnionstore, sunionstoreptr)); + std::unique_ptr sunionstoreptr = + std::make_unique(kCmdNameSUnionstore, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSUnionstore, std::move(sunionstoreptr))); ////SInterCmd - Cmd* sinterptr = new SInterCmd(kCmdNameSInter, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); - 
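// [Editor's note] SUNION above (and SINTER/SDIFF just below) switch from
// kCmdFlagsRead to kCmdFlagsWrite in this patch even though they do not
// modify data. The patch does not say why; one plausible reading is that
// write-flagged commands take the multi-record key lock in
// Cmd::InternalProcessCommand() further down, which these multi-key commands
// would otherwise skip. Treat this as an editorial observation, not a fact
// stated by the patch.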
cmd_table->insert(std::pair(kCmdNameSInter, sinterptr)); + std::unique_ptr sinterptr = std::make_unique( + kCmdNameSInter, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSInter, std::move(sinterptr))); ////SInterstoreCmd - Cmd* sinterstoreptr = new SInterstoreCmd(kCmdNameSInterstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSInterstore, sinterstoreptr)); + std::unique_ptr sinterstoreptr = + std::make_unique(kCmdNameSInterstore, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSInterstore, std::move(sinterstoreptr))); ////SIsmemberCmd - Cmd* sismemberptr = new SIsmemberCmd(kCmdNameSIsmember, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSIsmember, sismemberptr)); + std::unique_ptr sismemberptr = + std::make_unique(kCmdNameSIsmember, 3, kCmdFlagsRead | kCmdFlagsSet |kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSIsmember, std::move(sismemberptr))); ////SDiffCmd - Cmd* sdiffptr = new SDiffCmd(kCmdNameSDiff, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSDiff, sdiffptr)); + std::unique_ptr sdiffptr = + std::make_unique(kCmdNameSDiff, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSDiff, std::move(sdiffptr))); ////SDiffstoreCmd - Cmd* sdiffstoreptr = new SDiffstoreCmd(kCmdNameSDiffstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSDiffstore, sdiffstoreptr)); + std::unique_ptr sdiffstoreptr = + std::make_unique(kCmdNameSDiffstore, -3, kCmdFlagsWrite | kCmdFlagsSet |kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSDiffstore, std::move(sdiffstoreptr))); ////SMoveCmd - Cmd* smoveptr = new SMoveCmd(kCmdNameSMove, 4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSMove, smoveptr)); + std::unique_ptr smoveptr = + std::make_unique(kCmdNameSMove, 4, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSMove, std::move(smoveptr))); ////SRandmemberCmd - Cmd* srandmemberptr = new SRandmemberCmd(kCmdNameSRandmember, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSRandmember, srandmemberptr)); + std::unique_ptr srandmemberptr = + std::make_unique(kCmdNameSRandmember, -2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSRandmember, std::move(srandmemberptr))); - //BitMap + // BitMap ////bitsetCmd - Cmd* bitsetptr = new BitSetCmd(kCmdNameBitSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitSet, bitsetptr)); + std::unique_ptr bitsetptr = + std::make_unique(kCmdNameBitSet, 4, kCmdFlagsWrite | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); + cmd_table->insert(std::pair>(kCmdNameBitSet, std::move(bitsetptr))); ////bitgetCmd - Cmd* bitgetptr = new BitGetCmd(kCmdNameBitGet, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitGet, bitgetptr)); + std::unique_ptr bitgetptr = + 
std::make_unique(kCmdNameBitGet, 3, kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameBitGet, std::move(bitgetptr))); ////bitcountCmd - Cmd* bitcountptr = new BitCountCmd(kCmdNameBitCount, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitCount, bitcountptr)); + std::unique_ptr bitcountptr = + std::make_unique(kCmdNameBitCount, -2, kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache); + cmd_table->insert(std::pair>(kCmdNameBitCount, std::move(bitcountptr))); ////bitposCmd - Cmd* bitposptr = new BitPosCmd(kCmdNameBitPos, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitPos, bitposptr)); + std::unique_ptr bitposptr = + std::make_unique(kCmdNameBitPos, -3, kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameBitPos, std::move(bitposptr))); ////bitopCmd - Cmd* bitopptr = new BitOpCmd(kCmdNameBitOp, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitOp, bitopptr)); + std::unique_ptr bitopptr = + std::make_unique(kCmdNameBitOp, -3, kCmdFlagsWrite | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); + cmd_table->insert(std::pair>(kCmdNameBitOp, std::move(bitopptr))); - //HyperLogLog + // HyperLogLog ////pfaddCmd - Cmd * pfaddptr = new PfAddCmd(kCmdNamePfAdd, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHyperLogLog); - cmd_table->insert(std::pair(kCmdNamePfAdd, pfaddptr)); + std::unique_ptr pfaddptr = std::make_unique( + kCmdNamePfAdd, -2, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePfAdd, std::move(pfaddptr))); ////pfcountCmd - Cmd * pfcountptr = new PfCountCmd(kCmdNamePfCount, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsHyperLogLog); - cmd_table->insert(std::pair(kCmdNamePfCount, pfcountptr)); + std::unique_ptr pfcountptr = std::make_unique( + kCmdNamePfCount, -2, kCmdFlagsRead | kCmdFlagsHyperLogLog | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePfCount, std::move(pfcountptr))); ////pfmergeCmd - Cmd * pfmergeptr = new PfMergeCmd(kCmdNamePfMerge, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsHyperLogLog); - cmd_table->insert(std::pair(kCmdNamePfMerge, pfmergeptr)); + std::unique_ptr pfmergeptr = std::make_unique( + kCmdNamePfMerge, -2, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePfMerge, std::move(pfmergeptr))); - //GEO + // GEO ////GepAdd - Cmd * geoaddptr = new GeoAddCmd(kCmdNameGeoAdd, -5, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoAdd, geoaddptr)); + std::unique_ptr geoaddptr = std::make_unique( + kCmdNameGeoAdd, -5, kCmdFlagsWrite | kCmdFlagsGeo | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameGeoAdd, std::move(geoaddptr))); ////GeoPos - Cmd * geoposptr = new GeoPosCmd(kCmdNameGeoPos, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoPos, geoposptr)); + std::unique_ptr geoposptr = std::make_unique( + kCmdNameGeoPos, -2, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameGeoPos, std::move(geoposptr))); ////GeoDist - Cmd * geodistptr = new GeoDistCmd(kCmdNameGeoDist, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoDist, geodistptr)); + 
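// [Editor's note] PFMERGE above changes its arity from -3 to -2, i.e. it now
// accepts a bare "PFMERGE destkey" with no source keys. That matches Redis,
// where PFMERGE is registered with arity -2 and sourcekey is optional.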
std::unique_ptr geodistptr = std::make_unique( + kCmdNameGeoDist, -4, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameGeoDist, std::move(geodistptr))); ////GeoHash - Cmd * geohashptr = new GeoHashCmd(kCmdNameGeoHash, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoHash, geohashptr)); + std::unique_ptr geohashptr = std::make_unique( + kCmdNameGeoHash, -2, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameGeoHash, std::move(geohashptr))); ////GeoRadius - Cmd * georadiusptr = new GeoRadiusCmd(kCmdNameGeoRadius, -6, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoRadius, georadiusptr)); + std::unique_ptr georadiusptr = std::make_unique( + kCmdNameGeoRadius, -6, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameGeoRadius, std::move(georadiusptr))); ////GeoRadiusByMember - Cmd * georadiusbymemberptr = new GeoRadiusByMemberCmd(kCmdNameGeoRadiusByMember, -5, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoRadiusByMember, georadiusbymemberptr)); + std::unique_ptr georadiusbymemberptr = std::make_unique( + kCmdNameGeoRadiusByMember, -5, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameGeoRadiusByMember, std::move(georadiusbymemberptr))); - //PubSub + // PubSub ////Publish - Cmd * publishptr = new PublishCmd(kCmdNamePublish, 3, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePublish, publishptr)); + std::unique_ptr publishptr = + std::make_unique(kCmdNamePublish, 3, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsFast ); + cmd_table->insert(std::pair>(kCmdNamePublish, std::move(publishptr))); ////Subscribe - Cmd * subscribeptr = new SubscribeCmd(kCmdNameSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNameSubscribe, subscribeptr)); + std::unique_ptr subscribeptr = + std::make_unique(kCmdNameSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow ); + cmd_table->insert(std::pair>(kCmdNameSubscribe, std::move(subscribeptr))); ////UnSubscribe - Cmd * unsubscribeptr = new UnSubscribeCmd(kCmdNameUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNameUnSubscribe, unsubscribeptr)); + std::unique_ptr unsubscribeptr = + std::make_unique(kCmdNameUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow ); + cmd_table->insert(std::pair>(kCmdNameUnSubscribe, std::move(unsubscribeptr))); ////PSubscribe - Cmd * psubscribeptr = new PSubscribeCmd(kCmdNamePSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePSubscribe, psubscribeptr)); + std::unique_ptr psubscribeptr = + std::make_unique(kCmdNamePSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow ); + cmd_table->insert(std::pair>(kCmdNamePSubscribe, std::move(psubscribeptr))); ////PUnSubscribe - Cmd * punsubscribeptr = new PUnSubscribeCmd(kCmdNamePUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePUnSubscribe, punsubscribeptr)); + std::unique_ptr punsubscribeptr = + std::make_unique(kCmdNamePUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow ); + cmd_table->insert(std::pair>(kCmdNamePUnSubscribe, std::move(punsubscribeptr))); ////PubSub - Cmd * pubsubptr = new PubSubCmd(kCmdNamePubSub, -2, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePubSub, 
pubsubptr));
+  std::unique_ptr<Cmd> pubsubptr =
+      std::make_unique<PubSubCmd>(kCmdNamePubSub, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePubSub, std::move(pubsubptr)));
+
+  ////ACL
+  std::unique_ptr<Cmd> aclptr = std::make_unique<PikaAclCmd>(KCmdNameAcl, -2, kCmdFlagsAdmin | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(KCmdNameAcl, std::move(aclptr)));
+
+  // Transaction
+  ////Multi
+  std::unique_ptr<Cmd> multiptr =
+      std::make_unique<MultiCmd>(kCmdNameMulti, 1, kCmdFlagsRead | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameMulti, std::move(multiptr)));
+  ////Exec
+  std::unique_ptr<Cmd> execptr = std::make_unique<ExecCmd>(
+      kCmdNameExec, 1, kCmdFlagsRead | kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameExec, std::move(execptr)));
+  ////Discard
+  std::unique_ptr<Cmd> discardptr = std::make_unique<DiscardCmd>(kCmdNameDiscard, 1, kCmdFlagsRead | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameDiscard, std::move(discardptr)));
+  ////Watch
+  std::unique_ptr<Cmd> watchptr = std::make_unique<WatchCmd>(kCmdNameWatch, -2, kCmdFlagsRead | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameWatch, std::move(watchptr)));
+  ////Unwatch
+  std::unique_ptr<Cmd> unwatchptr = std::make_unique<UnwatchCmd>(kCmdNameUnWatch, 1, kCmdFlagsRead | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameUnWatch, std::move(unwatchptr)));
+
+  // Stream
+  ////XAdd
+  std::unique_ptr<Cmd> xaddptr =
+      std::make_unique<XAddCmd>(kCmdNameXAdd, -4, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXAdd, std::move(xaddptr)));
+  ////XLen
+  std::unique_ptr<Cmd> xlenptr =
+      std::make_unique<XLenCmd>(kCmdNameXLen, 2, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXLen, std::move(xlenptr)));
+  ////XRead
+  std::unique_ptr<Cmd> xreadptr =
+      std::make_unique<XReadCmd>(kCmdNameXRead, -3, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXRead, std::move(xreadptr)));
+  ////XRange
+  std::unique_ptr<Cmd> xrangeptr =
+      std::make_unique<XRangeCmd>(kCmdNameXRange, -4, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXRange, std::move(xrangeptr)));
+  ////XRevrange
+  std::unique_ptr<Cmd> xrevrangeptr =
+      std::make_unique<XRevrangeCmd>(kCmdNameXRevrange, -4, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXRevrange, std::move(xrevrangeptr)));
+  ////XTrim
+  std::unique_ptr<Cmd> xtrimptr =
+      std::make_unique<XTrimCmd>(kCmdNameXTrim, -2, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXTrim, std::move(xtrimptr)));
+  ////XDel
+  std::unique_ptr<Cmd> xdelptr =
+      std::make_unique<XDelCmd>(kCmdNameXDel, -3, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXDel, std::move(xdelptr)));
+  ////XINFO
+  std::unique_ptr<Cmd> xinfoptr =
+      std::make_unique<XInfoCmd>(kCmdNameXInfo, -2, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXInfo, std::move(xinfoptr)));
 }

-Cmd* GetCmdFromTable(const std::string& opt, const CmdTable& cmd_table) {
-  CmdTable::const_iterator it = cmd_table.find(opt);
+Cmd* GetCmdFromDB(const std::string& opt, const CmdTable& cmd_table) {
+  auto it = cmd_table.find(opt);
   if (it != cmd_table.end()) {
-    return it->second;
+    return it->second.get();
   }
-  return NULL;
+  return nullptr;
 }

-void DestoryCmdTable(CmdTable* cmd_table) {
-  CmdTable::const_iterator it = cmd_table->begin();
-  for (; it != cmd_table->end(); ++it) {
-    delete it->second;
-  }
-}
+bool Cmd::CheckArg(uint64_t num) const {
+  return !((arity_ > 0 && num != arity_) || (arity_ < 0 && num < -arity_));
+}
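// [Editor's note] CheckArg() above implements Redis-style arity: a positive
// arity_ demands an exact token count (command name included), a negative
// arity_ a minimum of -arity_. Worked example using registrations from this
// patch (ZADD is arity -4, MULTI is arity 1):
//   zadd key score member   -> 4 tokens, CheckArg(4) == true
//   zadd key s1 m1 s2 m2    -> 6 tokens, CheckArg(6) == true
//   zadd key score          -> 3 tokens, CheckArg(3) == false
//   multi                   -> 1 token,  CheckArg(1) == true
//   multi extra             -> 2 tokens, CheckArg(2) == false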
-void TryAliasChange(std::vector<std::string>* argv) {
-  if (argv->empty()) {
-    return;
-  }
-  if (!strcasecmp(argv->front().c_str(), kCmdNameSlaveof.c_str())) {
-    argv->front() = "slotsslaveof";
-    argv->insert(argv->begin(), kClusterPrefix);
-    if (!strcasecmp(argv->back().c_str(), "force")) {
-      argv->back() = "all";
-      argv->push_back("force");
-    } else {
-      argv->push_back("all");
-    }
-  }
-}
+Cmd::Cmd(std::string name, int arity, uint32_t flag, uint32_t aclCategory)
+    : name_(std::move(name)), arity_(arity), flag_(flag), aclCategory_(aclCategory), cache_missed_in_rtc_(false) {}

-void Cmd::Initial(const PikaCmdArgsType& argv,
-                  const std::string& table_name) {
+void Cmd::Initial(const PikaCmdArgsType& argv, const std::string& db_name) {
   argv_ = argv;
-  if (!g_pika_conf->classic_mode()) {
-    TryAliasChange(&argv_);
-  }
-  table_name_ = table_name;
-  res_.clear();  // Clear res content
-  Clear();       // Clear cmd, Derived class can has own implement
+  db_name_ = db_name;
+  res_.clear();  // Clear res content
+  db_ = g_pika_server->GetDB(db_name_);
+  sync_db_ = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_));
+  Clear();  // Clear cmd, so a derived class can have its own implementation
   DoInitial();
 };

-std::vector<std::string> Cmd::current_key() const {
-  std::vector<std::string> res;
-  res.push_back("");
-  return res;
-}
+std::vector<std::string> Cmd::current_key() const { return {""}; }

 void Cmd::Execute() {
-  if (name_ == kCmdNameFlushdb) {
-    ProcessFlushDBCmd();
-  } else if (name_ == kCmdNameFlushall) {
-    ProcessFlushAllCmd();
-  } else if (name_ == kCmdNameInfo || name_ == kCmdNameConfig) {
-    ProcessDoNotSpecifyPartitionCmd();
-  } else if (is_single_partition() || g_pika_conf->classic_mode()) {
-    ProcessSinglePartitionCmd();
-  } else if (is_multi_partition()) {
-    ProcessMultiPartitionCmd();
-  } else {
-    ProcessDoNotSpecifyPartitionCmd();
-  }
+  ProcessCommand();
 }

-void Cmd::ProcessFlushDBCmd() {
-  std::shared_ptr<Table>
table = g_pika_server->GetTable(table_name_); - if (!table) { - res_.SetRes(CmdRes::kInvalidTable); +void Cmd::ProcessCommand(const HintKeys& hint_keys) { + if (stage_ == kNone) { + InternalProcessCommand(hint_keys); } else { - if (table->IsKeyScaning()) { - res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, Try again later"); - } else { - slash::RWLock l_prw(&table->partitions_rw_, true); - for (const auto& partition_item : table->partitions_) { - ProcessCommand(partition_item.second); - } - res_.SetRes(CmdRes::kOk); + if (stage_ == kBinlogStage) { + DoBinlog(); + } else if (stage_ == kExecuteStage) { + DoCommand(hint_keys); } } } -void Cmd::ProcessFlushAllCmd() { - slash::RWLock l_trw(&g_pika_server->tables_rw_, true); - for (const auto& table_item : g_pika_server->tables_) { - if (table_item.second->IsKeyScaning()) { - res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, Try again later"); - return; - } +void Cmd::InternalProcessCommand(const HintKeys& hint_keys) { + pstd::lock::MultiRecordLock record_lock(db_->LockMgr()); + if (is_write()) { + record_lock.Lock(current_key()); } - - for (const auto& table_item : g_pika_server->tables_) { - slash::RWLock l_prw(&table_item.second->partitions_rw_, true); - for (const auto& partition_item : table_item.second->partitions_) { - ProcessCommand(partition_item.second); - } + uint64_t start_us = 0; + if (g_pika_conf->slowlog_slower_than() >= 0) { + start_us = pstd::NowMicros(); } - res_.SetRes(CmdRes::kOk); -} -void Cmd::ProcessSinglePartitionCmd() { - std::shared_ptr partition; - if (g_pika_conf->classic_mode()) { - // in classic mode a table has only one partition - partition = g_pika_server->GetPartitionByDbName(table_name_); - } else { - std::vector cur_key = current_key(); - if (cur_key.empty()) { - res_.SetRes(CmdRes::kErrOther, "Internal Error"); - return; - } - // in sharding mode we select partition by key - partition = g_pika_server->GetTablePartitionByKey(table_name_, cur_key.front()); + if (!IsSuspend()) { + db_->DBLockShared(); } - if (!partition) { - res_.SetRes(CmdRes::kErrOther, "Partition not found"); - return; + DoCommand(hint_keys); + if (g_pika_conf->slowlog_slower_than() >= 0) { + do_duration_ += pstd::NowMicros() - start_us; } - ProcessCommand(partition); -} + DoBinlog(); -void Cmd::ProcessCommand(std::shared_ptr partition) { - slash::lock::MultiRecordLock record_lock(partition->LockMgr()); - if (is_write()) { - record_lock.Lock(current_key()); + if (!IsSuspend()) { + db_->DBUnlockShared(); } - - DoCommand(partition); - - DoBinlog(partition); - if (is_write()) { record_lock.Unlock(current_key()); } - } -void Cmd::DoCommand(std::shared_ptr partition) { - if (!is_suspend()) { - partition->DbRWLockReader(); +void Cmd::DoCommand(const HintKeys& hint_keys) { + if (IsNeedCacheDo() + && PIKA_CACHE_NONE != g_pika_conf->cache_mode() + && db_->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) { + if (!cache_missed_in_rtc_ + && IsNeedReadCache()) { + ReadCache(); + } + if (is_read() + && (res().CacheMiss() || cache_missed_in_rtc_)) { + pstd::lock::MultiScopeRecordLock record_lock(db_->LockMgr(), current_key()); + DoThroughDB(); + if (IsNeedUpdateCache()) { + DoUpdateCache(); + } + } else if (is_write()) { + DoThroughDB(); + if (IsNeedUpdateCache()) { + DoUpdateCache(); + } + } + } else { + Do(); } - - Do(partition); - - if (!is_suspend()) { - partition->DbRWUnLock(); + if (!IsAdmin() && res().ok()) { + if (res().noexist()) { + g_pika_server->incr_server_keyspace_misses(); + } else { + 
g_pika_server->incr_server_keyspace_hits(); + } } - } -void Cmd::DoBinlog(std::shared_ptr partition) { - if (res().ok() - && is_write() - && g_pika_conf->write_binlog()) { +bool Cmd::DoReadCommandInCache() { + if (!IsSuspend()) { + db_->DBLockShared(); + } + DEFER { + if (!IsSuspend()) { + db_->DBUnlockShared(); + } + }; - uint32_t filenum = 0; - uint64_t offset = 0; - uint64_t logic_id = 0; + if (db_->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) { + if (IsNeedReadCache()) { + ReadCache(); + } + // return true only the read command hit + if (is_read() && !res().CacheMiss()) { + return true; + } + } + return false; +} - partition->logger()->Lock(); - partition->logger()->GetProducerStatus(&filenum, &offset, &logic_id); - uint32_t exec_time = time(nullptr); - std::string binlog = ToBinlog(exec_time, - g_pika_conf->server_id(), - logic_id, - filenum, - offset); - Status s = partition->WriteBinlog(binlog); - partition->logger()->Unlock(); +void Cmd::DoBinlog() { + if (res().ok() && is_write() && g_pika_conf->write_binlog()) { + std::shared_ptr conn_ptr = GetConn(); + std::shared_ptr resp_ptr = GetResp(); + // Consider that dummy cmd appended by system, both conn and resp are null. + if ((!conn_ptr || !resp_ptr) && (name_ != kCmdDummy)) { + if (!conn_ptr) { + LOG(WARNING) << sync_db_->SyncDBInfo().ToString() << " conn empty."; + } + if (!resp_ptr) { + LOG(WARNING) << sync_db_->SyncDBInfo().ToString() << " resp empty."; + } + res().SetRes(CmdRes::kErrOther); + return; + } + Status s = sync_db_->ConsensusProposeLog(shared_from_this()); if (!s.ok()) { + LOG(WARNING) << sync_db_->SyncDBInfo().ToString() << " Writing binlog failed, maybe no space left on device " + << s.ToString(); res().SetRes(CmdRes::kErrOther, s.ToString()); + return; } } } -void Cmd::ProcessMultiPartitionCmd() { - if (argv_.size() == static_cast(arity_ < 0 ? 
-arity_ : arity_)) { - ProcessSinglePartitionCmd(); - } else { - res_.SetRes(CmdRes::kErrOther, "This command usage only support in classic mode\r\n"); - return; - } -} +bool Cmd::hasFlag(uint32_t flag) const { return (flag_ & flag); } +bool Cmd::is_read() const { return (flag_ & kCmdFlagsRead); } +bool Cmd::is_write() const { return (flag_ & kCmdFlagsWrite); } +bool Cmd::IsLocal() const { return (flag_ & kCmdFlagsLocal); } -void Cmd::ProcessDoNotSpecifyPartitionCmd() { - Do(); +int8_t Cmd::SubCmdIndex(const std::string& cmdName) { + if (subCmdName_.empty()) { + return -1; + } + for (size_t i = 0; i < subCmdName_.size(); ++i) { + if (!strcasecmp(subCmdName_[i].data(), cmdName.data())) { + return i; + } + } + return -1; } -bool Cmd::is_write() const { - return ((flag_ & kCmdFlagsMaskRW) == kCmdFlagsWrite); -} -bool Cmd::is_local() const { - return ((flag_ & kCmdFlagsMaskLocal) == kCmdFlagsLocal); -} // Others need to be suspended when a suspend command run -bool Cmd::is_suspend() const { - return ((flag_ & kCmdFlagsMaskSuspend) == kCmdFlagsSuspend); -} -// Must with admin auth -bool Cmd::is_admin_require() const { - return ((flag_ & kCmdFlagsMaskAdminRequire) == kCmdFlagsAdminRequire); -} -bool Cmd::is_single_partition() const { - return ((flag_ & kCmdFlagsMaskPartition) == kCmdFlagsSinglePartition); -} -bool Cmd::is_multi_partition() const { - return ((flag_ & kCmdFlagsMaskPartition) == kCmdFlagsMultiPartition); -} +bool Cmd::IsSuspend() const { return (flag_ & kCmdFlagsSuspend); } +// std::string Cmd::CurrentSubCommand() const { return ""; }; +bool Cmd::HasSubCommand() const { return subCmdName_.size() > 0; }; +std::vector Cmd::SubCommand() const { return subCmdName_; }; +bool Cmd::IsAdmin() const { return (flag_ & kCmdFlagsAdmin); } +bool Cmd::IsNeedUpdateCache() const { return (flag_ & kCmdFlagsUpdateCache); } +bool Cmd::IsNeedCacheDo() const { + if (g_pika_conf->IsCacheDisabledTemporarily()) { + return false; + } -std::string Cmd::name() const { - return name_; -} -CmdRes& Cmd::res() { - return res_; + if (hasFlag(kCmdFlagsKv)) { + if (!g_pika_conf->GetCacheString()) { + return false; + } + } else if (hasFlag(kCmdFlagsSet)) { + if (!g_pika_conf->GetCacheSet()) { + return false; + } + } else if (hasFlag(kCmdFlagsZset)) { + if (!g_pika_conf->GetCacheZset()) { + return false; + } + } else if (hasFlag(kCmdFlagsHash)) { + if (!g_pika_conf->GetCacheHash()) { + return false; + } + } else if (hasFlag(kCmdFlagsList)) { + if (!g_pika_conf->GetCacheList()) { + return false; + } + } else if (hasFlag(kCmdFlagsBit)) { + if (!g_pika_conf->GetCacheBit()) { + return false; + } + } + return (hasFlag(kCmdFlagsDoThroughDB)); } -std::string Cmd::ToBinlog(uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { +bool Cmd::IsNeedReadCache() const { return hasFlag(kCmdFlagsReadCache); } + +bool Cmd::HashtagIsConsistent(const std::string& lhs, const std::string& rhs) const { return true; } + +std::string Cmd::name() const { return name_; } +CmdRes& Cmd::res() { return res_; } + +std::string Cmd::db_name() const { return db_name_; } + +PikaCmdArgsType& Cmd::argv() { return argv_; } + +uint32_t Cmd::AclCategory() const { return aclCategory_; } + +void Cmd::AddAclCategory(uint32_t aclCategory) { aclCategory_ |= aclCategory; } +uint32_t Cmd::flag() const { return flag_; } + +std::string Cmd::ToRedisProtocol() { std::string content; content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, argv_.size(), "*"); + RedisAppendLenUint64(content, argv_.size(), "*"); 
for (const auto& v : argv_) { - RedisAppendLen(content, v.size(), "$"); + RedisAppendLenUint64(content, v.size(), "$"); RedisAppendContent(content, v); } - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); -} - -bool Cmd::CheckArg(int num) const { - if ((arity_ > 0 && num != arity_) - || (arity_ < 0 && num < -arity_)) { - return false; - } - return true; + return content; } void Cmd::LogCommand() const { @@ -754,10 +1063,14 @@ void Cmd::LogCommand() const { LOG(INFO) << "command:" << command; } -void Cmd::SetConn(const std::shared_ptr conn) { - conn_ = conn; -} +void Cmd::SetConn(const std::shared_ptr& conn) { conn_ = conn; } -std::shared_ptr Cmd::GetConn() { - return conn_.lock(); -} +std::shared_ptr Cmd::GetConn() { return conn_.lock(); } + +void Cmd::SetResp(const std::shared_ptr& resp) { resp_ = resp; } + +std::shared_ptr Cmd::GetResp() { return resp_.lock(); } + +void Cmd::SetStage(CmdStage stage) { stage_ = stage; } +bool Cmd::IsCacheMissedInRtc() const { return cache_missed_in_rtc_; } +void Cmd::SetCacheMissedInRtc(bool value) { cache_missed_in_rtc_ = value; } diff --git a/tools/pika_migrate/src/pika_command_docs.cc b/tools/pika_migrate/src/pika_command_docs.cc new file mode 100644 index 0000000000..50087d17d3 --- /dev/null +++ b/tools/pika_migrate/src/pika_command_docs.cc @@ -0,0 +1,10845 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifdef WITH_COMMAND_DOCS + +# include "include/pika_admin.h" + +# include +# include +# include +# include + +static CommandCmd::EncodablePtr operator""_RedisInt(unsigned long long value) { + return std::make_shared(value); +} + +static CommandCmd::EncodablePtr operator""_RedisString(const char* value, std::size_t length) { + return std::make_shared(std::string(value, length)); +} + +static CommandCmd::EncodablePtr operator""_RedisStatus(const char* value, std::size_t length) { + return std::make_shared(std::string(value, length)); +} + +static CommandCmd::EncodablePtr RedisMap(CommandCmd::EncodableMap::RedisMap values) { + return std::make_shared(std::move(values)); +} + +static CommandCmd::EncodablePtr RedisSet(std::vector values) { + return std::make_shared(std::move(values)); +} + +static CommandCmd::EncodablePtr RedisArray(std::vector values) { + return std::make_shared(std::move(values)); +} + +const std::string CommandCmd::kPikaField{"pika"}; +const CommandCmd::EncodablePtr CommandCmd::kNotSupportedLiteral = "当前还未支持"_RedisString; +const CommandCmd::EncodablePtr CommandCmd::kCompatibleLiteral = + "该接口完全支持,使用方式与redis没有任何区别"_RedisString; +const CommandCmd::EncodablePtr CommandCmd::kBitSpecLiteral = + "BIT操作:与Redis不同,Pika的bit操作范围为2^21, bitmap的最大值为256Kb。redis setbit 只是对key的value值更新。但是pika使用rocksdb作为存储引擎,rocksdb只会新写入数据并且只在compact的时候才从硬盘删除旧数据。如果pika的bit操作范围和redis一致都是2^32的话,那么有可能每次对同一个key setbit时,rocksdb都会存储一个512M大小的value。这会产生 严重的性能隐患。因此我们对pika的bit操作范围作了取舍。"_RedisString; +const CommandCmd::EncodablePtr CommandCmd::kHyperLogLiteral = + "50w以内误差均小于1%, 100w以内误差小于3%, 但付出了时间代价."_RedisString; +const CommandCmd::EncodablePtr CommandCmd::kPubSubLiteral = "暂不支持keyspace notifications"_RedisString; + +const CommandCmd::EncodablePtr CommandCmd::kNotSupportedSpecialization = RedisMap({{kPikaField, 
kNotSupportedLiteral}}); +const CommandCmd::EncodablePtr CommandCmd::kCompatibleSpecialization = RedisMap({{kPikaField, kCompatibleLiteral}}); +const CommandCmd::EncodablePtr CommandCmd::kBitSpecialization = RedisMap({{kPikaField, kBitSpecLiteral}}); +const CommandCmd::EncodablePtr CommandCmd::kHyperLogSpecialization = RedisMap({{kPikaField, kHyperLogLiteral}}); +const CommandCmd::EncodablePtr CommandCmd::kPubSubSpecialization = RedisMap({{kPikaField, kPubSubLiteral}}); + +const std::unordered_map CommandCmd::kPikaSpecialization{ + {"pexpire", RedisMap({{kPikaField, "无法精确到毫秒,底层会自动截断按秒级别进行处理"_RedisString}})}, + {"pexpireat", RedisMap({{kPikaField, "无法精确到毫秒,底层会自动截断按秒级别进行处理"_RedisString}})}, + {"scan", + RedisMap( + {{kPikaField, + "会顺序迭代当前db的快照,由于pika允许重名五次,所以scan有优先输出顺序,依次为:string -> hash -> list -> zset -> set"_RedisString}})}, + {"type", + RedisMap( + {{kPikaField, + "另外由于pika允许重名五次,所以type有优先输出顺序,依次为:string -> hash -> list -> zset -> set,如果这个key在string中存在,那么只输出sting,如果不存在,那么则输出hash的,依次类推"_RedisString}})}, + {"keys", + RedisMap( + {{kPikaField, + "KEYS命令支持参数支持扫描指定类型的数据,用法如 \"keys * [string, hash, list, zset, set]\""_RedisString}})}, + {"bitop", kBitSpecialization}, + {"getbit", kBitSpecialization}, + {"setbit", kBitSpecialization}, + {"hset", RedisMap({{kPikaField, "暂不支持单条命令设置多个field value,如有需求请用HMSET"_RedisString}})}, + {"srandmember", RedisMap({{kPikaField, "时间复杂度O( n ),耗时较多"_RedisString}})}, + {"zadd", RedisMap({{kPikaField, "的选项 [NX|XX] [CH] [INCR] 暂不支持"_RedisString}})}, + {"pfadd", kHyperLogSpecialization}, + {"pfcount", kHyperLogSpecialization}, + {"pfmerge", kHyperLogSpecialization}, + {"psubscribe", kPubSubSpecialization}, + {"pubsub", kPubSubSpecialization}, + {"publish", kPubSubSpecialization}, + {"punsubscribe", kPubSubSpecialization}, + {"subscribe", kPubSubSpecialization}, + {"unsubscribe", kPubSubSpecialization}, + {"info", + RedisMap( + {{kPikaField, + "info支持全部输出,也支持匹配形式的输出,例如可以通过info stats查看状态信息,需要注意的是key space与redis不同,pika对于key space的展示选择了分类型展示而非redis的分库展示(因为pika没有库),pika对于key space的统计是被动的,需要手动触发,然后pika会在后台进行统计,pika的key space统计是精确的。触发方式为执行:keyspace命令即可,然后pika会在后台统计,此时可以使用:keyspace readonly命令来进行查看,readonly参数可以避免反复进行统计,如果当前数据为0,则证明还在统计中"_RedisString}})}, + {"client", RedisMap({{kPikaField, + "当前client命令支持client list及client kill,client list显示的内容少于redis"_RedisString}})}, + {"select", RedisMap({{kPikaField, "该命令在3.1.0版前无任何效果,自3.1.0版开始与Redis一致"_RedisString}})}, + {"ping", RedisMap({{kPikaField, "该命令仅支持无参数使用,即使用PING,客户端返回PONG"_RedisString}})}, + {"type", + RedisMap( + {{kPikaField, + "pika不同类型的key name 是允许重复的,例如:string 类型里有 key1,hash list set zset类型可以同时存在 key1,在使用 type命令查询时,只能得到一个,如果要查询同一个 name 所有的类型,需要使用 ptype 命令查询"_RedisString}})}, +}; + +const std::unordered_map CommandCmd::kCommandDocs{ + {"zremrangebyscore", + RedisMap({ + {"summary", + "Removes members in a sorted set within a range of scores. 
Deletes the sorted set if all members were removed."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "max"_RedisString}, + }), + })}, + })}, + {"sunion", RedisMap({ + {"summary", "Returns the union of multiple sets."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the total number of elements in all given sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"debug", RedisMap({ + {"summary", "A container for debugging commands."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + })}, + {"readonly", + RedisMap({ + {"summary", "Enables read-only queries for a connection to a Redis Cluster replica node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"latency", + RedisMap({ + {"summary", "A container for latency diagnostics commands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"latency|doctor", RedisMap({ + {"summary", "Returns a human-readable latency analysis report."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"latency|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"latency|histogram", + RedisMap({ + {"summary", + "Returns the cumulative distribution of latencies of a subset or all commands."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(N) where N is the number of commands with latency information being retrieved."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"latency|history", RedisMap({ + {"summary", "Returns timestamp-latency samples for an event."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "event"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "event"_RedisString}, + }), + })}, + })}, + {"latency|graph", RedisMap({ + {"summary", "Returns a latency graph for an 
event."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "event"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "event"_RedisString}, + }), + })}, + })}, + {"latency|latest", RedisMap({ + {"summary", "Returns the latest latency samples for all events."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"latency|reset", RedisMap({ + {"summary", "Resets the latency data for one or more events."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "event"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "event"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + })}, + })}, + {"setbit", + RedisMap({ + {"summary", + "Sets or clears the bit at offset of the string value. Creates the key if it doesn't exist."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"lpush", + RedisMap({ + {"summary", "Prepends one or more elements to a list. Creates the key if it doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `element` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"punsubscribe", + RedisMap({ + {"summary", "Stops listening to messages published to channels that match one or more patterns."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N+M) where N is the number of patterns the client is already subscribed and M is the number of total patterns subscribed in the system (by any client)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"role", RedisMap({ + {"summary", "Returns the replication role."_RedisString}, + {"since", "2.8.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"lmove", + RedisMap({ + {"summary", + "Returns an element after popping it from one list and pushing it to another. 
Deletes the list if the last element was moved."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "wherefrom"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "whereto"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"memory", + RedisMap({ + {"summary", "A container for memory diagnostics commands."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"memory|doctor", RedisMap({ + {"summary", "Outputs a memory problems report."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"memory|malloc-stats", + RedisMap({ + {"summary", "Returns the allocator statistics."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on how much memory is allocated, could be slow"_RedisString}, + })}, + {"memory|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"memory|purge", RedisMap({ + {"summary", "Asks the allocator to release memory."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on how much memory is allocated, could be slow"_RedisString}, + })}, + {"memory|stats", RedisMap({ + {"summary", "Returns details about memory usage."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"memory|usage", RedisMap({ + {"summary", "Estimates the memory usage of a key."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of samples."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "SAMPLES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + })}, + })}, + 
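+ // Every entry in this table follows one builder pattern: RedisMap/RedisArray/RedisSet plus the
+ // _RedisString/_RedisInt/_RedisStatus literals assemble an EncodablePtr tree that mirrors the
+ // shape of a COMMAND DOCS reply. A minimal lookup sketch, assuming a hypothetical EncodeTo()
+ // serializer (not defined in this change):
+ //   if (auto it = kCommandDocs.find(name); it != kCommandDocs.end()) it->second->EncodeTo(resp);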
{"time", RedisMap({ + {"summary", "Returns the server time."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"sunsubscribe", + RedisMap({ + {"summary", "Stops listening to messages posted to shard channels."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of clients already subscribed to a shard channel."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "shardchannel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "shardchannel"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"module", + RedisMap({ + {"summary", "A container for module commands."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"module|load", RedisMap({ + {"summary", "Loads a module."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "path"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "path"_RedisString}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"module|loadex", RedisMap({ + {"summary", "Loads a module using extended parameters."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "path"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "path"_RedisString}, + }), + RedisMap({ + {"name", "configs"_RedisString}, + {"type", "block"_RedisString}, + {"token", "CONFIG"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "name"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "args"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "args"_RedisString}, + {"token", "ARGS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"module|list", RedisMap({ + {"summary", "Returns all loaded modules."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of loaded modules."_RedisString}, + })}, + {"module|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"module|unload", RedisMap({ + {"summary", "Unloads a module."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "name"_RedisString}, + }), + })}, + })}, + })}, + })}, + {"bzmpop", + 
RedisMap({ + {"summary", + "Removes and returns a member by score from one or more sorted sets. Blocks until a member is available otherwise. Deletes the sorted set if the last element was popped."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"readwrite", + RedisMap({ + {"summary", "Enables read-write queries for a connection to a Redis Cluster replica node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"zadd", + RedisMap({ + {"summary", + "Adds one or more members to a sorted set, or updates their scores. 
Creates the key if it doesn't exist."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)) for each item added, where N is the number of elements in the sorted set."_RedisString}, + {"history", + RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple elements."_RedisString}), + RedisArray({"3.0.2"_RedisString, "Added the `XX`, `NX`, `CH` and `INCR` options."_RedisString}), + RedisArray({"6.2.0"_RedisString, "Added the `GT` and `LT` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "3.0.2"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "comparison"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "gt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "change"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "change"_RedisString}, + {"token", "CH"_RedisString}, + {"since", "3.0.2"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "increment"_RedisString}, + {"token", "INCR"_RedisString}, + {"since", "3.0.2"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "score"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "score"_RedisString}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + }), + })}, + })}, + {"swapdb", + RedisMap({ + {"summary", "Swaps two Redis databases."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(N) where N is the count of clients watching or blocking on keys from both databases."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "index1"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index1"_RedisString}, + }), + RedisMap({ + {"name", "index2"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index2"_RedisString}, + }), + })}, + })}, + {"incrby", + RedisMap({ + {"summary", + "Increments the integer value of a key by a number. 
Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + })}, + {"zscore", RedisMap({ + {"summary", "Returns the score of a member in a sorted set."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + })}, + {"spop", + RedisMap({ + {"summary", + "Returns one or more random members from a set after removing them. Deletes the set if the last member was popped."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "Without the count argument O(1), otherwise O(N) where N is the value of the passed count."_RedisString}, + {"history", RedisSet({ + RedisArray({"3.2.0"_RedisString, "Added the `count` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"since", "3.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"mset", RedisMap({ + {"summary", "Atomically creates or modifies the string values of one or more keys."_RedisString}, + {"since", "1.0.1"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(N) where N is the number of keys to set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"geosearch", + RedisMap({ + {"summary", "Queries a geospatial index for members inside an area of a box or a circle."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added support for uppercase unit names."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "from"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", 
"member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"token", "FROMMEMBER"_RedisString}, + }), + RedisMap({ + {"name", "fromlonlat"_RedisString}, + {"type", "block"_RedisString}, + {"token", "FROMLONLAT"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "by"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "circle"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + {"token", "BYRADIUS"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "box"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "width"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "width"_RedisString}, + {"token", "BYBOX"_RedisString}, + }), + RedisMap({ + {"name", "height"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "height"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", 
RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"hget", RedisMap({ + {"summary", "Returns the value of a field in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + })}, + })}, + {"zscan", + RedisMap({ + {"summary", "Iterates over members and scores of a sorted set."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "cursor"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cursor"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "MATCH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xreadgroup", + RedisMap({ + {"summary", + "Returns new or historical messages from a stream for a consumer in a group. Blocks until a message is available otherwise."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "For each stream mentioned: O(M) with M being the number of elements returned. If M is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1). 
On the other side when XREADGROUP blocks, XADD will pay the O(N) time in order to serve the N clients blocked on the stream getting new data."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "group-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "GROUP"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "BLOCK"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "noack"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "noack"_RedisString}, + {"token", "NOACK"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "streams"_RedisString}, + {"type", "block"_RedisString}, + {"token", "STREAMS"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"copy", + RedisMap({ + {"summary", "Copies the value of a key to a new key."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N) worst case for collections, where N is the number of nested items. 
O(1) for string values."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "destination-db"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "destination-db"_RedisString}, + {"token", "DB"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"getbit", RedisMap({ + {"summary", "Returns a bit value by offset."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + })}, + })}, + {"xautoclaim", + RedisMap({ + {"summary", + "Changes, or acquires, ownership of messages in a consumer group, as if the messages were delivered to a consumer group member."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1) if COUNT is small."_RedisString}, + {"history", + RedisSet({ + RedisArray( + {"7.0.0"_RedisString, + "Added an element to the reply array, containing deleted entries the command cleared from the PEL"_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + RedisMap({ + {"name", "min-idle-time"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min-idle-time"_RedisString}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "justid"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "justid"_RedisString}, + {"token", "JUSTID"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lpushx", + RedisMap({ + {"summary", "Prepends one or more elements to a list only when the list exists."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, "Accepts multiple `element` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ 
+ RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sdiffstore", RedisMap({ + {"summary", "Stores the difference of multiple sets in a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the total number of elements in all given sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"setrange", + RedisMap({ + {"summary", + "Overwrites a part of a string value with another by an offset. Creates the key if it doesn't exist."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", + "O(1), not counting the time taken to copy the new string in place. Usually, this string is very small so the amortized complexity is O(1). Otherwise, complexity is O(M) with M being the length of the value argument."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"eval_ro", RedisMap({ + {"summary", "Executes a read-only server-side Lua script."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the script that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "script"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "script"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"bgsave", RedisMap({ + {"summary", "Asynchronously saves the database(s) to disk."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"3.2.2"_RedisString, "Added the `SCHEDULE` option."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "schedule"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "schedule"_RedisString}, + {"token", "SCHEDULE"_RedisString}, + {"since", "3.2.2"_RedisString}, + {"flags", 
RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"discard", RedisMap({ + {"summary", "Discards a transaction."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "O(N), when N is the number of queued commands"_RedisString}, + })}, + {"psync", RedisMap({ + {"summary", "An internal command used in replication."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "server"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "replicationid"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "replicationid"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + })}, + })}, + {"keys", + RedisMap({ + {"summary", "Returns all key names that match a pattern."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N) with N being the number of keys in the database, under the assumption that the key names in the database and the given pattern have limited length."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + }), + })}, + })}, + {"flushall", + RedisMap({ + {"summary", "Removes all keys from all databases."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the total number of keys in all databases"_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, "Added the `ASYNC` flushing mode modifier."_RedisString}), + RedisArray({"6.2.0"_RedisString, "Added the `SYNC` flushing mode modifier."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "async"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "async"_RedisString}, + {"token", "ASYNC"_RedisString}, + {"since", "4.0.0"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + })}, + }), + })}, + })}, + {"incrbyfloat", + RedisMap({ + {"summary", + "Increment the floating point value of a key by a number. 
Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + })}, + {"expireat", + RedisMap({ + {"summary", "Sets the expiration time of a key to a Unix timestamp."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added options: `NX`, `XX`, `GT` and `LT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "unix-time-seconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-seconds"_RedisString}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + RedisMap({ + {"name", "gt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"zunion", + RedisMap({ + {"summary", "Returns the union of multiple sorted sets."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N)+O(M*log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "weight"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "weight"_RedisString}, + {"token", "WEIGHTS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "aggregate"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "AGGREGATE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sum"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sum"_RedisString}, + {"token", "SUM"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + 
{"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"monitor", RedisMap({ + {"summary", "Listens for all requests received by the server in real-time."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + })}, + {"substr", + RedisMap({ + {"summary", "Returns a substring from a string value."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", + "O(N) where N is the length of the returned string. The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "2.0.0"_RedisString}, + {"replaced_by", "`GETRANGE`"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end"_RedisString}, + }), + })}, + })}, + {"setex", + RedisMap({ + {"summary", + "Sets the string value and expiration time of a key. Creates the key if it doesn't exist."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "2.6.12"_RedisString}, + {"replaced_by", "`SET` with the `EX` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"pfselftest", RedisMap({ + {"summary", "An internal command for testing HyperLogLog values."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", "N/A"_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + })}, + {"blpop", + RedisMap({ + {"summary", + "Removes and returns the first element in a list. Blocks until an element is available otherwise. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of provided keys."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"ssubscribe", RedisMap({ + {"summary", "Listens for messages published to shard channels."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of shard channels to subscribe to."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "shardchannel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "shardchannel"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"rpush", + RedisMap({ + {"summary", "Appends one or more elements to a list. Creates the key if it doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `element` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sdiff", RedisMap({ + {"summary", "Returns the difference of multiple sets."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the total number of elements in all given sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"geosearchstore", + RedisMap({ + {"summary", + "Queries a geospatial index for members inside an area of a box or a circle, optionally stores the result."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added support for uppercase unit names."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + 
RedisMap({ + {"name", "from"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"token", "FROMMEMBER"_RedisString}, + }), + RedisMap({ + {"name", "fromlonlat"_RedisString}, + {"type", "block"_RedisString}, + {"token", "FROMLONLAT"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "by"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "circle"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + {"token", "BYRADIUS"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "box"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "width"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "width"_RedisString}, + {"token", "BYBOX"_RedisString}, + }), + RedisMap({ + {"name", "height"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "height"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", 
"count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "storedist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "storedist"_RedisString}, + {"token", "STOREDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zmscore", RedisMap({ + {"summary", "Returns the score of one or more members in a sorted set."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(N) where N is the number of members being requested."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"smismember", + RedisMap({ + {"summary", "Determines whether multiple members belong to a set."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the number of elements being checked for membership"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"append", + RedisMap({ + {"summary", "Appends a string to the value of a key. Creates the key if it doesn't exist."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", + "O(1). The amortized time complexity is O(1) assuming the appended value is small and the already present value is of any size, since the dynamic string library used by Redis will double the free space available on every reallocation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"zrangebylex", RedisMap({ + {"summary", "Returns members in a sorted set within a lexicographical range."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `BYLEX` argument"_RedisString}, + {"arguments", RedisArray( + { + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"eval", + RedisMap({ + {"summary", "Executes a server-side Lua script."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the script that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "script"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "script"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"slaveof", + RedisMap({ + {"summary", "Sets a Redis server as a replica of another, or promotes it to being a master."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "5.0.0"_RedisString}, + {"replaced_by", "`REPLICAOF`"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "host"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "host"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + })}, + })}, + {"reset", RedisMap({ + {"summary", "Resets the connection."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"zinter", + RedisMap({ + {"summary", "Returns the intersect of multiple sorted sets."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set."_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", 
"integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "weight"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "weight"_RedisString}, + {"token", "WEIGHTS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "aggregate"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "AGGREGATE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sum"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sum"_RedisString}, + {"token", "SUM"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"pexpire", + RedisMap({ + {"summary", "Sets the expiration time of a key in milliseconds."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added options: `NX`, `XX`, `GT` and `LT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + RedisMap({ + {"name", "gt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"command", + RedisMap({ + {"summary", "Returns detailed information about all commands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the total number of Redis commands"_RedisString}, + {"subcommands", + RedisMap({ + {"command|getkeys", + RedisMap({ + {"summary", "Extracts the key names from an arbitrary command."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of arguments to the 
command"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command"_RedisString}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"command|docs", + RedisMap({ + {"summary", "Returns documentary information about one, multiple or all commands."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of commands to look up"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command-name"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"command|count", RedisMap({ + {"summary", "Returns a count of commands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"command|getkeysandflags", + RedisMap({ + {"summary", "Extracts the key names and access flags for an arbitrary command."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of arguments to the command"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command"_RedisString}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"command|info", + RedisMap({ + {"summary", "Returns information about one, multiple or all commands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of commands to look up"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, + "Allowed to be called with no argument to get info on all commands."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command-name"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"command|list", + RedisMap({ + {"summary", "Returns a list of command names."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the total number of Redis commands"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "filterby"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "FILTERBY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "module-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "module-name"_RedisString}, + {"token", "MODULE"_RedisString}, + }), + RedisMap({ + {"name", "category"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "category"_RedisString}, + {"token", "ACLCAT"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "PATTERN"_RedisString}, + }), + })}, + }), + })}, + })}, + 
{"command|help", + RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"lrange", + RedisMap({ + {"summary", "Returns a range of elements from a list."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(S+N) where S is the distance of start offset from HEAD for small lists, from nearest end (HEAD or TAIL) for large lists; and N is the number of elements in the specified range."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + })}, + })}, + {"lindex", + RedisMap({ + {"summary", "Returns an element from a list by its index."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N) where N is the number of elements to traverse to get to the element at index. This makes asking for the first or the last element of the list O(1)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "index"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index"_RedisString}, + }), + })}, + })}, + {"blmove", + RedisMap({ + {"summary", + "Pops an element from a list, pushes it to another list and returns it. Blocks until an element is available otherwise. 
Deletes the list if the last element was moved."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "wherefrom"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "whereto"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"ttl", + RedisMap({ + {"summary", "Returns the expiration time in seconds of a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"2.8.0"_RedisString, "Added the -2 reply."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"xread", + RedisMap({ + {"summary", + "Returns messages from multiple streams with IDs greater than the ones requested. 
Blocks until a message is available otherwise."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "BLOCK"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "streams"_RedisString}, + {"type", "block"_RedisString}, + {"token", "STREAMS"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"xgroup", + RedisMap({ + {"summary", "A container for consumer groups commands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"xgroup|delconsumer", RedisMap({ + {"summary", "Deletes a consumer from a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + })}, + })}, + {"xgroup|create", + RedisMap({ + {"summary", "Creates a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `entries_read` named argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "id-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + }), + RedisMap({ + {"name", "new-id"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "new-id"_RedisString}, + {"token", "$"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "mkstream"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mkstream"_RedisString}, + {"token", "MKSTREAM"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "entries-read"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "entries-read"_RedisString}, + {"token", 
"ENTRIESREAD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xgroup|destroy", + RedisMap({ + {"summary", "Destroys a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(N) where N is the number of entries in the group's pending entries list (PEL)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + })}, + })}, + {"xgroup|createconsumer", RedisMap({ + {"summary", "Creates a consumer in a consumer group."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + })}, + })}, + {"xgroup|setid", + RedisMap({ + {"summary", "Sets the last-delivered ID of a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the optional `entries_read` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "id-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + }), + RedisMap({ + {"name", "new-id"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "new-id"_RedisString}, + {"token", "$"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "entriesread"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "entries-read"_RedisString}, + {"token", "ENTRIESREAD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xgroup|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"hmget", RedisMap({ + {"summary", "Returns the values of all fields in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the number of fields being requested."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, 
+ }), + })}, + })}, + {"quit", RedisMap({ + {"summary", "Closes the connection."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "7.2.0"_RedisString}, + {"replaced_by", "just closing the connection"_RedisString}, + })}, + {"unlink", + RedisMap({ + {"summary", "Asynchronously deletes one or more keys."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(1) for each key removed regardless of its size. Then the command does O(N) work in a different thread in order to reclaim memory, where N is the number of allocations the deleted objects where composed of."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"mget", RedisMap({ + {"summary", "Atomically returns the string values of one or more keys."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(N) where N is the number of keys to retrieve."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"unwatch", RedisMap({ + {"summary", "Forgets about watched keys of a transaction."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"zpopmax", + RedisMap({ + {"summary", + "Returns the highest-scoring members from a sorted set after removing them. Deletes the sorted set if the last member was popped."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lpos", + RedisMap({ + {"summary", "Returns the index of matching elements in a list."_RedisString}, + {"since", "6.0.6"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N) where N is the number of elements in the list, for the average case. 
When searching for elements near the head or the tail of the list, or when the MAXLEN option is provided, the command may run in constant time."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + }), + RedisMap({ + {"name", "rank"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "rank"_RedisString}, + {"token", "RANK"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "num-matches"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "num-matches"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "len"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "len"_RedisString}, + {"token", "MAXLEN"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"bitcount", + RedisMap({ + {"summary", "Counts the number of set bits (population counting) in a string."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(N)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `BYTE|BIT` option."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "range"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "byte"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "byte"_RedisString}, + {"token", "BYTE"_RedisString}, + }), + RedisMap({ + {"name", "bit"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bit"_RedisString}, + {"token", "BIT"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + })}, + {"xdel", RedisMap({ + {"summary", "Returns the number of messages after removing them from a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(1) for each single item to delete in the stream, regardless of the stream size."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"xpending", + RedisMap({ + {"summary", + "Returns the information and entries from a stream consumer group's pending entries list."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, 
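+ // XPENDING has a summary form (key and group only) and an extended form; the
+ // optional "filters" block below models the extended form's
+ // IDLE/start/end/count/consumer arguments.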
+ {"complexity", + "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). O(M), where M is the total number of entries scanned when used with the IDLE filter. When the command returns just the summary and the list of consumers is small, it runs in O(1) time; otherwise, an additional O(N) time for iterating every consumer."_RedisString}, + {"history", + RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `IDLE` option and exclusive range intervals."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "filters"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "min-idle-time"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "min-idle-time"_RedisString}, + {"token", "IDLE"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"auth", + RedisMap({ + {"summary", "Authenticates the connection."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(N) where N is the number of passwords defined for the user"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, "Added ACL style (username and password)."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + {"since", "6.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "password"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "password"_RedisString}, + }), + })}, + })}, + {"select", RedisMap({ + {"summary", "Changes the selected database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "index"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index"_RedisString}, + }), + })}, + })}, + {"hmset", RedisMap({ + {"summary", "Sets the values of multiple fields."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the number of fields being set."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "4.0.0"_RedisString}, + {"replaced_by", "`HSET` with multiple field-value pairs"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + 
{"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"hstrlen", RedisMap({ + {"summary", "Returns the length of the value of a field."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + })}, + })}, + {"decr", + RedisMap({ + {"summary", + "Decrements the integer value of a key by one. Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"hdel", + RedisMap({ + {"summary", + "Deletes one or more fields and their values from a hash. Deletes the hash if no fields remain."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the number of fields to be removed."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `field` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"replicaof", RedisMap({ + {"summary", "Configures a server as replica of another, or promotes it to a master."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "host"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "host"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + })}, + })}, + {"psubscribe", + RedisMap({ + {"summary", "Listens for messages published to channels that match one or more patterns."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of patterns the client is already subscribed to."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"hset", + RedisMap({ + {"summary", "Creates or modifies the value of a field in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", 
"hash"_RedisString}, + {"complexity", + "O(1) for each field/value pair added, so O(N) to add N field/value pairs when the command is called with multiple field/value pairs."_RedisString}, + {"history", + RedisSet({ + RedisArray({"4.0.0"_RedisString, "Accepts multiple `field` and `value` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"brpop", + RedisMap({ + {"summary", + "Removes and returns the last element in a list. Blocks until an element is available otherwise. Deletes the list if the last element was popped."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of provided keys."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"exists", RedisMap({ + {"summary", "Determines whether one or more keys exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(N) where N is the number of keys to check."_RedisString}, + {"history", RedisSet({ + RedisArray({"3.0.3"_RedisString, "Accepts multiple `key` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"getrange", + RedisMap({ + {"summary", "Returns a substring of the string stored at a key."_RedisString}, + {"since", "2.4.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", + "O(N) where N is the length of the returned string. 
The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end"_RedisString}, + }), + })}, + })}, + {"llen", RedisMap({ + {"summary", "Returns the length of a list."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"xclaim", + RedisMap({ + {"summary", + "Changes, or acquires, ownership of a message in a consumer group, as if the message was delivered a consumer group member."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(log N) with N being the number of messages in the PEL of the consumer group."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + RedisMap({ + {"name", "min-idle-time"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min-idle-time"_RedisString}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "ms"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "ms"_RedisString}, + {"token", "IDLE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "unix-time-milliseconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-milliseconds"_RedisString}, + {"token", "TIME"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "RETRYCOUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "force"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "force"_RedisString}, + {"token", "FORCE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "justid"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "justid"_RedisString}, + {"token", "JUSTID"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "lastid"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "lastid"_RedisString}, + {"token", "LASTID"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zrevrange", + 
RedisMap({ + {"summary", "Returns members in a sorted set within a range of indexes in reverse order."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `REV` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xtrim", + RedisMap({ + {"summary", "Deletes messages from the beginning of a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(N), with N being the number of evicted entries. Constant times are very small however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, + "Added the `MINID` trimming strategy and the `LIMIT` option."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "trim"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "strategy"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "maxlen"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "maxlen"_RedisString}, + {"token", "MAXLEN"_RedisString}, + }), + RedisMap({ + {"name", "minid"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "minid"_RedisString}, + {"token", "MINID"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "operator"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "equal"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "equal"_RedisString}, + {"token", "="_RedisString}, + }), + RedisMap({ + {"name", "approximately"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "approximately"_RedisString}, + {"token", "~"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "threshold"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "threshold"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"acl", RedisMap({ + {"summary", "A container for Access List Control 
commands."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"acl|list", RedisMap({ + {"summary", "Dumps the effective rules in ACL file format."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of configured users."_RedisString}, + })}, + {"acl|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"acl|users", RedisMap({ + {"summary", "Lists all ACL users."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of configured users."_RedisString}, + })}, + {"acl|setuser", + RedisMap({ + {"summary", "Creates and modifies an ACL user and its rules."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of rules provided."_RedisString}, + {"history", + RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added Pub/Sub channel patterns."_RedisString}), + RedisArray( + {"7.0.0"_RedisString, "Added selectors and key based permissions."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + RedisMap({ + {"name", "rule"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "rule"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|log", + RedisMap({ + {"summary", "Lists recent security events generated due to ACL rules."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) with N being the number of entries shown."_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.2.0"_RedisString, + "Added entry ID, timestamp created, and timestamp last updated."_RedisString}), + })}, + {"arguments", RedisArray( + { + RedisMap( + { + {"name", "operation"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "reset"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "reset"_RedisString}, + {"token", "RESET"_RedisString}, + }), + })}, + }), + })}, + })}, + {"acl|dryrun", + RedisMap({ + {"summary", + "Simulates the execution of a command by a user, without executing the command."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)."_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + RedisMap({ + {"name", "command"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command"_RedisString}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", + RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|cat", + RedisMap({ + {"summary", "Lists the ACL categories, or the commands 
inside a category."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1) since the categories and commands are a fixed set."_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "category"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "category"_RedisString}, + {"flags", + RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|deluser", + RedisMap({ + {"summary", "Deletes ACL users, and terminates their connections."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1) amortized time considering the typical user."_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + {"flags", + RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|save", + RedisMap({ + {"summary", "Saves the effective ACL rules in the configured ACL file."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of configured users."_RedisString}, + })}, + {"acl|genpass", + RedisMap({ + {"summary", + "Generates a pseudorandom, secure password that can be used to identify ACL users."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray( + { + RedisMap({ + {"name", "bits"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "bits"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|getuser", RedisMap( + { + {"summary", "Lists the ACL rules of a user."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(N). Where N is the number of password, command and pattern rules that the user has."_RedisString}, + {"history", RedisSet( + { + RedisArray({"6.2.0"_RedisString, + "Added Pub/Sub channel patterns."_RedisString}), + RedisArray({"7.0.0"_RedisString, + "Added selectors and changed the format of key and channel patterns from a list to their rule representation."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + })}, + })}, + {"acl|load", RedisMap({ + {"summary", "Reloads the rules from the configured ACL file."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of configured users."_RedisString}, + })}, + {"acl|whoami", + RedisMap({ + {"summary", "Returns the authenticated username of the current connection."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"sadd", + RedisMap({ + {"summary", "Adds one or more members to a set. 
Creates the key if it doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `member` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"zlexcount", + RedisMap({ + {"summary", "Returns the number of members in a sorted set within a lexicographical range."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) with N being the number of elements in the sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + })}, + })}, + {"sinter", + RedisMap({ + {"summary", "Returns the intersect of multiple sets."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"georadiusbymember_ro", + RedisMap({ + {"summary", "Returns members from a geospatial index that are within a distance from a member."_RedisString}, + {"since", "3.2.10"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`GEOSEARCH` with the `BYRADIUS` and `FROMMEMBER` arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ 
+ {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + })}, + })}, + {"smove", RedisMap({ + {"summary", "Moves a member from one set to another."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + })}, + {"del", + RedisMap({ + {"summary", "Deletes one or more keys."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N) where N is the number of keys that will be removed. When a key to remove holds a value other than a string, the individual complexity for this key is O(M) where M is the number of elements in the list, set, sorted set or hash. 
Removing a single key that holds a string value is O(1)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"zrem", + RedisMap({ + {"summary", + "Removes one or more members from a sorted set. Deletes the sorted set if all members were removed."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(M*log(N)) with N being the number of elements in the sorted set and M the number of elements to be removed."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple elements."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"bzpopmin", + RedisMap({ + {"summary", + "Removes and returns the member with the lowest score from one or more sorted sets. Blocks until a member is available otherwise. Deletes the sorted set if the last element was popped."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) with N being the number of elements in the sorted set."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"xsetid", + RedisMap({ + {"summary", "An internal command for replicating stream values."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, + "Added the `entries_added` and `max_deleted_entry_id` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "last-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "last-id"_RedisString}, + }), + RedisMap({ + {"name", "entries-added"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "entries-added"_RedisString}, + {"token", "ENTRIESADDED"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "max-deleted-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max-deleted-id"_RedisString}, + {"token", "MAXDELETEDID"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zdiffstore", + RedisMap({ + {"summary", "Stores the difference of multiple sorted sets in a key."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", 
"sorted-set"_RedisString}, + {"complexity", + "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"setnx", RedisMap({ + {"summary", "Set the string value of a key only when the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "2.6.12"_RedisString}, + {"replaced_by", "`SET` with the `NX` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"getset", + RedisMap({ + {"summary", "Returns the previous string value of a key after setting it to a new value."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`SET` with the `!GET` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"unsubscribe", + RedisMap({ + {"summary", "Stops listening to messages posted to channels."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of clients already subscribed to a channel."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "channel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "channel"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"lcs", RedisMap({ + {"summary", "Finds the longest common substring."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(N*M) where N and M are the lengths of s1 and s2, respectively"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key1"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key1"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key2"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key2"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "len"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "len"_RedisString}, + {"token", "LEN"_RedisString}, + {"flags", RedisArray({ + 
"optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "idx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "idx"_RedisString}, + {"token", "IDX"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "min-match-len"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "min-match-len"_RedisString}, + {"token", "MINMATCHLEN"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withmatchlen"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withmatchlen"_RedisString}, + {"token", "WITHMATCHLEN"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lastsave", RedisMap({ + {"summary", "Returns the Unix timestamp of the last successful save to disk."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"xrange", + RedisMap({ + {"summary", "Returns the messages from a stream within a range of IDs."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(N) with N being the number of elements being returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1)."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added exclusive ranges."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"set", + RedisMap({ + {"summary", + "Sets the string value of a key, ignoring its type. 
The key is created if it doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"2.6.12"_RedisString, "Added the `EX`, `PX`, `NX` and `XX` options."_RedisString}), + RedisArray({"6.0.0"_RedisString, "Added the `KEEPTTL` option."_RedisString}), + RedisArray({"6.2.0"_RedisString, "Added the `GET`, `EXAT` and `PXAT` option."_RedisString}), + RedisArray({"7.0.0"_RedisString, "Allowed the `NX` and `GET` options to be used together."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "2.6.12"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "get"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "get"_RedisString}, + {"token", "GET"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "expiration"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + {"token", "EX"_RedisString}, + {"since", "2.6.12"_RedisString}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "PX"_RedisString}, + {"since", "2.6.12"_RedisString}, + }), + RedisMap({ + {"name", "unix-time-seconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-seconds"_RedisString}, + {"token", "EXAT"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + RedisMap({ + {"name", "unix-time-milliseconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-milliseconds"_RedisString}, + {"token", "PXAT"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + RedisMap({ + {"name", "keepttl"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "keepttl"_RedisString}, + {"token", "KEEPTTL"_RedisString}, + {"since", "6.0.0"_RedisString}, + }), + })}, + }), + })}, + })}, + {"geopos", RedisMap({ + {"summary", "Returns the longitude and latitude of members from a geospatial index."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", "O(N) where N is the number of members requested."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + 
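+ // The "optional" + "multiple" flags below mean GEOPOS takes zero or more
+ // members; per the Redis docs, each member missing from the index yields a
+ // nil element at the matching position of the reply.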
"optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"bgrewriteaof", RedisMap({ + {"summary", "Asynchronously rewrites the append-only file to disk."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"hincrby", + RedisMap({ + {"summary", + "Increments the integer value of a field in a hash by a number. Uses 0 as initial value if the field doesn't exist."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + })}, + {"lolwut", RedisMap({ + {"summary", "Displays computer art and the Redis version"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "version"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "version"_RedisString}, + {"token", "VERSION"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"get", RedisMap({ + {"summary", "Returns the string value of a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"persist", RedisMap({ + {"summary", "Removes the expiration time of a key."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"pexpireat", + RedisMap({ + {"summary", "Sets the expiration time of a key to a Unix milliseconds timestamp."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added options: `NX`, `XX`, `GT` and `LT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "unix-time-milliseconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-milliseconds"_RedisString}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + RedisMap({ + {"name", "gt"_RedisString}, + {"type", 
"pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"sunionstore", RedisMap({ + {"summary", "Stores the union of multiple sets in a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the total number of elements in all given sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"migrate", + RedisMap({ + {"summary", "Atomically transfers a key from one Redis instance to another."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "This command actually executes a DUMP+DEL in the source instance, and a RESTORE in the target instance. See the pages of these commands for time complexity. Also an O(N) data transfer between the two instances is performed."_RedisString}, + {"history", RedisSet({ + RedisArray({"3.0.0"_RedisString, "Added the `COPY` and `REPLACE` options."_RedisString}), + RedisArray({"3.0.6"_RedisString, "Added the `KEYS` option."_RedisString}), + RedisArray({"4.0.7"_RedisString, "Added the `AUTH` option."_RedisString}), + RedisArray({"6.0.0"_RedisString, "Added the `AUTH2` option."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "host"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "host"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + RedisMap({ + {"name", "key-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "empty-string"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "empty-string"_RedisString}, + {"token", ""_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "destination-db"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "destination-db"_RedisString}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + RedisMap({ + {"name", "copy"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "copy"_RedisString}, + {"token", "COPY"_RedisString}, + {"since", "3.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"since", "3.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "authentication"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "auth"_RedisString}, + {"type", 
"string"_RedisString}, + {"display_text", "password"_RedisString}, + {"token", "AUTH"_RedisString}, + {"since", "4.0.7"_RedisString}, + }), + RedisMap({ + {"name", "auth2"_RedisString}, + {"type", "block"_RedisString}, + {"token", "AUTH2"_RedisString}, + {"since", "6.0.0"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + RedisMap({ + {"name", "password"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "password"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "keys"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "KEYS"_RedisString}, + {"since", "3.0.6"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"xadd", + RedisMap({ + {"summary", "Appends a new message to a stream. Creates the key if it doesn't exist."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(1) when adding a new entry, O(N) when trimming where N being the number of entries evicted."_RedisString}, + {"history", + RedisSet({ + RedisArray( + {"6.2.0"_RedisString, + "Added the `NOMKSTREAM` option, `MINID` trimming strategy and the `LIMIT` option."_RedisString}), + RedisArray({"7.0.0"_RedisString, "Added support for the `-*` explicit ID form."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "nomkstream"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nomkstream"_RedisString}, + {"token", "NOMKSTREAM"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "trim"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "strategy"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "maxlen"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "maxlen"_RedisString}, + {"token", "MAXLEN"_RedisString}, + }), + RedisMap({ + {"name", "minid"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "minid"_RedisString}, + {"token", "MINID"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "operator"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "equal"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "equal"_RedisString}, + {"token", "="_RedisString}, + }), + RedisMap({ + {"name", "approximately"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "approximately"_RedisString}, + {"token", "~"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "threshold"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "threshold"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + 
}), + RedisMap({ + {"name", "id-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "auto-id"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "auto-id"_RedisString}, + {"token", "*"_RedisString}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"sinterstore", + RedisMap({ + {"summary", "Stores the intersect of multiple sets in a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"zrank", + RedisMap({ + {"summary", "Returns the index of a member in a sorted set ordered by ascending scores."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N))"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.2.0"_RedisString, "Added the optional `WITHSCORE` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + RedisMap({ + {"name", "withscore"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscore"_RedisString}, + {"token", "WITHSCORE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"pexpiretime", + RedisMap({ + {"summary", "Returns the expiration time of a key as a Unix milliseconds timestamp."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"bitop", RedisMap({ + {"summary", "Performs bitwise operations on multiple strings, and stores the result."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(N)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "operation"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "and"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "and"_RedisString}, + {"token", "AND"_RedisString}, + }), + RedisMap({ + {"name", "or"_RedisString}, + {"type", 
"pure-token"_RedisString}, + {"display_text", "or"_RedisString}, + {"token", "OR"_RedisString}, + }), + RedisMap({ + {"name", "xor"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xor"_RedisString}, + {"token", "XOR"_RedisString}, + }), + RedisMap({ + {"name", "not"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "not"_RedisString}, + {"token", "NOT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "destkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destkey"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"wait", + RedisMap({ + {"summary", + "Blocks until the asynchronous replication of all preceding write commands sent by the connection is completed."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numreplicas"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numreplicas"_RedisString}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"hexists", RedisMap({ + {"summary", "Determines whether a field exists in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + })}, + })}, + {"strlen", RedisMap({ + {"summary", "Returns the length of a string value."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"sort_ro", + RedisMap({ + {"summary", "Returns the sorted elements of a list, a set, or a sorted set."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is O(N)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "by-pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "BY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "get-pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "GET"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "sorting"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sorting"_RedisString}, + {"token", "ALPHA"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"subscribe", RedisMap({ + {"summary", "Listens for messages published to channels."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of channels to subscribe to."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "channel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "channel"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"touch", + RedisMap({ + {"summary", + "Returns the number of existing keys out of those specified after updating the time they were last accessed."_RedisString}, + {"since", "3.2.1"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(N) where N is the number of keys that will be touched."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"hvals", RedisMap({ + {"summary", "Returns all values in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the size of the hash."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"zmpop", + RedisMap({ 
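+ // ZMPOP (7.0.0) is the multi-key, non-blocking relative of ZPOPMIN/ZPOPMAX
+ // (BZMPOP is the blocking variant); only the first non-empty sorted set is
+ // popped from. Rendered as command syntax:
+ //   ZMPOP numkeys key [key ...] <MIN | MAX> [COUNT count]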
+ {"summary", + "Returns the highest- or lowest-scoring members from one or more sorted sets after removing them. Deletes the sorted set if the last member was popped."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"object", + RedisMap({ + {"summary", "A container for object introspection commands."_RedisString}, + {"since", "2.2.3"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"object|freq", + RedisMap({ + {"summary", "Returns the logarithmic access frequency counter of a Redis object."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"object|encoding", RedisMap({ + {"summary", "Returns the internal encoding of a Redis object."_RedisString}, + {"since", "2.2.3"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"object|idletime", + RedisMap({ + {"summary", "Returns the time since the last access to a Redis object."_RedisString}, + {"since", "2.2.3"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"object|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"object|refcount", RedisMap({ + {"summary", "Returns the reference count of a value of a key."_RedisString}, + {"since", "2.2.3"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + 
{"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + })}, + })}, + {"smembers", RedisMap({ + {"summary", "Returns all members of a set."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the set cardinality."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"save", RedisMap({ + {"summary", "Synchronously saves the database(s) to disk."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the total number of keys in all databases"_RedisString}, + })}, + {"script", + RedisMap({ + {"summary", "A container for Lua scripts management commands."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"script|exists", + RedisMap({ + {"summary", "Determines whether server-side Lua scripts exist in the script cache."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", + "O(N) with N being the number of scripts to check (so checking a single script is an O(1) operation)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sha1"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "sha1"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"script|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"script|debug", RedisMap({ + {"summary", "Sets the debug mode of server-side Lua scripts."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "mode"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "yes"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "yes"_RedisString}, + {"token", "YES"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + }), + RedisMap({ + {"name", "no"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "no"_RedisString}, + {"token", "NO"_RedisString}, + }), + })}, + }), + })}, + })}, + {"script|kill", RedisMap({ + {"summary", "Terminates a server-side Lua script during execution."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"script|flush", + RedisMap({ + {"summary", "Removes all server-side Lua scripts from the script cache."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) with N being the number of scripts in cache"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, + "Added the `ASYNC` and `SYNC` flushing mode modifiers."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ 
+ "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "async"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "async"_RedisString}, + {"token", "ASYNC"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + }), + })}, + }), + })}, + })}, + {"script|load", + RedisMap({ + {"summary", "Loads a server-side Lua script to the script cache."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) with N being the length in bytes of the script body."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "script"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "script"_RedisString}, + }), + })}, + })}, + })}, + })}, + {"zrevrangebylex", + RedisMap({ + {"summary", "Returns members in a sorted set within a lexicographical range in reverse order."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `REV` and `BYLEX` arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"asking", RedisMap({ + {"summary", "Signals that a cluster client is following an -ASK redirect."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"hscan", + RedisMap({ + {"summary", "Iterates over fields and values of a hash."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", + "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. 
N is the number of elements inside the collection."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "cursor"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cursor"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "MATCH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"expiretime", RedisMap({ + {"summary", "Returns the expiration time of a key as a Unix timestamp."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"scard", RedisMap({ + {"summary", "Returns the number of members in a set."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"function", + RedisMap({ + {"summary", "A container for function commands."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"function|delete", RedisMap({ + {"summary", "Deletes a library and its functions."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "library-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "library-name"_RedisString}, + }), + })}, + })}, + {"function|kill", RedisMap({ + {"summary", "Terminates a function during execution."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"function|flush", RedisMap({ + {"summary", "Deletes all libraries and functions."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) where N is the number of functions deleted"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "async"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "async"_RedisString}, + {"token", "ASYNC"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + }), + })}, + }), + })}, + })}, + {"function|load", RedisMap({ + {"summary", "Creates a library."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1) (considering compilation time is 
redundant)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "function-code"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "function-code"_RedisString}, + }), + })}, + })}, + {"function|restore", + RedisMap({ + {"summary", "Restores all libraries from a payload."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) where N is the number of functions on the payload"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "serialized-value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "serialized-value"_RedisString}, + }), + RedisMap({ + {"name", "policy"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "flush"_RedisString}, + {"token", "FLUSH"_RedisString}, + }), + RedisMap({ + {"name", "append"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "append"_RedisString}, + {"token", "APPEND"_RedisString}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + }), + })}, + }), + })}, + })}, + {"function|dump", RedisMap({ + {"summary", "Dumps all libraries into a serialized binary payload."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) where N is the number of functions"_RedisString}, + })}, + {"function|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"function|list", RedisMap({ + {"summary", "Returns information about all libraries."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) where N is the number of functions"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "library-name-pattern"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "library-name-pattern"_RedisString}, + {"token", "LIBRARYNAME"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withcode"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcode"_RedisString}, + {"token", "WITHCODE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"function|stats", RedisMap({ + {"summary", "Returns information about a function during execution."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"georadiusbymember", + RedisMap({ + {"summary", + "Queries a geospatial index for members within a distance from a member, optionally stores the result."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index."_RedisString}, + 
{"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` and `FROMMEMBER` arguments"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added support for uppercase unit names."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "store"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "storekey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", 
"key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "STORE"_RedisString}, + }), + RedisMap({ + {"name", "storedistkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 2_RedisInt}, + {"token", "STOREDIST"_RedisString}, + }), + })}, + }), + })}, + })}, + {"zdiff", + RedisMap({ + {"summary", "Returns the difference between multiple sorted sets."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"georadius_ro", + RedisMap({ + {"summary", + "Returns members from a geospatial index that are within a distance from a coordinate."_RedisString}, + {"since", "3.2.10"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`GEOSEARCH` with the `BYRADIUS` argument"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `ANY` option for `COUNT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", 
RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + })}, + })}, + {"pubsub", + RedisMap( + { + {"summary", "A container for Pub/Sub commands."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"pubsub|numsub", + RedisMap({ + {"summary", "Returns a count of subscribers to channels."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N) for the NUMSUB subcommand, where N is the number of requested channels"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "channel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "channel"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"pubsub|numpat", RedisMap({ + {"summary", "Returns a count of unique pattern subscriptions."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"pubsub|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"pubsub|shardnumsub", + RedisMap({ + {"summary", "Returns the count of subscribers of shard channels."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N) for the SHARDNUMSUB subcommand, where N is the number of requested shard channels"_RedisString}, + {"arguments", RedisArray( + { + RedisMap( + { + {"name", "shardchannel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "shardchannel"_RedisString}, + {"flags", RedisArray( + { + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"pubsub|shardchannels", RedisMap( + { + {"summary", "Returns the active shard 
channels."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N) where N is the number of active shard channels, and assuming constant time pattern matching (relatively short shard channels)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"pubsub|channels", + RedisMap({ + {"summary", "Returns the active channels."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N) where N is the number of active channels, and assuming constant time pattern matching (relatively short channels and patterns)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + })}, + })}, + {"zrandmember", RedisMap({ + {"summary", "Returns one or more random members from a sorted set."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(N) where N is the number of members returned"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "options"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"pfcount", + RedisMap({ + {"summary", + "Returns the approximated cardinality of the set(s) observed by the HyperLogLog key(s)."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", + "O(1) with a very small average constant time when called with a single key. O(N) with N being the number of keys, and much bigger constant times, when called with multiple keys."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"move", RedisMap({ + {"summary", "Moves a key to another database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "db"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "db"_RedisString}, + }), + })}, + })}, + {"blmpop", + RedisMap({ + {"summary", + "Pops the first element from one of multiple lists. Blocks until an element is available otherwise. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N+M) where N is the number of provided keys and M is the number of elements returned."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"publish", + RedisMap({ + {"summary", "Posts a message to a channel."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N+M) where N is the number of clients subscribed to the receiving channel and M is the total number of subscribed patterns (by any client)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "channel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "channel"_RedisString}, + }), + RedisMap({ + {"name", "message"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "message"_RedisString}, + }), + })}, + })}, + {"xlen", RedisMap({ + {"summary", "Returns the number of messages in a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"info", + RedisMap({ + {"summary", "Returns information and statistics about the server."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added support for taking multiple section arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "section"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "section"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sismember", RedisMap({ + {"summary", "Determines whether a member belongs to a set."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", 
"string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + })}, + {"cluster", + RedisMap({ + {"summary", "A container for Redis Cluster commands."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"cluster|links", RedisMap({ + {"summary", "Returns a list of all TCP links to and from peer nodes."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of Cluster nodes"_RedisString}, + })}, + {"cluster|flushslots", RedisMap({ + {"summary", "Deletes all slots information from a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|setslot", + RedisMap({ + {"summary", "Binds a hash slot to a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + }), + RedisMap({ + {"name", "subcommand"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "importing"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + {"token", "IMPORTING"_RedisString}, + }), + RedisMap({ + {"name", "migrating"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + {"token", "MIGRATING"_RedisString}, + }), + RedisMap({ + {"name", "node"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + {"token", "NODE"_RedisString}, + }), + RedisMap({ + {"name", "stable"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "stable"_RedisString}, + {"token", "STABLE"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|keyslot", RedisMap({ + {"summary", "Returns the hash slot for a key."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the number of bytes in the key"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "key"_RedisString}, + }), + })}, + })}, + {"cluster|addslotsrange", + RedisMap({ + {"summary", "Assigns new hash slot ranges to a node."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", + "O(N) where N is the total number of the slots between the start slot and end slot arguments."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "range"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "start-slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start-slot"_RedisString}, + }), + RedisMap({ + {"name", "end-slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end-slot"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|saveconfig", + RedisMap({ + {"summary", "Forces a node to save the cluster configuration to disk."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|failover", + RedisMap({ + {"summary", "Forces a replica to perform a manual 
failover of its master."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "options"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "force"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "force"_RedisString}, + {"token", "FORCE"_RedisString}, + }), + RedisMap({ + {"name", "takeover"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "takeover"_RedisString}, + {"token", "TAKEOVER"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|replicate", RedisMap({ + {"summary", "Configures a node as a replica of a master node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|shards", RedisMap({ + {"summary", "Returns the mapping of cluster slots to shards."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of cluster nodes"_RedisString}, + })}, + {"cluster|meet", + RedisMap({ + {"summary", "Forces a node to handshake with another node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, + "Added the optional `cluster_bus_port` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "ip"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "ip"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + RedisMap({ + {"name", "cluster-bus-port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cluster-bus-port"_RedisString}, + {"since", "4.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"cluster|nodes", RedisMap({ + {"summary", "Returns the cluster configuration for a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of Cluster nodes"_RedisString}, + })}, + {"cluster|countkeysinslot", RedisMap({ + {"summary", "Returns the number of keys in a hash slot."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + }), + })}, + })}, + {"cluster|myshardid", RedisMap({ + {"summary", "Returns the shard ID of a node."_RedisString}, + {"since", "7.2.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|slaves", RedisMap({ + {"summary", "Lists the replica nodes of a master node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "5.0.0"_RedisString}, + {"replaced_by", "`CLUSTER REPLICAS`"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", 
"node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|delslots", + RedisMap({ + {"summary", "Sets hash slots as unbound for a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of hash slot arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"cluster|myid", RedisMap({ + {"summary", "Returns the ID of a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|replicas", RedisMap({ + {"summary", "Lists the replica nodes of a master node."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|slots", + RedisMap({ + {"summary", "Returns the mapping of cluster slots to nodes."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of Cluster nodes"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "7.0.0"_RedisString}, + {"replaced_by", "`CLUSTER SHARDS`"_RedisString}, + {"history", + RedisSet({ + RedisArray({"4.0.0"_RedisString, "Added node IDs."_RedisString}), + RedisArray({"7.0.0"_RedisString, "Added additional networking metadata field."_RedisString}), + })}, + })}, + {"cluster|info", RedisMap({ + {"summary", "Returns information about the state of a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|forget", RedisMap({ + {"summary", "Removes a node from the nodes table."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|count-failure-reports", + RedisMap({ + {"summary", "Returns the number of active failure reports active for a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the number of failure reports"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|addslots", + RedisMap({ + {"summary", "Assigns new hash slots to a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of hash slot arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + {"flags", 
RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"cluster|getkeysinslot", RedisMap({ + {"summary", "Returns the key names in a hash slot."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the number of requested keys"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + })}, + {"cluster|delslotsrange", + RedisMap({ + {"summary", "Sets hash slot ranges as unbound for a node."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", + "O(N) where N is the total number of the slots between the start slot and end slot arguments."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "range"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "start-slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start-slot"_RedisString}, + }), + RedisMap({ + {"name", "end-slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end-slot"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|set-config-epoch", RedisMap({ + {"summary", "Sets the configuration epoch for a new node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "config-epoch"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "config-epoch"_RedisString}, + }), + })}, + })}, + {"cluster|reset", + RedisMap({ + {"summary", "Resets a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", + "O(N) where N is the number of known nodes. 
The command may execute a FLUSHALL as a side effect."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "reset-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "hard"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "hard"_RedisString}, + {"token", "HARD"_RedisString}, + }), + RedisMap({ + {"name", "soft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "soft"_RedisString}, + {"token", "SOFT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|bumpepoch", RedisMap({ + {"summary", "Advances the cluster config epoch."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"pttl", RedisMap({ + {"summary", "Returns the expiration time in milliseconds of a key."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"2.8.0"_RedisString, "Added the -2 reply."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"zcount", + RedisMap({ + {"summary", "Returns the count of members in a sorted set that have scores within a range."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) with N being the number of elements in the sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "max"_RedisString}, + }), + })}, + })}, + {"replconf", RedisMap({ + {"summary", "An internal command for configuring the replication stream."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + })}, + {"zintercard", + RedisMap({ + {"summary", "Returns the number of members of the intersect of multiple sorted sets."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N*K) worst case with N being the smallest input sorted set, K being the number of input sorted sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "limit"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zremrangebylex", + RedisMap({ + {"summary", + "Removes members in a sorted set within a lexicographical range. 
Deletes the sorted set if all members were removed."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + })}, + })}, + {"pfdebug", RedisMap({ + {"summary", "Internal commands for debugging HyperLogLog values."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", "N/A"_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "subcommand"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "subcommand"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"hgetall", RedisMap({ + {"summary", "Returns all fields and values in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the size of the hash."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"dump", + RedisMap({ + {"summary", "Returns a serialized representation of the value stored at a key."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(1) to access the key and additional O(N*M) to serialize it, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"geohash", + RedisMap({ + {"summary", "Returns members from a geospatial index as geohash strings."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(log(N)) for each member requested, where N is the number of elements in the sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"pfadd", RedisMap({ + {"summary", "Adds elements to a HyperLogLog key. 
Creates the key if it doesn't exist."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", "O(1) to add every element."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"scan", + RedisMap({ + {"summary", "Iterates over the key names in the database."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, "Added the `TYPE` subcommand."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "cursor"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cursor"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "MATCH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "type"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "type"_RedisString}, + {"token", "TYPE"_RedisString}, + {"since", "6.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"client", + RedisMap({ + {"summary", "A container for client connection commands."_RedisString}, + {"since", "2.4.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"client|caching", + RedisMap({ + {"summary", "Instructs the server whether to track the keys in the next request."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "mode"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "yes"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "yes"_RedisString}, + {"token", "YES"_RedisString}, + }), + RedisMap({ + {"name", "no"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "no"_RedisString}, + {"token", "NO"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|trackinginfo", + RedisMap({ + {"summary", + "Returns information about server-assisted client-side caching for the connection."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|getredir", + RedisMap({ + {"summary", + "Returns the client ID to which the connection's tracking notifications are redirected."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|info", RedisMap({ + {"summary", "Returns information about the 
connection."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|pause", + RedisMap({ + {"summary", "Suspends commands processing."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"6.2.0"_RedisString, + "`CLIENT PAUSE WRITE` mode added along with the `mode` option."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + RedisMap({ + {"name", "mode"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "write"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "write"_RedisString}, + {"token", "WRITE"_RedisString}, + }), + RedisMap({ + {"name", "all"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "all"_RedisString}, + {"token", "ALL"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|no-evict", RedisMap({ + {"summary", "Sets the client eviction mode of the connection."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "enabled"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "on"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "on"_RedisString}, + {"token", "ON"_RedisString}, + }), + RedisMap({ + {"name", "off"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "off"_RedisString}, + {"token", "OFF"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|no-touch", + RedisMap({ + {"summary", + "Controls whether commands sent by the client affect the LRU/LFU of accessed keys."_RedisString}, + {"since", "7.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "enabled"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "on"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "on"_RedisString}, + {"token", "ON"_RedisString}, + }), + RedisMap({ + {"name", "off"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "off"_RedisString}, + {"token", "OFF"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|kill", + RedisMap({ + {"summary", "Terminates open connections."_RedisString}, + {"since", "2.4.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(N) where N is the number of client connections"_RedisString}, + {"history", + RedisSet({ + RedisArray({"2.8.12"_RedisString, "Added new filter format."_RedisString}), + RedisArray({"2.8.12"_RedisString, "`ID` option."_RedisString}), + RedisArray({"3.2.0"_RedisString, "Added `master` type in for `TYPE` option."_RedisString}), + RedisArray( + {"5.0.0"_RedisString, + "Replaced `slave` `TYPE` with `replica`. 
`slave` still supported for backward compatibility."_RedisString}), + RedisArray({"6.2.0"_RedisString, "`LADDR` option."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "filter"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "old-format"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "ip:port"_RedisString}, + {"deprecated_since", "2.8.12"_RedisString}, + }), + RedisMap({ + {"name", "new-format"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "client-id"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "client-id"_RedisString}, + {"token", "ID"_RedisString}, + {"since", "2.8.12"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "client-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "TYPE"_RedisString}, + {"since", "2.8.12"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "normal"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "normal"_RedisString}, + {"token", "NORMAL"_RedisString}, + }), + RedisMap({ + {"name", "master"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "master"_RedisString}, + {"token", "MASTER"_RedisString}, + {"since", "3.2.0"_RedisString}, + }), + RedisMap({ + {"name", "slave"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "slave"_RedisString}, + {"token", "SLAVE"_RedisString}, + }), + RedisMap({ + {"name", "replica"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replica"_RedisString}, + {"token", "REPLICA"_RedisString}, + {"since", "5.0.0"_RedisString}, + }), + RedisMap({ + {"name", "pubsub"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "pubsub"_RedisString}, + {"token", "PUBSUB"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + {"token", "USER"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "addr"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "ip:port"_RedisString}, + {"token", "ADDR"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "laddr"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "ip:port"_RedisString}, + {"token", "LADDR"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "skipme"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "SKIPME"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "yes"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "yes"_RedisString}, + {"token", "YES"_RedisString}, + }), + RedisMap({ + {"name", "no"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "no"_RedisString}, + {"token", "NO"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + })}, + })}, + {"client|setinfo", + RedisMap({ + {"summary", "Sets information specific to the client or connection."_RedisString}, + {"since", "7.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", 
"O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "attr"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "libname"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "libname"_RedisString}, + {"token", "LIB-NAME"_RedisString}, + }), + RedisMap({ + {"name", "libver"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "libver"_RedisString}, + {"token", "LIB-VER"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|id", RedisMap({ + {"summary", "Returns the unique client ID of the connection."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|getname", RedisMap({ + {"summary", "Returns the name of the connection."_RedisString}, + {"since", "2.6.9"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|tracking", + RedisMap({ + {"summary", "Controls server-assisted client-side caching for the connection."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1). Some options may introduce additional complexity."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "status"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "on"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "on"_RedisString}, + {"token", "ON"_RedisString}, + }), + RedisMap({ + {"name", "off"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "off"_RedisString}, + {"token", "OFF"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "client-id"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "client-id"_RedisString}, + {"token", "REDIRECT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "prefix"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "prefix"_RedisString}, + {"token", "PREFIX"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "bcast"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bcast"_RedisString}, + {"token", "BCAST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "optin"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "optin"_RedisString}, + {"token", "OPTIN"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "optout"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "optout"_RedisString}, + {"token", "OPTOUT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "noloop"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "noloop"_RedisString}, + {"token", "NOLOOP"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"client|setname", RedisMap({ + {"summary", "Sets the connection name."_RedisString}, + {"since", "2.6.9"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "connection-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "connection-name"_RedisString}, + }), + })}, + })}, + {"client|list", + 
RedisMap({ + {"summary", "Lists open connections."_RedisString}, + {"since", "2.4.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(N) where N is the number of client connections"_RedisString}, + {"history", + RedisSet({ + RedisArray({"2.8.12"_RedisString, "Added unique client `id` field."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added optional `TYPE` filter."_RedisString}), + RedisArray({"6.0.0"_RedisString, "Added `user` field."_RedisString}), + RedisArray( + {"6.2.0"_RedisString, + "Added `argv-mem`, `tot-mem`, `laddr` and `redir` fields and the optional `ID` filter."_RedisString}), + RedisArray( + {"7.0.0"_RedisString, "Added `resp`, `multi-mem`, `rbs` and `rbp` fields."_RedisString}), + RedisArray({"7.0.3"_RedisString, "Added `ssub` field."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "client-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "TYPE"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "normal"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "normal"_RedisString}, + {"token", "NORMAL"_RedisString}, + }), + RedisMap({ + {"name", "master"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "master"_RedisString}, + {"token", "MASTER"_RedisString}, + }), + RedisMap({ + {"name", "replica"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replica"_RedisString}, + {"token", "REPLICA"_RedisString}, + }), + RedisMap({ + {"name", "pubsub"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "pubsub"_RedisString}, + {"token", "PUBSUB"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "client-id"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "client-id"_RedisString}, + {"token", "ID"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"client|reply", RedisMap({ + {"summary", "Instructs the server whether to reply to commands."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "action"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "on"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "on"_RedisString}, + {"token", "ON"_RedisString}, + }), + RedisMap({ + {"name", "off"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "off"_RedisString}, + {"token", "OFF"_RedisString}, + }), + RedisMap({ + {"name", "skip"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "skip"_RedisString}, + {"token", "SKIP"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|unblock", + RedisMap({ + {"summary", + "Unblocks a client blocked by a blocking command from a different connection."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(log N) where N is the number of client connections"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "client-id"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "client-id"_RedisString}, + }), + RedisMap({ + {"name", "unblock-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + 
{"arguments", RedisArray({ + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "timeout"_RedisString}, + {"token", "TIMEOUT"_RedisString}, + }), + RedisMap({ + {"name", "error"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "error"_RedisString}, + {"token", "ERROR"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|unpause", RedisMap({ + {"summary", "Resumes processing commands from paused clients."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(N) Where N is the number of paused clients"_RedisString}, + })}, + })}, + })}, + {"shutdown", + RedisMap({ + {"summary", "Synchronously saves the database(s) to disk and shuts down the Redis server."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(N) when saving, where N is the total number of keys in all databases when saving data, otherwise O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `NOW`, `FORCE` and `ABORT` modifiers."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "save-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nosave"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nosave"_RedisString}, + {"token", "NOSAVE"_RedisString}, + }), + RedisMap({ + {"name", "save"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "save"_RedisString}, + {"token", "SAVE"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "now"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "now"_RedisString}, + {"token", "NOW"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "force"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "force"_RedisString}, + {"token", "FORCE"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "abort"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "abort"_RedisString}, + {"token", "ABORT"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lmpop", + RedisMap({ + {"summary", + "Returns multiple elements from a list after removing them. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N+M) where N is the number of provided keys and M is the number of elements returned."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"watch", RedisMap({ + {"summary", "Monitors changes to keys to determine the execution of a transaction."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "O(1) for every key."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"hkeys", RedisMap({ + {"summary", "Returns all fields in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the size of the hash."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"zpopmin", + RedisMap({ + {"summary", + "Returns the lowest-scoring members from a sorted set after removing them. Deletes the sorted set if the last member was popped."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"ltrim", + RedisMap({ + {"summary", + "Removes elements from both ends of a list. 
Deletes the list if all elements were trimmed."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of elements to be removed by the operation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + })}, + })}, + {"evalsha_ro", RedisMap({ + {"summary", "Executes a read-only server-side Lua script by SHA1 digest."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the script that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sha1"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "sha1"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"fcall", RedisMap({ + {"summary", "Invokes a function."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the function that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "function"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "function"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sort", + RedisMap({ + {"summary", + "Sorts the elements in a list, a set, or a sorted set, optionally storing the result."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is O(N)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "by-pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "BY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "get-pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "GET"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "sorting"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sorting"_RedisString}, + {"token", "ALPHA"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 2_RedisInt}, + {"token", "STORE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"pfmerge", RedisMap({ + {"summary", "Merges one or more HyperLogLog values into a single key."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", "O(N) to merge N HyperLogLogs, but with high constant times."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destkey"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "sourcekey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "sourcekey"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"georadius", + RedisMap({ + {"summary", + "Queries a geospatial index for members within a distance from a coordinate, optionally stores the result."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + 
{"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` argument"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `ANY` option for `COUNT`."_RedisString}), + RedisArray({"7.0.0"_RedisString, "Added support for uppercase unit names."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "store"_RedisString}, + {"type", "oneof"_RedisString}, + 
{"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "storekey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "STORE"_RedisString}, + }), + RedisMap({ + {"name", "storedistkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 2_RedisInt}, + {"token", "STOREDIST"_RedisString}, + }), + })}, + }), + })}, + })}, + {"zrevrangebyscore", + RedisMap({ + {"summary", "Returns members in a sorted set within a range of scores in reverse order."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `REV` and `BYSCORE` arguments"_RedisString}, + {"history", RedisSet({ + RedisArray({"2.1.6"_RedisString, "`min` and `max` can be exclusive."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"lset", + RedisMap({ + {"summary", "Sets the value of an element in a list by its index."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N) where N is the length of the list. Setting either the first or the last element of the list is O(1)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "index"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index"_RedisString}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + }), + })}, + })}, + {"xrevrange", + RedisMap({ + {"summary", "Returns the messages from a stream within a range of IDs in reverse order."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(N) with N being the number of elements returned. If N is constant (e.g. 
always asking for the first 10 elements with COUNT), you can consider it O(1)."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added exclusive ranges."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"linsert", + RedisMap({ + {"summary", "Inserts an element before or after another element in a list."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N) where N is the number of elements to traverse before seeing the value pivot. This means that inserting somewhere on the left end of the list (head) can be considered O(1) and inserting somewhere on the right end (tail) is O(N)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "before"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "before"_RedisString}, + {"token", "BEFORE"_RedisString}, + }), + RedisMap({ + {"name", "after"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "after"_RedisString}, + {"token", "AFTER"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "pivot"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "pivot"_RedisString}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + }), + })}, + })}, + {"incr", + RedisMap({ + {"summary", + "Increments the integer value of a key by one. 
Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"hrandfield", RedisMap({ + {"summary", "Returns one or more random fields from a hash."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the number of fields returned"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "options"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "withvalues"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withvalues"_RedisString}, + {"token", "WITHVALUES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"rpushx", + RedisMap({ + {"summary", "Appends an element to a list only when the list exists."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, "Accepts multiple `element` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"lrem", + RedisMap({ + {"summary", "Removes elements from a list. 
Deletes the list if the last element was removed."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N+M) where N is the length of the list and M is the number of elements removed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + }), + })}, + })}, + {"hello", + RedisMap({ + {"summary", "Handshakes with the Redis server."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray( + {"6.2.0"_RedisString, + "`protover` made optional; when called without arguments the command reports the current connection's context."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "arguments"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "protover"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "protover"_RedisString}, + }), + RedisMap({ + {"name", "auth"_RedisString}, + {"type", "block"_RedisString}, + {"token", "AUTH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + RedisMap({ + {"name", "password"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "password"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "clientname"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "clientname"_RedisString}, + {"token", "SETNAME"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"config", + RedisMap({ + {"summary", "A container for server configuration commands."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"config|resetstat", RedisMap({ + {"summary", "Resets the server's statistics."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"config|get", + RedisMap({ + {"summary", "Returns the effective values of configuration parameters."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) when N is the number of configuration parameters provided"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, + "Added the ability to pass multiple pattern parameters in one call"_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "parameter"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "parameter"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"config|rewrite", RedisMap({ + {"summary", "Persists the effective configuration to file."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + 
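// Reviewer note: container commands keep their subcommands in a nested "subcommands" map keyed "<container>|<subcommand>", mirroring the shape of a COMMAND DOCS reply. + // Illustrative lookup (hypothetical `docs` variable, not defined here): docs["config"]["subcommands"]["config|get"]["summary"] would yield "Returns the effective values of configuration parameters.". + 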
{"config|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"config|set", + RedisMap({ + {"summary", "Sets configuration parameters in-flight."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) when N is the number of configuration parameters provided"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, + "Added the ability to set multiple parameters in one call."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "parameter"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "parameter"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + })}, + })}, + {"zincrby", RedisMap({ + {"summary", "Increments the score of a member in a sorted set."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) where N is the number of elements in the sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + })}, + {"bitfield_ro", RedisMap({ + {"summary", "Performs arbitrary read-only bitfield integer operations on strings."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(1) for each subcommand specified"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "get-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "GET"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "encoding"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "encoding"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + })}, + }), + })}, + })}, + {"expire", + RedisMap({ + {"summary", "Sets the expiration time of a key in seconds."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added options: `NX`, `XX`, `GT` and `LT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + }), + RedisMap({ + {"name", 
"condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + RedisMap({ + {"name", "gt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"hincrbyfloat", + RedisMap({ + {"summary", + "Increments the floating point value of a field by a number. Uses 0 as initial value if the field doesn't exist."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + })}, + {"srandmember", + RedisMap({ + {"summary", "Get one or multiple random members from a set"_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "Without the count argument O(1), otherwise O(N) where N is the absolute value of the passed count."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.6.0"_RedisString, "Added the optional `count` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"since", "2.6.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"multi", RedisMap({ + {"summary", "Starts a transaction."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"evalsha", RedisMap({ + {"summary", "Executes a server-side Lua script by SHA1 digest."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the script that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sha1"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "sha1"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + 
"multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sscan", + RedisMap({ + {"summary", "Iterates over members of a set."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "cursor"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cursor"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "MATCH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"exec", RedisMap({ + {"summary", "Executes all commands in a transaction."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "Depends on commands in the transaction"_RedisString}, + })}, + {"geoadd", + RedisMap({ + {"summary", + "Adds one or more members to a geospatial index. The key is created if it doesn't exist."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(log(N)) for each item added, where N is the number of elements in the sorted set."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `CH`, `NX` and `XX` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "change"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "change"_RedisString}, + {"token", "CH"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + }), + })}, + })}, + {"waitaof", + RedisMap({ + {"summary", + "Blocks until all of the preceding write commands sent by the connection 
are written to the append-only file of the master and/or replicas."_RedisString}, + {"since", "7.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numlocal"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numlocal"_RedisString}, + }), + RedisMap({ + {"name", "numreplicas"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numreplicas"_RedisString}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"brpoplpush", + RedisMap({ + {"summary", + "Pops an element from a list, pushes it to another list and returns it. Blocks until an element is available otherwise. Deletes the list if the last element was popped."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`BLMOVE` with the `RIGHT` and `LEFT` arguments"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"xinfo", + RedisMap( + { + {"summary", "A container for stream introspection commands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"xinfo|groups", + RedisMap({ + {"summary", "Returns a list of the consumer groups of a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `entries-read` and `lag` fields"_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"xinfo|consumers", + RedisMap({ + {"summary", "Returns a list of the consumers in a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.2.0"_RedisString, "Added the `inactive` field."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + })}, + })}, + {"xinfo|stream", + RedisMap( + { + {"summary", "Returns information about a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, "Added 
the `FULL` modifier."_RedisString}), + RedisArray( + {"7.0.0"_RedisString, + "Added the `max-deleted-entry-id`, `entries-added`, `recorded-first-entry-id`, `entries-read` and `lag` fields"_RedisString}), + RedisArray( + {"7.2.0"_RedisString, + "Added the `active-time` field, and changed the meaning of `seen-time`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "full-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "full"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "full"_RedisString}, + {"token", "FULL"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"xinfo|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"getdel", RedisMap({ + {"summary", "Returns the string value of a key after deleting the key."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"restore", + RedisMap({ + {"summary", "Creates a key from the serialized representation of a value."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). 
However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N))."_RedisString}, + {"history", RedisSet({ + RedisArray({"3.0.0"_RedisString, "Added the `REPLACE` modifier."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added the `ABSTTL` modifier."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added the `IDLETIME` and `FREQ` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "ttl"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "ttl"_RedisString}, + }), + RedisMap({ + {"name", "serialized-value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "serialized-value"_RedisString}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"since", "3.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "absttl"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "absttl"_RedisString}, + {"token", "ABSTTL"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + {"token", "IDLETIME"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "frequency"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "frequency"_RedisString}, + {"token", "FREQ"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xack", + RedisMap({ + {"summary", + "Returns the number of messages that were successfully acknowledged by the consumer group member of a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1) for each message ID processed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"bzpopmax", + RedisMap({ + {"summary", + "Removes and returns the member with the highest score from one or more sorted sets. Blocks until a member is available otherwise. 
Deletes the sorted set if the last element was popped."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) with N being the number of elements in the sorted set."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"hsetnx", RedisMap({ + {"summary", "Sets the value of a field in a hash only when the field doesn't exist."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"zcard", RedisMap({ + {"summary", "Returns the number of members in a sorted set."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"getex", RedisMap({ + {"summary", "Returns the string value of a key after setting its expiration time."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "expiration"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + {"token", "EX"_RedisString}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "PX"_RedisString}, + }), + RedisMap({ + {"name", "unix-time-seconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-seconds"_RedisString}, + {"token", "EXAT"_RedisString}, + }), + RedisMap({ + {"name", "unix-time-milliseconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-milliseconds"_RedisString}, + {"token", "PXAT"_RedisString}, + }), + RedisMap({ + {"name", "persist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "persist"_RedisString}, + {"token", "PERSIST"_RedisString}, + }), + })}, + }), + })}, + })}, + {"dbsize", RedisMap({ + {"summary", "Returns the number of keys in the database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", 
"O(1)"_RedisString}, + })}, + {"sintercard", + RedisMap({ + {"summary", "Returns the number of members of the intersect of multiple sets."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "limit"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"geodist", RedisMap({ + {"summary", "Returns the distance between two members of a geospatial index."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", "O(log(N))"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member1"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member1"_RedisString}, + }), + RedisMap({ + {"name", "member2"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member2"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + })}, + {"renamenx", + RedisMap({ + {"summary", "Renames a key only when the target key name doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray( + {"3.2.0"_RedisString, + "The command no longer returns an error when source and destination names are the same."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "newkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "newkey"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + })}, + })}, + {"flushdb", + RedisMap({ + {"summary", "Remove all keys from the current database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of keys in the selected database"_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, "Added the `ASYNC` flushing 
mode modifier."_RedisString}), + RedisArray({"6.2.0"_RedisString, "Added the `SYNC` flushing mode modifier."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "async"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "async"_RedisString}, + {"token", "ASYNC"_RedisString}, + {"since", "4.0.0"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + })}, + }), + })}, + })}, + {"zrange", + RedisMap({ + {"summary", "Returns members in a sorted set within a range of indexes."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned."_RedisString}, + {"history", + RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `REV`, `BYSCORE`, `BYLEX` and `LIMIT` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + RedisMap({ + {"name", "sortby"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "byscore"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "byscore"_RedisString}, + {"token", "BYSCORE"_RedisString}, + }), + RedisMap({ + {"name", "bylex"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bylex"_RedisString}, + {"token", "BYLEX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "rev"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "rev"_RedisString}, + {"token", "REV"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zrevrank", + RedisMap({ + {"summary", "Returns the index of a member in a sorted set ordered by descending scores."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N))"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.2.0"_RedisString, "Added the optional 
`WITHSCORE` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + RedisMap({ + {"name", "withscore"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscore"_RedisString}, + {"token", "WITHSCORE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"decrby", + RedisMap({ + {"summary", + "Decrements a number from the integer value of a key. Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "decrement"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "decrement"_RedisString}, + }), + })}, + })}, + {"rename", RedisMap({ + {"summary", "Renames a key and overwrites the destination."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "newkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "newkey"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + })}, + })}, + {"rpoplpush", + RedisMap({ + {"summary", + "Returns the last element of a list after removing and pushing it to another list. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`LMOVE` with the `RIGHT` and `LEFT` arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + })}, + })}, + {"randomkey", RedisMap({ + {"summary", "Returns a random key name from the database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"fcall_ro", RedisMap({ + {"summary", "Invokes a read-only function."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the function that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "function"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "function"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"failover", RedisMap({ + {"summary", "Starts a coordinated failover from a server to one of its replicas."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "target"_RedisString}, + {"type", "block"_RedisString}, + {"token", "TO"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "host"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "host"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + RedisMap({ + {"name", "force"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "force"_RedisString}, + {"token", "FORCE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "abort"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "abort"_RedisString}, + {"token", "ABORT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "TIMEOUT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lpop", + RedisMap({ + {"summary", + "Returns the first elements in a list after removing them. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of elements returned"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `count` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"echo", RedisMap({ + {"summary", "Returns the given string."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "message"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "message"_RedisString}, + }), + })}, + })}, + {"rpop", + RedisMap({ + {"summary", + "Returns and removes the last elements of a list. Deletes the list if the last element was popped."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of elements returned"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `count` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zrangestore", + RedisMap({ + {"summary", "Stores a range of members from sorted set in a key."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements stored into the destination key."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "dst"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "dst"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "src"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "src"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "sortby"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "byscore"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "byscore"_RedisString}, + {"token", "BYSCORE"_RedisString}, + }), + RedisMap({ + {"name", "bylex"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bylex"_RedisString}, + {"token", "BYLEX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "rev"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "rev"_RedisString}, + {"token", "REV"_RedisString}, + {"flags", 
RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"srem", + RedisMap({ + {"summary", + "Removes one or more members from a set. Deletes the set if the last member was removed."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the number of members to be removed."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `member` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"restore-asking", + RedisMap({ + {"summary", "An internal command for migrating keys in a cluster."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). 
However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + {"history", RedisSet({ + RedisArray({"3.0.0"_RedisString, "Added the `REPLACE` modifier."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added the `ABSTTL` modifier."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added the `IDLETIME` and `FREQ` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "ttl"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "ttl"_RedisString}, + }), + RedisMap({ + {"name", "serialized-value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "serialized-value"_RedisString}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"since", "3.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "absttl"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "absttl"_RedisString}, + {"token", "ABSTTL"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + {"token", "IDLETIME"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "frequency"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "frequency"_RedisString}, + {"token", "FREQ"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"bitfield", + RedisMap({ + {"summary", "Performs arbitrary bitfield integer operations on strings."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(1) for each subcommand specified"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "operation"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "get-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "GET"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "encoding"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "encoding"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "write"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "overflow-block"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "OVERFLOW"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "wrap"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "wrap"_RedisString}, + {"token", "WRAP"_RedisString}, + }), + RedisMap({ + 
{"name", "sat"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sat"_RedisString}, + {"token", "SAT"_RedisString}, + }), + RedisMap({ + {"name", "fail"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "fail"_RedisString}, + {"token", "FAIL"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "write-operation"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "set-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "SET"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "encoding"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "encoding"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "incrby-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "INCRBY"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "encoding"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "encoding"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + })}, + }), + })}, + })}, + {"psetex", + RedisMap({ + {"summary", + "Sets both string value and expiration time in milliseconds of a key. The key is created if it doesn't exist."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "2.6.12"_RedisString}, + {"replaced_by", "`SET` with the `PX` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"ping", RedisMap({ + {"summary", "Returns the server's liveliness response."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "message"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "message"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"hlen", RedisMap({ + {"summary", "Returns the number of fields in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"msetnx", + RedisMap({ + {"summary", + "Atomically modifies the string values of one or more keys only when all keys don't exist."_RedisString}, + {"since", "1.0.1"_RedisString}, + {"group", 
"string"_RedisString}, + {"complexity", "O(N) where N is the number of keys to set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"slowlog", + RedisMap({ + {"summary", "A container for slow log commands."_RedisString}, + {"since", "2.2.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"slowlog|get", + RedisMap({ + {"summary", "Returns the slow log's entries."_RedisString}, + {"since", "2.2.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of entries returned"_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, + "Added client IP address, port and name to the reply."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"slowlog|reset", RedisMap({ + {"summary", "Clears all entries from the slow log."_RedisString}, + {"since", "2.2.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of entries in the slowlog"_RedisString}, + })}, + {"slowlog|len", RedisMap({ + {"summary", "Returns the number of entries in the slow log."_RedisString}, + {"since", "2.2.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"slowlog|help", RedisMap({ + {"summary", "Show helpful text about the different subcommands"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"zremrangebyrank", + RedisMap({ + {"summary", + "Removes members in a sorted set within a range of indexes. Deletes the sorted set if all members were removed."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + })}, + })}, + {"zrangebyscore", + RedisMap({ + {"summary", "Returns members in a sorted set within a range of scores."_RedisString}, + {"since", "1.0.5"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `BYSCORE` argument"_RedisString}, + {"history", RedisSet({ + RedisArray({"2.0.0"_RedisString, "Added the `WITHSCORES` modifier."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"since", "2.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"sync", RedisMap({ + {"summary", "An internal command used in replication."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + })}, + {"zinterstore", + RedisMap({ + {"summary", "Stores the intersect of multiple sorted sets in a key."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "weight"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "weight"_RedisString}, + {"token", "WEIGHTS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "aggregate"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "AGGREGATE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sum"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sum"_RedisString}, + {"token", "SUM"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + 
{"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + })}, + })}, + {"type", RedisMap({ + {"summary", "Determines the type of value stored at a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"spublish", + RedisMap({ + {"summary", "Post a message to a shard channel"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of clients subscribed to the receiving shard channel."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "shardchannel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "shardchannel"_RedisString}, + }), + RedisMap({ + {"name", "message"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "message"_RedisString}, + }), + })}, + })}, + {"bitpos", RedisMap({ + {"summary", "Finds the first set (1) or clear (0) bit in a string."_RedisString}, + {"since", "2.8.7"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(N)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `BYTE|BIT` option."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "bit"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "bit"_RedisString}, + }), + RedisMap({ + {"name", "range"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end-unit-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "end"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "byte"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "byte"_RedisString}, + {"token", "BYTE"_RedisString}, + }), + RedisMap({ + {"name", "bit"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bit"_RedisString}, + {"token", "BIT"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + })}, + })}, + {"zunionstore", + RedisMap({ + {"summary", "Stores the union of multiple sorted sets in a key."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N)+O(M log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + 
{"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "weight"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "weight"_RedisString}, + {"token", "WEIGHTS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "aggregate"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "AGGREGATE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sum"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sum"_RedisString}, + {"token", "SUM"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + })}, + })}, +}; + +#endif // WITH_COMMAND_DOCS diff --git a/tools/pika_migrate/src/pika_conf.cc b/tools/pika_migrate/src/pika_conf.cc index ca19667eb7..00a6793699 100644 --- a/tools/pika_migrate/src/pika_conf.cc +++ b/tools/pika_migrate/src/pika_conf.cc @@ -3,118 +3,48 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "include/pika_conf.h" - -#include - -#include #include +#include -#include "slash/include/env.h" +#include +#include "cache/include/config.h" +#include "include/acl.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_conf.h" #include "include/pika_define.h" -PikaConf::PikaConf(const std::string& path) - : slash::BaseConf(path), conf_path_(path) { - pthread_rwlock_init(&rwlock_, NULL); - local_meta_ = new PikaMeta(); -} - -PikaConf::~PikaConf() { - pthread_rwlock_destroy(&rwlock_); - delete local_meta_; -} - -Status PikaConf::InternalGetTargetTable(const std::string& table_name, uint32_t* const target) { - int32_t table_index = -1; - for (size_t idx = 0; table_structs_.size(); ++idx) { - if (table_structs_[idx].table_name == table_name) { - table_index = idx; - break; - } - } - if (table_index == -1) { - return Status::NotFound("table : " + table_name + " not found"); - } - *target = table_index; - return Status::OK(); -} - -Status PikaConf::TablePartitionsSanityCheck(const std::string& table_name, - const std::set& partition_ids, - bool is_add) { - RWLock l(&rwlock_, false); - uint32_t table_index = 0; - Status s = InternalGetTargetTable(table_name, &table_index); - if (!s.ok()) { - return s; - } - // Sanity Check - for (const auto& id : partition_ids) { - if (id >= table_structs_[table_index].partition_num) { - return Status::Corruption("partition index out of range"); - } else if (is_add && table_structs_[table_index].partition_ids.count(id) != 0) { - return Status::Corruption("partition : " + std::to_string(id) + " exist"); - } else if (!is_add && table_structs_[table_index].partition_ids.count(id) == 0) { - return Status::Corruption("partition : " + std::to_string(id) + " not exist"); - } - } - return Status::OK(); -} - -Status PikaConf::AddTablePartitions(const std::string& table_name, - const std::set& partition_ids) { - Status s = 
TablePartitionsSanityCheck(table_name, partition_ids, true); - if (!s.ok()) { - return s; - } - - RWLock l(&rwlock_, true); - uint32_t index = 0; - s = InternalGetTargetTable(table_name, &index); - if (s.ok()) { - for (const auto& id : partition_ids) { - table_structs_[index].partition_ids.insert(id); - } - s = local_meta_->StableSave(table_structs_); - } - return s; -} - -Status PikaConf::RemoveTablePartitions(const std::string& table_name, - const std::set& partition_ids) { - Status s = TablePartitionsSanityCheck(table_name, partition_ids, false); - if (!s.ok()) { - return s; - } +using pstd::Status; +extern std::unique_ptr g_pika_cmd_table_manager; - RWLock l(&rwlock_, true); - uint32_t index = 0; - s = InternalGetTargetTable(table_name, &index); - if (s.ok()) { - for (const auto& id : partition_ids) { - table_structs_[index].partition_ids.erase(id); - } - s = local_meta_->StableSave(table_structs_); - } - return s; -} +PikaConf::PikaConf(const std::string& path) + : pstd::BaseConf(path), conf_path_(path) {} -int PikaConf::Load() -{ +int PikaConf::Load() { int ret = LoadConf(); - if (ret != 0) { + if (ret) { return ret; } GetConfInt("timeout", &timeout_); if (timeout_ < 0) { - timeout_ = 60; // 60s + timeout_ = 60; // 60s } GetConfStr("server-id", &server_id_); if (server_id_.empty()) { server_id_ = "1"; - } + } else if (PIKA_SERVER_ID_MAX < std::stoull(server_id_)) { + server_id_ = "PIKA_SERVER_ID_MAX"; + } + GetConfStr("run-id", &run_id_); + if (run_id_.empty()) { + run_id_ = pstd::getRandomHexChars(configRunIDSize); + // try rewrite run_id_ to diff_commands_ + SetRunID(run_id_); + } else if (run_id_.length() != configRunIDSize) { + LOG(FATAL) << "run-id " << run_id_ << " is invalid, its string length should be " << configRunIDSize; + } + GetConfStr("replication-id", &replication_id_); GetConfStr("requirepass", &requirepass_); GetConfStr("masterauth", &masterauth_); GetConfStr("userpass", &userpass_); @@ -124,47 +54,67 @@ int PikaConf::Load() } GetConfInt("root-connection-num", &root_connection_num_); if (root_connection_num_ < 0) { - root_connection_num_ = 2; + root_connection_num_ = 2; } std::string swe; GetConfStr("slowlog-write-errorlog", &swe); slowlog_write_errorlog_.store(swe == "yes" ? true : false); + // slot migrate + std::string smgrt; + GetConfStr("slotmigrate", &smgrt); + slotmigrate_.store(smgrt == "yes" ? true : false); + + // slow cmd thread pool + std::string slowcmdpool; + GetConfStr("slow-cmd-pool", &slowcmdpool); + slow_cmd_pool_.store(slowcmdpool == "yes" ? true : false); + + int binlog_writer_num = 1; + GetConfInt("binlog-writer-num", &binlog_writer_num); + if (binlog_writer_num <= 0 || binlog_writer_num > 24) { + binlog_writer_num_ = 1; + } else { + binlog_writer_num_ = binlog_writer_num; + } + int tmp_slowlog_log_slower_than; GetConfInt("slowlog-log-slower-than", &tmp_slowlog_log_slower_than); slowlog_log_slower_than_.store(tmp_slowlog_log_slower_than); + GetConfInt("slowlog-max-len", &slowlog_max_len_); if (slowlog_max_len_ == 0) { slowlog_max_len_ = 128; } std::string user_blacklist; GetConfStr("userblacklist", &user_blacklist); - slash::StringSplit(user_blacklist, COMMA, user_blacklist_); + pstd::StringSplit(user_blacklist, COMMA, user_blacklist_); for (auto& item : user_blacklist_) { - slash::StringToLower(item); + pstd::StringToLower(item); } - + GetConfInt("default-slot-num", &default_slot_num_); GetConfStr("dump-path", &bgsave_path_); bgsave_path_ = bgsave_path_.empty() ? 
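The new run-id handling above generates a random hex identifier via pstd::getRandomHexChars when the option is absent, and fatals when the configured value has the wrong length. A sketch of equivalent behavior, assuming configRunIDSize is 40 as with Redis run IDs; note Load() itself only validates the length, so the hex-digit check here is an extra sanity test, not pika behavior:

#include <cctype>
#include <cstddef>
#include <random>
#include <string>

constexpr std::size_t kRunIDSize = 40;  // assumption: same 40-char convention as Redis

std::string RandomHexChars(std::size_t n) {  // stand-in for pstd::getRandomHexChars
  static const char kHex[] = "0123456789abcdef";
  std::mt19937_64 rng{std::random_device{}()};
  std::uniform_int_distribution<int> dist(0, 15);
  std::string s(n, '0');
  for (auto& c : s) c = kHex[dist(rng)];
  return s;
}

bool ValidRunID(const std::string& id) {
  if (id.size() != kRunIDSize) return false;  // mirrors the LOG(FATAL) length check
  for (char c : id) {
    if (std::isxdigit(static_cast<unsigned char>(c)) == 0) return false;
  }
  return true;
}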
"./dump/" : bgsave_path_; if (bgsave_path_[bgsave_path_.length() - 1] != '/') { bgsave_path_ += "/"; } GetConfInt("dump-expire", &expire_dump_days_); - if (expire_dump_days_ < 0 ) { - expire_dump_days_ = 0; + if (expire_dump_days_ < 0) { + expire_dump_days_ = 0; } GetConfStr("dump-prefix", &bgsave_prefix_); GetConfInt("expire-logs-nums", &expire_logs_nums_); - if (expire_logs_nums_ <= 10 ) { - expire_logs_nums_ = 10; + if (expire_logs_nums_ <= 10) { + expire_logs_nums_ = 10; } GetConfInt("expire-logs-days", &expire_logs_days_); - if (expire_logs_days_ <= 0 ) { - expire_logs_days_ = 1; + if (expire_logs_days_ <= 0) { + expire_logs_days_ = 1; } GetConfStr("compression", &compression_); + GetConfStr("compression_per_level", &compression_per_level_); // set slave read only true as default slave_read_only_ = true; GetConfInt("slave-priority", &slave_priority_); @@ -178,27 +128,75 @@ int PikaConf::Load() if (log_path_[log_path_.length() - 1] != '/') { log_path_ += "/"; } + GetConfInt("log-retention-time",&log_retention_time_); + if(log_retention_time_ < 0){ + LOG(FATAL) << "log-retention-time invalid"; + } + GetConfStr("loglevel", &log_level_); GetConfStr("db-path", &db_path_); + GetConfInt("db-instance-num", &db_instance_num_); + if (db_instance_num_ <= 0) { + LOG(FATAL) << "db-instance-num load error"; + } + int64_t t_val = 0; + GetConfInt64("rocksdb-ttl-second", &t_val); + rocksdb_ttl_second_.store(uint64_t(t_val)); + t_val = 0; + GetConfInt64("rocksdb-periodic-second", &t_val); + rocksdb_periodic_second_.store(uint64_t(t_val)); db_path_ = db_path_.empty() ? "./db/" : db_path_; if (db_path_[db_path_.length() - 1] != '/') { db_path_ += "/"; } - local_meta_->SetPath(db_path_); GetConfInt("thread-num", &thread_num_); if (thread_num_ <= 0) { thread_num_ = 12; } - if (thread_num_ > 24) { - thread_num_ = 24; - } + GetConfInt("thread-pool-size", &thread_pool_size_); if (thread_pool_size_ <= 0) { thread_pool_size_ = 12; } - if (thread_pool_size_ > 24) { - thread_pool_size_ = 24; + if (thread_pool_size_ > 100) { + thread_pool_size_ = 100; + } + + GetConfInt("slow-cmd-thread-pool-size", &slow_cmd_thread_pool_size_); + if (slow_cmd_thread_pool_size_ < 0) { + slow_cmd_thread_pool_size_ = 8; } + if (slow_cmd_thread_pool_size_ > 50) { + slow_cmd_thread_pool_size_ = 50; + } + + GetConfInt("admin-thread-pool-size", &admin_thread_pool_size_); + if (admin_thread_pool_size_ <= 0) { + admin_thread_pool_size_ = 2; + } + if (admin_thread_pool_size_ > 4) { + admin_thread_pool_size_ = 4; + } + + std::string slow_cmd_list; + GetConfStr("slow-cmd-list", &slow_cmd_list); + SetSlowCmd(slow_cmd_list); + + std::string admin_cmd_list; + GetConfStr("admin-cmd-list", &admin_cmd_list); + if (admin_cmd_list == "") { + admin_cmd_list = "info, monitor, ping"; + SetAdminCmd(admin_cmd_list); + } + + std::string unfinished_full_sync; + GetConfStr("internal-used-unfinished-full-sync", &unfinished_full_sync); + if (replication_id_.empty()) { + unfinished_full_sync.clear(); + } + SetInternalUsedUnFinishedFullSync(unfinished_full_sync); + + GetConfInt("sync-thread-num", &sync_thread_num_); if (sync_thread_num_ <= 0) { sync_thread_num_ = 3; @@ -209,45 +207,59 @@ int PikaConf::Load() std::string instance_mode; GetConfStr("instance-mode", &instance_mode); - classic_mode_.store(instance_mode.empty() - || !strcasecmp(instance_mode.data(), "classic")); + classic_mode_.store(instance_mode.empty() || !strcasecmp(instance_mode.data(), "classic")); if (classic_mode_.load()) { GetConfInt("databases", &databases_); - if (databases_ < 1 || 
databases_ > 8) { - LOG(FATAL) << "config databases error, limit [1 ~ 8], the actual is: " - << databases_; + if (databases_ < 1 || databases_ > MAX_DB_NUM) { + LOG(FATAL) << "config databases error, limit [1 ~ 8], the actual is: " << databases_; } for (int idx = 0; idx < databases_; ++idx) { - table_structs_.push_back({"db" + std::to_string(idx), 1, {0}}); + db_structs_.push_back({"db" + std::to_string(idx), db_instance_num_}); } + } + default_db_ = db_structs_[0].db_name; + + // sync_binlog_thread_num_ must be set after the setting of databases_ + GetConfInt("sync-binlog-thread-num", &sync_binlog_thread_num_); + if (sync_binlog_thread_num_ <= 0) { + sync_binlog_thread_num_ = databases_; } else { - GetConfInt("default-slot-num", &default_slot_num_); - if (default_slot_num_ <= 0) { - LOG(FATAL) << "config default-slot-num error," - << " it should greater than zero, the actual is: " - << default_slot_num_; - } - std::string pika_meta_path = db_path_ + kPikaMeta; - if (!slash::FileExists(pika_meta_path)) { - local_meta_->StableSave({{"db0", static_cast(default_slot_num_), {}}}); - } - Status s = local_meta_->ParseMeta(&table_structs_); - if (!s.ok()) { - LOG(FATAL) << "parse meta file error"; - } + // final value is MIN(sync_binlog_thread_num, databases_) + sync_binlog_thread_num_ = sync_binlog_thread_num_ > databases_ ? databases_ : sync_binlog_thread_num_; + } + + int tmp_replication_num = 0; + GetConfInt("replication-num", &tmp_replication_num); + if (tmp_replication_num > 4 || tmp_replication_num < 0) { + LOG(FATAL) << "replication-num " << tmp_replication_num << "is invalid, please pick from [0...4]"; + } + replication_num_.store(tmp_replication_num); + + int tmp_consensus_level = 0; + GetConfInt("consensus-level", &tmp_consensus_level); + if (tmp_consensus_level < 0 || tmp_consensus_level > replication_num_.load()) { + LOG(FATAL) << "consensus-level " << tmp_consensus_level + << " is invalid, current replication-num: " << replication_num_.load() + << ", please pick from 0 to replication-num" + << " [0..." 
<< replication_num_.load() << "]"; + } + consensus_level_.store(tmp_consensus_level); + if (classic_mode_.load() && (consensus_level_.load() != 0 || replication_num_.load() != 0)) { + LOG(FATAL) << "consensus-level & replication-num only configurable under sharding mode," + << " set it to be 0 if you are using classic mode"; } - default_table_ = table_structs_[0].table_name; compact_cron_ = ""; GetConfStr("compact-cron", &compact_cron_); - if (compact_cron_ != "") { + if (!compact_cron_.empty()) { bool have_week = false; - std::string compact_cron, week_str; - int slash_num = count(compact_cron_.begin(), compact_cron_.end(), '/'); + std::string compact_cron; + std::string week_str; + int64_t slash_num = count(compact_cron_.begin(), compact_cron_.end(), '/'); if (slash_num == 2) { have_week = true; - std::string::size_type first_slash = compact_cron_.find("/"); + std::string::size_type first_slash = compact_cron_.find('/'); week_str = compact_cron_.substr(0, first_slash); compact_cron = compact_cron_.substr(first_slash + 1); } else { @@ -255,18 +267,18 @@ int PikaConf::Load() } std::string::size_type len = compact_cron.length(); - std::string::size_type colon = compact_cron.find("-"); - std::string::size_type underline = compact_cron.find("/"); - if (colon == std::string::npos || underline == std::string::npos || - colon >= underline || colon + 1 >= len || + std::string::size_type colon = compact_cron.find('-'); + std::string::size_type underline = compact_cron.find('/'); + if (colon == std::string::npos || underline == std::string::npos || colon >= underline || colon + 1 >= len || colon + 1 == underline || underline + 1 >= len) { - compact_cron_ = ""; + compact_cron_ = ""; } else { int week = std::atoi(week_str.c_str()); int start = std::atoi(compact_cron.substr(0, colon).c_str()); int end = std::atoi(compact_cron.substr(colon + 1, underline).c_str()); int usage = std::atoi(compact_cron.substr(underline + 1).c_str()); - if ((have_week && (week < 1 || week > 7)) || start < 0 || start > 23 || end < 0 || end > 23 || usage < 0 || usage > 100) { + if ((have_week && (week < 1 || week > 7)) || start < 0 || start > 23 || end < 0 || end > 23 || usage < 0 || + usage > 100) { compact_cron_ = ""; } } @@ -274,42 +286,185 @@ int PikaConf::Load() compact_interval_ = ""; GetConfStr("compact-interval", &compact_interval_); - if (compact_interval_ != "") { + if (!compact_interval_.empty()) { std::string::size_type len = compact_interval_.length(); - std::string::size_type slash = compact_interval_.find("/"); + std::string::size_type slash = compact_interval_.find('/'); if (slash == std::string::npos || slash + 1 >= len) { compact_interval_ = ""; } else { int interval = std::atoi(compact_interval_.substr(0, slash).c_str()); - int usage = std::atoi(compact_interval_.substr(slash+1).c_str()); + int usage = std::atoi(compact_interval_.substr(slash + 1).c_str()); if (interval <= 0 || usage < 0 || usage > 100) { compact_interval_ = ""; } } } + GetConfInt("max-subcompactions", &max_subcompactions_); + if (max_subcompactions_ < 1) { + max_subcompactions_ = 1; + } + + GetConfInt("compact-every-num-of-files", &compact_every_num_of_files_); + if (compact_every_num_of_files_ < 10) { + compact_every_num_of_files_ = 10; + } + + GetConfInt("force-compact-file-age-seconds", &force_compact_file_age_seconds_); + if (force_compact_file_age_seconds_ < 300) { + force_compact_file_age_seconds_ = 300; + } + + GetConfInt("force-compact-min-delete-ratio", &force_compact_min_delete_ratio_); + if (force_compact_min_delete_ratio_ < 
10) { + force_compact_min_delete_ratio_ = 10; + } + + GetConfInt("dont-compact-sst-created-in-seconds", &dont_compact_sst_created_in_seconds_); + if (dont_compact_sst_created_in_seconds_ < 600) { + dont_compact_sst_created_in_seconds_ = 600; + } + + GetConfInt("best-delete-min-ratio", &best_delete_min_ratio_); + if (best_delete_min_ratio_ < 10) { + best_delete_min_ratio_ = 10; + } + + std::string cs_; + GetConfStr("compaction-strategy", &cs_); + if (cs_ == "full-compact") { + compaction_strategy_ = FullCompact; + } else if (cs_ == "obd-compact") { + compaction_strategy_ = OldestOrBestDeleteRatioSstCompact; + } else { + compaction_strategy_ = NONE; + } + + // least-free-disk-resume-size + GetConfInt64Human("least-free-disk-resume-size", &least_free_disk_to_resume_); + if (least_free_disk_to_resume_ <= 0) { + least_free_disk_to_resume_ = 268435456; // 256Mb + } + + GetConfInt64("manually-resume-interval", &resume_check_interval_); + if (resume_check_interval_ <= 0) { + resume_check_interval_ = 60; // seconds + } + + GetConfDouble("min-check-resume-ratio", &min_check_resume_ratio_); + if (min_check_resume_ratio_ < 0) { + min_check_resume_ratio_ = 0.7; + } + // write_buffer_size - GetConfInt64("write-buffer-size", &write_buffer_size_); - if (write_buffer_size_ <= 0 ) { - write_buffer_size_ = 268435456; // 256Mb + GetConfInt64Human("write-buffer-size", &write_buffer_size_); + if (write_buffer_size_ <= 0) { + write_buffer_size_ = 268435456; // 256Mb + } + + GetConfInt("level0-stop-writes-trigger", &level0_stop_writes_trigger_); + if (level0_stop_writes_trigger_ < 36) { + level0_stop_writes_trigger_ = 36; + } + + GetConfInt("level0-slowdown-writes-trigger", &level0_slowdown_writes_trigger_); + if (level0_slowdown_writes_trigger_ < 20) { + level0_slowdown_writes_trigger_ = 20; + } + + GetConfInt("level0-file-num-compaction-trigger", &level0_file_num_compaction_trigger_); + if (level0_file_num_compaction_trigger_ < 4) { + level0_file_num_compaction_trigger_ = 4; + } + + GetConfInt("min-write-buffer-number-to-merge", &min_write_buffer_number_to_merge_); + if (min_write_buffer_number_to_merge_ < 1) { + min_write_buffer_number_to_merge_ = 1; // 1 for immutable memtable to merge + } + + // arena_block_size + GetConfInt64Human("arena-block-size", &arena_block_size_); + if (arena_block_size_ <= 0) { + arena_block_size_ = write_buffer_size_ >> 3; // 1/8 of the write_buffer_size_ + } + + // slotmigrate-thread-num + GetConfInt64Human("slotmigrate-thread-num", &slotmigrate_thread_num_); + if (slotmigrate_thread_num_ < 1 || slotmigrate_thread_num_ > 24) { + slotmigrate_thread_num_ = 8; // default: 8 migrate threads + } + + // thread-migrate-keys-num + GetConfInt64Human("thread-migrate-keys-num", &thread_migrate_keys_num_); + if (thread_migrate_keys_num_ < 8 || thread_migrate_keys_num_ > 128) { + thread_migrate_keys_num_ = 64; // default: migrate 64 keys per batch + } // max_write_buffer_size - GetConfInt64("max-write-buffer-size", &max_write_buffer_size_); + GetConfInt64Human("max-write-buffer-size", &max_write_buffer_size_); if (max_write_buffer_size_ <= 0) { - max_write_buffer_size_ = 10737418240; // 10Gb + max_write_buffer_size_ = PIKA_CACHE_SIZE_DEFAULT; // 10Gb + } + + // max-total-wal-size + GetConfInt64("max-total-wal-size", &max_total_wal_size_); + if (max_total_wal_size_ < 0) { + max_total_wal_size_ = 0; + } + + // rate-limiter-mode + rate_limiter_mode_ = 1; + GetConfInt("rate-limiter-mode", &rate_limiter_mode_); + if (rate_limiter_mode_ < 0 or rate_limiter_mode_ > 2) { + rate_limiter_mode_ = 1; + } + + //
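Several byte-sized options above now load through GetConfInt64Human rather than GetConfInt64, which presumably lets the config file carry suffixed values such as 256M. A minimal parser with that assumed behavior; pika's actual helper may accept a different suffix set:

#include <cctype>
#include <cstddef>
#include <cstdint>
#include <string>

// Assumed behavior: a plain byte count, optionally suffixed with K, M, or G.
int64_t ParseHumanSize(const std::string& v) {
  std::size_t pos = 0;
  int64_t n = std::stoll(v, &pos);  // throws if there are no leading digits
  if (pos >= v.size()) return n;    // bare byte count
  switch (std::toupper(static_cast<unsigned char>(v[pos]))) {
    case 'K': return n << 10;
    case 'M': return n << 20;
    case 'G': return n << 30;
    default:  return n;             // unknown suffix: ignore it
  }
}
// ParseHumanSize("256M") == 268435456, i.e. the write-buffer-size default.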
rate-limiter-bandwidth + GetConfInt64("rate-limiter-bandwidth", &rate_limiter_bandwidth_); + if (rate_limiter_bandwidth_ <= 0) { + rate_limiter_bandwidth_ = 1024LL << 30; // 1024GB/s + } + + // rate-limiter-refill-period-us + GetConfInt64("rate-limiter-refill-period-us", &rate_limiter_refill_period_us_); + if (rate_limiter_refill_period_us_ <= 0) { + rate_limiter_refill_period_us_ = 100 * 1000; + } + + // rate-limiter-fairness + GetConfInt64("rate-limiter-fairness", &rate_limiter_fairness_); + if (rate_limiter_fairness_ <= 0) { + rate_limiter_fairness_ = 10; + } + + std::string at; + GetConfStr("rate-limiter-auto-tuned", &at); + // rate_limiter_auto_tuned_ will be true if user didn't config + rate_limiter_auto_tuned_ = at == "yes" || at.empty(); + + // max_write_buffer_num + max_write_buffer_num_ = 2; + GetConfInt("max-write-buffer-num", &max_write_buffer_num_); + if (max_write_buffer_num_ <= 0) { + max_write_buffer_num_ = 2; // 1 for immutable memtable, 1 for mutable memtable } // max_client_response_size - GetConfInt64("max-client-response-size", &max_client_response_size_); + GetConfInt64Human("max-client-response-size", &max_client_response_size_); if (max_client_response_size_ <= 0) { - max_client_response_size_ = 1073741824; // 1Gb + max_client_response_size_ = 1073741824; // 1Gb } // target_file_size_base - GetConfInt("target-file-size-base", &target_file_size_base_); + GetConfInt64Human("target-file-size-base", &target_file_size_base_); if (target_file_size_base_ <= 0) { - target_file_size_base_ = 1048576; // 10Mb + target_file_size_base_ = 1048576; // 10Mb + } + + GetConfInt64("max-compaction-bytes", &max_compaction_bytes_); + if (max_compaction_bytes_ <= 0) { + // RocksDB's default is 25 * target_file_size_base_ + max_compaction_bytes_ = target_file_size_base_ * 25; } max_cache_statistic_keys_ = 0; @@ -318,31 +473,58 @@ int PikaConf::Load() max_cache_statistic_keys_ = 0; } + // disable_auto_compactions + GetConfBool("disable_auto_compactions", &disable_auto_compactions_); + small_compaction_threshold_ = 5000; GetConfInt("small-compaction-threshold", &small_compaction_threshold_); - if (small_compaction_threshold_ <= 0 - || small_compaction_threshold_ >= 100000) { - small_compaction_threshold_ = 5000; + if (small_compaction_threshold_ < 0) { + small_compaction_threshold_ = 0; + } else if (small_compaction_threshold_ >= 100000) { + small_compaction_threshold_ = 100000; + } + + small_compaction_duration_threshold_ = 10000; + GetConfInt("small-compaction-duration-threshold", &small_compaction_duration_threshold_); + if (small_compaction_duration_threshold_ < 0) { + small_compaction_duration_threshold_ = 0; + } else if (small_compaction_duration_threshold_ >= 1000000) { + small_compaction_duration_threshold_ = 1000000; } - max_background_flushes_ = 1; + // max-background-flushes and max-background-compactions should both be -1 or both not GetConfInt("max-background-flushes", &max_background_flushes_); - if (max_background_flushes_ <= 0) { + if (max_background_flushes_ <= 0 && max_background_flushes_ != -1) { max_background_flushes_ = 1; } - if (max_background_flushes_ >= 4) { - max_background_flushes_ = 4; + if (max_background_flushes_ >= 6) { + max_background_flushes_ = 6; } - max_background_compactions_ = 2; GetConfInt("max-background-compactions", &max_background_compactions_); - if (max_background_compactions_ <= 0) { + if (max_background_compactions_ <= 0 && max_background_compactions_ != -1) { max_background_compactions_ = 2; } if (max_background_compactions_ >= 8) { 
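The four rate-limiter options read above correspond one-to-one to the parameters of RocksDB's generic rate limiter. A sketch of how they would typically be wired up, under the assumption that pika forwards them like this when building its RocksDB options; the values shown are the defaults Load() applies:

#include <cstdint>
#include <memory>

#include <rocksdb/rate_limiter.h>

std::shared_ptr<rocksdb::RateLimiter> MakeRateLimiter() {
  int64_t bandwidth = 1024LL << 30;  // rate-limiter-bandwidth (bytes/s)
  int64_t refill_us = 100 * 1000;    // rate-limiter-refill-period-us
  int32_t fairness = 10;             // rate-limiter-fairness
  auto mode = rocksdb::RateLimiter::Mode::kWritesOnly;  // rate-limiter-mode == 1
  bool auto_tuned = true;            // rate-limiter-auto-tuned defaults to yes
  return std::shared_ptr<rocksdb::RateLimiter>(rocksdb::NewGenericRateLimiter(
      bandwidth, refill_us, fairness, mode, auto_tuned));
}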
max_background_compactions_ = 8; } + max_background_jobs_ = max_background_flushes_ + max_background_compactions_; + GetConfInt("max-background-jobs", &max_background_jobs_); + if (max_background_jobs_ <= 0) { + max_background_jobs_ = (1 + 2); + } + if (max_background_jobs_ >= (8 + 6)) { + max_background_jobs_ = (8 + 6); + } + + GetConfInt64("delayed-write-rate", &delayed_write_rate_); + if (delayed_write_rate_ <= 0) { + // set 0 means let rocksDB infer from rate-limiter(by default, rate-limiter is 1024GB, delayed_write_rate will be 512GB) + // if rate-limiter is nullptr, it would be set to 16MB by RocksDB + delayed_write_rate_ = 0; + } + max_cache_files_ = 5000; GetConfInt("max-cache-files", &max_cache_files_); if (max_cache_files_ < -1) { @@ -355,46 +537,61 @@ int PikaConf::Load() } block_size_ = 4 * 1024; - GetConfInt64("block-size", &block_size_); + GetConfInt64Human("block-size", &block_size_); if (block_size_ <= 0) { block_size_ = 4 * 1024; } block_cache_ = 8 * 1024 * 1024; - GetConfInt64("block-cache", &block_cache_); + GetConfInt64Human("block-cache", &block_cache_); if (block_cache_ < 0) { block_cache_ = 8 * 1024 * 1024; } + num_shard_bits_ = -1; + GetConfInt64("num-shard-bits", &num_shard_bits_); + std::string sbc; GetConfStr("share-block-cache", &sbc); - share_block_cache_ = (sbc == "yes") ? true : false; + share_block_cache_ = sbc == "yes"; + + std::string epif; + GetConfStr("enable-partitioned-index-filters", &epif); + enable_partitioned_index_filters_ = epif == "yes"; std::string ciafb; GetConfStr("cache-index-and-filter-blocks", &ciafb); - cache_index_and_filter_blocks_ = (ciafb == "yes") ? true : false; + cache_index_and_filter_blocks_ = ciafb == "yes"; + + std::string plfaibic; + GetConfStr("pin_l0_filter_and_index_blocks_in_cache", &plfaibic); + pin_l0_filter_and_index_blocks_in_cache_ = plfaibic == "yes"; std::string offh; GetConfStr("optimize-filters-for-hits", &offh); - optimize_filters_for_hits_ = (offh == "yes") ? true : false; + optimize_filters_for_hits_ = offh == "yes"; std::string lcdlb; GetConfStr("level-compaction-dynamic-level-bytes", &lcdlb); - level_compaction_dynamic_level_bytes_ = (lcdlb == "yes") ? true : false; + level_compaction_dynamic_level_bytes_ = lcdlb == "yes" || lcdlb.empty(); // daemonize std::string dmz; GetConfStr("daemonize", &dmz); - daemonize_ = (dmz == "yes") ? true : false; + daemonize_ = dmz == "yes"; + + // read redis cache in Net worker threads + std::string rtc_enabled; + GetConfStr("rtc-cache-read", &rtc_enabled); + rtc_cache_read_enabled_ = rtc_enabled != "no"; // binlog std::string wb; GetConfStr("write-binlog", &wb); - write_binlog_ = (wb == "no") ? 
false : true; - GetConfInt("binlog-file-size", &binlog_file_size_); - if (binlog_file_size_ < 1024 - || static_cast(binlog_file_size_) > (1024LL * 1024 * 1024)) { - binlog_file_size_ = 100 * 1024 * 1024; // 100M + write_binlog_ = wb != "no"; + GetConfIntHuman("binlog-file-size", &binlog_file_size_); + if (binlog_file_size_ < 1024 || static_cast(binlog_file_size_) > (1024LL * 1024 * 1024)) { + binlog_file_size_ = 100 * 1024 * 1024; // 100M } GetConfStr("pidfile", &pidfile_); @@ -412,10 +609,94 @@ int PikaConf::Load() network_interface_ = ""; GetConfStr("network-interface", &network_interface_); + // userblacklist + GetConfStr("userblacklist", &userblacklist_); + // acl users + GetConfStrMulti("user", &users_); + + GetConfStr("aclfile", &aclFile_); + GetConfStrMulti("rename-command", &cmds_); + for (const auto & i : cmds_) { + std::string before, after; + std::istringstream iss(i); + iss >> before; + if (iss) { + iss >> after; + pstd::StringToLower(before); + pstd::StringToLower(after); + std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(before); + if (!c_ptr) { + LOG(ERROR) << "No such " << before << " command in pika-command"; + return -1; + } + g_pika_cmd_table_manager->RenameCommand(before, after); + } + } + std::string acl_pubsub_default; + GetConfStr("acl-pubsub-default", &acl_pubsub_default); + if (acl_pubsub_default == "allchannels") { + acl_pubsub_default_ = static_cast(AclSelectorFlag::ALL_CHANNELS); + } + + int tmp_acllog_max_len = 128; + GetConfInt("acllog-max-len", &tmp_acllog_max_len); + if (tmp_acllog_max_len < 0) { + tmp_acllog_max_len = 128; + } + acl_Log_max_len_ = tmp_acllog_max_len; + // slaveof slaveof_ = ""; GetConfStr("slaveof", &slaveof_); + int cache_num = 16; + GetConfInt("cache-num", &cache_num); + cache_num_ = (0 >= cache_num || 48 < cache_num) ? 16 : cache_num; + + int cache_mode = 0; + GetConfInt("cache-model", &cache_mode); + cache_mode_ = (PIKA_CACHE_NONE > cache_mode || PIKA_CACHE_READ < cache_mode) ? PIKA_CACHE_NONE : cache_mode; + + std::string cache_type; + GetConfStr("cache-type", &cache_type); + SetCacheType(cache_type); + + int zset_cache_start_direction = 0; + GetConfInt("zset-cache-start-direction", &zset_cache_start_direction); + if (zset_cache_start_direction != cache::CACHE_START_FROM_BEGIN && zset_cache_start_direction != cache::CACHE_START_FROM_END) { + zset_cache_start_direction = cache::CACHE_START_FROM_BEGIN; + } + zset_cache_start_direction_ = zset_cache_start_direction; + + int zset_cache_field_num_per_key = DEFAULT_CACHE_ITEMS_PER_KEY; + GetConfInt("zset-cache-field-num-per-key", &zset_cache_field_num_per_key); + if (zset_cache_field_num_per_key <= 0) { + zset_cache_field_num_per_key = DEFAULT_CACHE_ITEMS_PER_KEY; + } + zset_cache_field_num_per_key_ = zset_cache_field_num_per_key; + + int max_key_size_in_cache = DEFAULT_CACHE_MAX_KEY_SIZE; + GetConfInt("max-key-size-in-cache", &max_key_size_in_cache); + if (max_key_size_in_cache <= 0) { + max_key_size_in_cache = DEFAULT_CACHE_MAX_KEY_SIZE; + } + max_key_size_in_cache_ = max_key_size_in_cache; + + int64_t cache_maxmemory = PIKA_CACHE_SIZE_DEFAULT; + GetConfInt64("cache-maxmemory", &cache_maxmemory); + cache_maxmemory_ = (PIKA_CACHE_SIZE_MIN > cache_maxmemory) ? PIKA_CACHE_SIZE_DEFAULT : cache_maxmemory; + + int cache_maxmemory_policy = 1; + GetConfInt("cache-maxmemory-policy", &cache_maxmemory_policy); + cache_maxmemory_policy_ = (0 > cache_maxmemory_policy || 7 < cache_maxmemory_policy) ? 
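Each rename-command entry parsed above carries exactly two tokens, the existing command name and its replacement, both lowercased before the command-table lookup and rename. The same two-token parse in isolation, written as a slightly stricter variant of the loop above:

#include <cctype>
#include <sstream>
#include <string>

// Parses one "rename-command" value, e.g. "FLUSHALL 360flushall".
bool ParseRename(const std::string& value, std::string* before, std::string* after) {
  std::istringstream iss(value);
  if (!(iss >> *before >> *after)) return false;  // need both old and new name
  for (auto& c : *before) c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
  for (auto& c : *after) c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
  return true;
}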
1 : cache_maxmemory_policy; + + int cache_maxmemory_samples = 5; + GetConfInt("cache-maxmemory-samples", &cache_maxmemory_samples); + cache_maxmemory_samples_ = (1 > cache_maxmemory_samples) ? 5 : cache_maxmemory_samples; + + int cache_lfu_decay_time = 1; + GetConfInt("cache-lfu-decay-time", &cache_lfu_decay_time); + cache_lfu_decay_time_ = (0 > cache_lfu_decay_time) ? 1 : cache_lfu_decay_time; // sync window size int tmp_sync_window_size = kBinlogReadWinDefaultSize; GetConfInt("sync-window-size", &tmp_sync_window_size); @@ -427,6 +708,7 @@ int PikaConf::Load() sync_window_size_.store(tmp_sync_window_size); } + // redis-migrate conifg args target_redis_host_ = "127.0.0.1"; GetConfStr("target-redis-host", &target_redis_host_); @@ -441,6 +723,73 @@ int PikaConf::Load() redis_sender_num_ = 8; GetConfInt("redis-sender-num", &redis_sender_num_); + + // max conn rbuf size + int tmp_max_conn_rbuf_size = PIKA_MAX_CONN_RBUF; + GetConfIntHuman("max-conn-rbuf-size", &tmp_max_conn_rbuf_size); + if (tmp_max_conn_rbuf_size <= PIKA_MAX_CONN_RBUF_LB) { + max_conn_rbuf_size_.store(PIKA_MAX_CONN_RBUF_LB); + } else if (tmp_max_conn_rbuf_size >= PIKA_MAX_CONN_RBUF_HB * 2) { + max_conn_rbuf_size_.store(PIKA_MAX_CONN_RBUF_HB * 2); + } else { + max_conn_rbuf_size_.store(tmp_max_conn_rbuf_size); + } + + // rocksdb blob configure + GetConfBool("enable-blob-files", &enable_blob_files_); + GetConfInt64Human("min-blob-size", &min_blob_size_); + if (min_blob_size_ <= 0) { + min_blob_size_ = 4096; + } + GetConfInt64Human("blob-file-size", &blob_file_size_); + if (blob_file_size_ <= 0) { + blob_file_size_ = 256 * 1024 * 1024; + } + GetConfStr("blob-compression-type", &blob_compression_type_); + GetConfBool("enable-blob-garbage-collection", &enable_blob_garbage_collection_); + GetConfDouble("blob-garbage-collection-age-cutoff", &blob_garbage_collection_age_cutoff_); + if (blob_garbage_collection_age_cutoff_ <= 0) { + blob_garbage_collection_age_cutoff_ = 0.25; + } + GetConfDouble("blob-garbage-collection-force-threshold", &blob_garbage_collection_force_threshold_); + if (blob_garbage_collection_force_threshold_ <= 0) { + blob_garbage_collection_force_threshold_ = 1.0; + } + GetConfInt64("blob-cache", &block_cache_); + GetConfInt64("blob-num-shard-bits", &blob_num_shard_bits_); + + // throttle-bytes-per-second + GetConfInt("throttle-bytes-per-second", &throttle_bytes_per_second_); + if (throttle_bytes_per_second_ <= 0) { + throttle_bytes_per_second_ = 200LL << 20; //200 MB + } + + GetConfInt("max-rsync-parallel-num", &max_rsync_parallel_num_); + if (max_rsync_parallel_num_ <= 0 || max_rsync_parallel_num_ > kMaxRsyncParallelNum) { + max_rsync_parallel_num_ = kMaxRsyncParallelNum; + } + + // rocksdb_statistics_tickers + std::string open_tickers; + GetConfStr("enable-db-statistics", &open_tickers); + enable_db_statistics_ = open_tickers == "yes"; + + db_statistics_level_ = 0; + GetConfInt("db-statistics-level", &db_statistics_level_); + if (db_statistics_level_ < 0) { + db_statistics_level_ = 0; + } + + int64_t tmp_rsync_timeout_ms = -1; + GetConfInt64("rsync-timeout-ms", &tmp_rsync_timeout_ms); + if (tmp_rsync_timeout_ms <= 0) { + rsync_timeout_ms_.store(1000); + } else { + rsync_timeout_ms_.store(tmp_rsync_timeout_ms); + } + + GetConfBool("wash-data", &wash_data_); + return ret; } @@ -450,16 +799,44 @@ void PikaConf::TryPushDiffCommands(const std::string& command, const std::string } } -int PikaConf::ConfigRewrite() { - std::string userblacklist = suser_blacklist(); +void PikaConf::SetCacheType(const std::string& value) { 
+ cache_string_ = cache_set_ = cache_zset_ = cache_hash_ = cache_list_ = cache_bit_ = 0; + if (value == "") { + return; + } + std::lock_guard l(rwlock_); + + std::string lower_value = value; + pstd::StringToLower(lower_value); + lower_value.erase(remove_if(lower_value.begin(), lower_value.end(), isspace), lower_value.end()); + pstd::StringSplit(lower_value, COMMA, cache_type_); + for (auto& type : cache_type_) { + if (type == "string") { + cache_string_ = 1; + } else if (type == "set") { + cache_set_ = 1; + } else if (type == "zset") { + cache_zset_ = 1; + } else if (type == "hash") { + cache_hash_ = 1; + } else if (type == "list") { + cache_list_ = 1; + } else if (type == "bit") { + cache_bit_ = 1; + } + } +} - RWLock l(&rwlock_, true); +int PikaConf::ConfigRewrite() { + std::string userblacklist = user_blacklist_string(); + std::string scachetype = scache_type(); + std::lock_guard l(rwlock_); // Only set value for config item that can be config set. SetConfInt("timeout", timeout_); SetConfStr("requirepass", requirepass_); SetConfStr("masterauth", masterauth_); SetConfStr("userpass", userpass_); - SetConfStr("userblacklist", userblacklist); + SetConfStr("userblacklist", userblacklist_); SetConfStr("dump-prefix", bgsave_prefix_); SetConfInt("maxclients", maxclients_); SetConfInt("dump-expire", expire_dump_days_); @@ -470,27 +847,97 @@ int PikaConf::ConfigRewrite() { SetConfInt("slowlog-log-slower-than", slowlog_log_slower_than_.load()); SetConfInt("slowlog-max-len", slowlog_max_len_); SetConfStr("write-binlog", write_binlog_ ? "yes" : "no"); + SetConfStr("run-id", run_id_); + SetConfStr("replication-id", replication_id_); SetConfInt("max-cache-statistic-keys", max_cache_statistic_keys_); SetConfInt("small-compaction-threshold", small_compaction_threshold_); - SetConfInt("max-client-response-size", max_client_response_size_); + SetConfInt("small-compaction-duration-threshold", small_compaction_duration_threshold_); + SetConfInt("max-client-response-size", static_cast(max_client_response_size_)); SetConfInt("db-sync-speed", db_sync_speed_); SetConfStr("compact-cron", compact_cron_); SetConfStr("compact-interval", compact_interval_); + SetConfInt("compact-every-num-of-files", compact_every_num_of_files_); + if (compact_every_num_of_files_ < 1) { + compact_every_num_of_files_ = 1; + } + SetConfInt("force-compact-file-age-seconds", force_compact_file_age_seconds_); + if (force_compact_file_age_seconds_ < 300) { + force_compact_file_age_seconds_ = 300; + } + SetConfInt("force-compact-min-delete-ratio", force_compact_min_delete_ratio_); + if (force_compact_min_delete_ratio_ < 5) { + force_compact_min_delete_ratio_ = 5; + } + SetConfInt("dont-compact-sst-created-in-seconds", dont_compact_sst_created_in_seconds_); + if (dont_compact_sst_created_in_seconds_ < 300) { + dont_compact_sst_created_in_seconds_ = 300; + } + SetConfInt("best-delete-min-ratio", best_delete_min_ratio_); + if (best_delete_min_ratio_ < 10) { + best_delete_min_ratio_ = 10; + } + + std::string cs_; + SetConfStr("compaction-strategy", cs_); + if (cs_ == "full-compact") { + compaction_strategy_ = FullCompact; + } else if (cs_ == "obd-compact") { + compaction_strategy_ = OldestOrBestDeleteRatioSstCompact; + } else { + compaction_strategy_ = NONE; + } + + SetConfStr("disable_auto_compactions", disable_auto_compactions_ ? 
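SetCacheType above lowercases the value, strips all whitespace, and then splits on commas to switch the individual type caches on. An equivalent standalone version of that parse, where "string, Zset" enables only the string and zset caches:

#include <algorithm>
#include <cctype>
#include <set>
#include <sstream>
#include <string>

std::set<std::string> ParseCacheTypes(std::string value) {
  std::transform(value.begin(), value.end(), value.begin(),
                 [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
  value.erase(std::remove_if(value.begin(), value.end(),
                             [](unsigned char c) { return std::isspace(c) != 0; }),
              value.end());
  std::set<std::string> types;
  std::stringstream ss(value);
  std::string item;
  while (std::getline(ss, item, ',')) {
    if (!item.empty()) types.insert(item);  // e.g. {"string", "zset"}
  }
  return types;
}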
"true" : "false"); + SetConfStr("cache-type", scachetype); + SetConfInt64("least-free-disk-resume-size", least_free_disk_to_resume_); + SetConfInt64("manually-resume-interval", resume_check_interval_); + SetConfDouble("min-check-resume-ratio", min_check_resume_ratio_); SetConfInt("slave-priority", slave_priority_); + SetConfInt("throttle-bytes-per-second", throttle_bytes_per_second_); + SetConfStr("internal-used-unfinished-full-sync", pstd::Set2String(internal_used_unfinished_full_sync_, ',')); + SetConfInt("max-rsync-parallel-num", max_rsync_parallel_num_); SetConfInt("sync-window-size", sync_window_size_.load()); + SetConfInt("consensus-level", consensus_level_.load()); + SetConfInt("replication-num", replication_num_.load()); + SetConfStr("slow-cmd-list", pstd::Set2String(slow_cmd_set_, ',')); + SetConfInt("max-conn-rbuf-size", max_conn_rbuf_size_.load()); + // options for storage engine + SetConfInt("max-cache-files", max_cache_files_); + SetConfInt("max-background-compactions", max_background_compactions_); + SetConfInt("max-background-jobs", max_background_jobs_); + SetConfInt64("rate-limiter-bandwidth", rate_limiter_bandwidth_); + SetConfInt64("delayed-write-rate", delayed_write_rate_); + SetConfInt64("max-compaction-bytes", max_compaction_bytes_); + SetConfInt("max-write-buffer-num", max_write_buffer_num_); + SetConfInt64("write-buffer-size", write_buffer_size_); + SetConfInt("min-write-buffer-number-to-merge", min_write_buffer_number_to_merge_); + SetConfInt("level0-stop-writes-trigger", level0_stop_writes_trigger_); + SetConfInt("level0-slowdown-writes-trigger", level0_slowdown_writes_trigger_); + SetConfInt("level0-file-num-compaction-trigger", level0_file_num_compaction_trigger_); + SetConfInt64("arena-block-size", arena_block_size_); + SetConfStr("slotmigrate", slotmigrate_.load() ? "yes" : "no"); + SetConfInt64("slotmigrate-thread-num", slotmigrate_thread_num_); + SetConfInt64("thread-migrate-keys-num", thread_migrate_keys_num_); + SetConfStr("enable-db-statistics", enable_db_statistics_ ? "yes" : "no"); + SetConfInt("db-statistics-level", db_statistics_level_); // slaveof config item is special SetConfStr("slaveof", slaveof_); + // cache config + SetConfStr("cache-index-and-filter-blocks", cache_index_and_filter_blocks_ ? 
"yes" : "no"); + SetConfInt("cache-model", cache_mode_); + SetConfInt("zset-cache-start-direction", zset_cache_start_direction_); + SetConfInt("zset_cache_field_num_per_key", zset_cache_field_num_per_key_); if (!diff_commands_.empty()) { - std::vector filtered_items; + std::vector filtered_items; for (const auto& diff_command : diff_commands_) { if (!diff_command.second.empty()) { - slash::BaseConf::Rep::ConfItem item(slash::BaseConf::Rep::kConf, diff_command.first, diff_command.second); + pstd::BaseConf::Rep::ConfItem item(pstd::BaseConf::Rep::kConf, diff_command.first, diff_command.second); filtered_items.push_back(item); } } if (!filtered_items.empty()) { - slash::BaseConf::Rep::ConfItem comment_item(slash::BaseConf::Rep::kComment, "# Generated by CONFIG REWRITE\n"); + pstd::BaseConf::Rep::ConfItem comment_item(pstd::BaseConf::Rep::kComment, "# Generated by CONFIG REWRITE\n"); PushConfItem(comment_item); for (const auto& item : filtered_items) { PushConfItem(item); @@ -498,5 +945,63 @@ int PikaConf::ConfigRewrite() { } diff_commands_.clear(); } - return WriteBack(); + return static_cast(WriteBack()); +} + +int PikaConf::ConfigRewriteReplicationID() { + std::lock_guard l(rwlock_); + SetConfStr("replication-id", replication_id_); + SetConfStr("internal-used-unfinished-full-sync", pstd::Set2String(internal_used_unfinished_full_sync_, ',')); + if (!diff_commands_.empty()) { + std::vector filtered_items; + for (const auto& diff_command : diff_commands_) { + if (!diff_command.second.empty()) { + pstd::BaseConf::Rep::ConfItem item(pstd::BaseConf::Rep::kConf, diff_command.first, diff_command.second); + filtered_items.push_back(item); + } + } + if (!filtered_items.empty()) { + pstd::BaseConf::Rep::ConfItem comment_item(pstd::BaseConf::Rep::kComment, + "# Generated by ReplicationID CONFIG REWRITE\n"); + PushConfItem(comment_item); + for (const auto& item : filtered_items) { + PushConfItem(item); + } + } + diff_commands_.clear(); + } + return static_cast(WriteBack()); +} + +rocksdb::CompressionType PikaConf::GetCompression(const std::string& value) { + if (value == "snappy") { + return rocksdb::CompressionType::kSnappyCompression; + } else if (value == "zlib") { + return rocksdb::CompressionType::kZlibCompression; + } else if (value == "lz4") { + return rocksdb::CompressionType::kLZ4Compression; + } else if (value == "zstd") { + return rocksdb::CompressionType::kZSTD; + } + return rocksdb::CompressionType::kNoCompression; +} + +std::vector PikaConf::compression_per_level() { + std::shared_lock l(rwlock_); + std::vector types; + if (compression_per_level_.empty()) { + return types; + } + auto left = compression_per_level_.find_first_of('['); + auto right = compression_per_level_.find_first_of(']'); + + if (left == std::string::npos || right == std::string::npos || right <= left + 1) { + return types; + } + std::vector strings; + pstd::StringSplit(compression_per_level_.substr(left + 1, right - left - 1), ':', strings); + for (const auto& item : strings) { + types.push_back(GetCompression(pstd::StringTrim(item))); + } + return types; } diff --git a/tools/pika_migrate/src/pika_consensus.cc b/tools/pika_migrate/src/pika_consensus.cc new file mode 100644 index 0000000000..89f10e0317 --- /dev/null +++ b/tools/pika_migrate/src/pika_consensus.cc @@ -0,0 +1,783 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include "include/pika_consensus.h" + +#include "include/pika_client_conn.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_conf.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" + +using pstd::Status; + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_conf; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +/* Context */ + +Context::Context(std::string path) : path_(std::move(path)) {} + +Status Context::StableSave() { + char* p = save_->GetData(); + memcpy(p, &(applied_index_.b_offset.filenum), sizeof(uint32_t)); + p += 4; + memcpy(p, &(applied_index_.b_offset.offset), sizeof(uint64_t)); + p += 8; + memcpy(p, &(applied_index_.l_offset.term), sizeof(uint32_t)); + p += 4; + memcpy(p, &(applied_index_.l_offset.index), sizeof(uint64_t)); + return Status::OK(); +} + +Status Context::Init() { + if (!pstd::FileExists(path_)) { + Status s = pstd::NewRWFile(path_, save_); + if (!s.ok()) { + LOG(FATAL) << "Context new file failed " << s.ToString(); + } + StableSave(); + } else { + std::unique_ptr tmp_file; + Status s = pstd::NewRWFile(path_, tmp_file); + save_.reset(tmp_file.release()); + if (!s.ok()) { + LOG(FATAL) << "Context new file failed " << s.ToString(); + } + } + if (save_->GetData()) { + memcpy(reinterpret_cast(&(applied_index_.b_offset.filenum)), save_->GetData(), sizeof(uint32_t)); + memcpy(reinterpret_cast(&(applied_index_.b_offset.offset)), save_->GetData() + 4, sizeof(uint64_t)); + memcpy(reinterpret_cast(&(applied_index_.l_offset.term)), save_->GetData() + 12, sizeof(uint32_t)); + memcpy(reinterpret_cast(&(applied_index_.l_offset.index)), save_->GetData() + 16, sizeof(uint64_t)); + return Status::OK(); + } else { + return Status::Corruption("Context init error"); + } +} + +void Context::UpdateAppliedIndex(const LogOffset& offset) { + std::lock_guard l(rwlock_); + LogOffset cur_offset; + applied_win_.Update(SyncWinItem(offset), SyncWinItem(offset), &cur_offset); + if (cur_offset > applied_index_) { + applied_index_ = cur_offset; + StableSave(); + } +} + +void Context::Reset(const LogOffset& offset) { + std::lock_guard l(rwlock_); + applied_index_ = offset; + applied_win_.Reset(); + StableSave(); +} + +/* SyncProgress */ + +std::string MakeSlaveKey(const std::string& ip, int port) { + return ip + ":" + std::to_string(port); +} + +std::shared_ptr SyncProgress::GetSlaveNode(const std::string& ip, int port) { + std::string slave_key = MakeSlaveKey(ip, port); + std::shared_lock l(rwlock_); + if (slaves_.find(slave_key) == slaves_.end()) { + return nullptr; + } + return slaves_[slave_key]; +} + +std::unordered_map> SyncProgress::GetAllSlaveNodes() { + std::shared_lock l(rwlock_); + return slaves_; +} + +Status SyncProgress::AddSlaveNode(const std::string& ip, int port, const std::string& db_name, int session_id) { + std::string slave_key = MakeSlaveKey(ip, port); + std::shared_ptr exist_ptr = GetSlaveNode(ip, port); + if (exist_ptr) { + LOG(WARNING) << "SlaveNode " << exist_ptr->ToString() << " already exist, set new session " << session_id; + exist_ptr->SetSessionId(session_id); + return Status::OK(); + } + std::shared_ptr slave_ptr = std::make_shared(ip, port, db_name, session_id); + slave_ptr->SetLastSendTime(pstd::NowMicros()); + slave_ptr->SetLastRecvTime(pstd::NowMicros()); + + { + std::lock_guard l(rwlock_); + slaves_[slave_key] = slave_ptr; + // add slave to match_index + 
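Context::StableSave and Context::Init above agree on a fixed 20-byte, host-endian on-disk record for applied_index_. The layout spelled out, with an encoder mirroring StableSave:

#include <cstdint>
#include <cstring>

// On-disk record shared by Context::StableSave() and Context::Init():
//   bytes [0, 4)   applied_index_.b_offset.filenum (uint32_t)
//   bytes [4, 12)  applied_index_.b_offset.offset  (uint64_t)
//   bytes [12, 16) applied_index_.l_offset.term    (uint32_t)
//   bytes [16, 20) applied_index_.l_offset.index   (uint64_t)
// Host-endian, so the file is only portable between same-endian machines.
void EncodeAppliedIndex(uint32_t filenum, uint64_t offset, uint32_t term,
                        uint64_t index, char* p) {
  std::memcpy(p, &filenum, sizeof(uint32_t));  // p must point at >= 20 bytes
  std::memcpy(p + 4, &offset, sizeof(uint64_t));
  std::memcpy(p + 12, &term, sizeof(uint32_t));
  std::memcpy(p + 16, &index, sizeof(uint64_t));
}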
match_index_[slave_key] = LogOffset(); + } + return Status::OK(); +} + +Status SyncProgress::RemoveSlaveNode(const std::string& ip, int port) { + std::string slave_key = MakeSlaveKey(ip, port); + { + std::lock_guard l(rwlock_); + slaves_.erase(slave_key); + // remove slave to match_index + match_index_.erase(slave_key); + } + return Status::OK(); +} + +Status SyncProgress::Update(const std::string& ip, int port, const LogOffset& start, const LogOffset& end, + LogOffset* committed_index) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); + } + + LogOffset acked_offset; + { + // update slave_ptr + std::lock_guard l(slave_ptr->slave_mu); + Status s = slave_ptr->Update(start, end, &acked_offset); + if (!s.ok()) { + return s; + } + // update match_index_ + // shared slave_ptr->slave_mu + match_index_[ip + std::to_string(port)] = acked_offset; + } + + return Status::OK(); +} + +int SyncProgress::SlaveSize() { + std::shared_lock l(rwlock_); + return static_cast(slaves_.size()); +} + +/* MemLog */ + +MemLog::MemLog() = default; + +int MemLog::Size() { return static_cast(logs_.size()); } + +// keep mem_log [mem_log.begin, offset] +Status MemLog::TruncateTo(const LogOffset& offset) { + std::lock_guard l_logs(logs_mu_); + int index = InternalFindLogByBinlogOffset(offset); + if (index < 0) { + return Status::Corruption("Cant find correct index"); + } + last_offset_ = logs_[index].offset; + logs_.erase(logs_.begin() + index + 1, logs_.end()); + return Status::OK(); +} + +void MemLog::Reset(const LogOffset& offset) { + std::lock_guard l_logs(logs_mu_); + logs_.erase(logs_.begin(), logs_.end()); + last_offset_ = offset; +} + +bool MemLog::FindLogItem(const LogOffset& offset, LogOffset* found_offset) { + std::lock_guard l_logs(logs_mu_); + int index = InternalFindLogByLogicIndex(offset); + if (index < 0) { + return false; + } + *found_offset = logs_[index].offset; + return true; +} + +int MemLog::InternalFindLogByLogicIndex(const LogOffset& offset) { + for (size_t i = 0; i < logs_.size(); ++i) { + if (logs_[i].offset.l_offset.index > offset.l_offset.index) { + return -1; + } + if (logs_[i].offset.l_offset.index == offset.l_offset.index) { + return static_cast(i); + } + } + return -1; +} + +int MemLog::InternalFindLogByBinlogOffset(const LogOffset& offset) { + for (size_t i = 0; i < logs_.size(); ++i) { + if (logs_[i].offset > offset) { + return -1; + } + if (logs_[i].offset == offset) { + return static_cast(i); + } + } + return -1; +} + +/* ConsensusCoordinator */ + +ConsensusCoordinator::ConsensusCoordinator(const std::string& db_name) + : db_name_(db_name) { + std::string db_log_path = g_pika_conf->log_path() + "log_" + db_name + "/"; + std::string log_path = db_log_path; + context_ = std::make_shared(log_path + kContext); + stable_logger_ = std::make_shared(db_name, log_path); + mem_logger_ = std::make_shared(); +} + +ConsensusCoordinator::~ConsensusCoordinator() = default; + +// since it is invoked in constructor all locks not hold +void ConsensusCoordinator::Init() { + // load committed_index_ & applied_index + context_->Init(); + committed_index_ = context_->applied_index_; + + // load term_ + term_ = stable_logger_->Logger()->term(); + + LOG(INFO) << DBInfo(db_name_).ToString() << "Restore applied index " + << context_->applied_index_.ToString() << " current term " << term_; + if (committed_index_ == LogOffset()) { + return; + } + // load mem_logger_ + mem_logger_->SetLastOffset(committed_index_); + 
net::RedisParserSettings settings; + settings.DealMessage = &(ConsensusCoordinator::InitCmd); + net::RedisParser redis_parser; + redis_parser.RedisParserInit(REDIS_PARSER_REQUEST, settings); + PikaBinlogReader binlog_reader; + int res = + binlog_reader.Seek(stable_logger_->Logger(), committed_index_.b_offset.filenum, committed_index_.b_offset.offset); + if (res != 0) { + LOG(FATAL) << DBInfo(db_name_).ToString() << "Binlog reader init failed"; + } + + while (true) { + LogOffset offset; + std::string binlog; + Status s = binlog_reader.Get(&binlog, &(offset.b_offset.filenum), &(offset.b_offset.offset)); + if (s.IsEndFile()) { + break; + } else if (s.IsCorruption() || s.IsIOError()) { + LOG(FATAL) << DBInfo(db_name_).ToString() << "Read Binlog error"; + } + BinlogItem item; + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog, &item)) { + LOG(FATAL) << DBInfo(db_name_).ToString() << "Binlog item decode failed"; + } + offset.l_offset.term = item.term_id(); + offset.l_offset.index = item.logic_id(); + + redis_parser.data = static_cast(&db_name_); + const char* redis_parser_start = binlog.data() + BINLOG_ENCODE_LEN; + int redis_parser_len = static_cast(binlog.size()) - BINLOG_ENCODE_LEN; + int processed_len = 0; + net::RedisParserStatus ret = redis_parser.ProcessInputBuffer(redis_parser_start, redis_parser_len, &processed_len); + if (ret != net::kRedisParserDone) { + LOG(FATAL) << DBInfo(db_name_).ToString() << "Redis parser parse failed"; + return; + } + auto arg = static_cast(redis_parser.data); + std::shared_ptr cmd_ptr = arg->cmd_ptr; + delete arg; + redis_parser.data = nullptr; + + mem_logger_->AppendLog(MemLog::LogItem(offset, cmd_ptr, nullptr, nullptr)); + } +} + +Status ConsensusCoordinator::Reset(const LogOffset& offset) { + context_->Reset(offset); + { + std::lock_guard l(index_mu_); + committed_index_ = offset; + } + + UpdateTerm(offset.l_offset.term); + Status s = stable_logger_->Logger()->SetProducerStatus(offset.b_offset.filenum, offset.b_offset.offset, + offset.l_offset.term, offset.l_offset.index); + if (!s.ok()) { + LOG(WARNING) << DBInfo(db_name_).ToString() << "Consensus reset status failed " + << s.ToString(); + return s; + } + + stable_logger_->SetFirstOffset(offset); + + stable_logger_->Logger()->Lock(); + mem_logger_->Reset(offset); + stable_logger_->Logger()->Unlock(); + return Status::OK(); +} + +Status ConsensusCoordinator::ProposeLog(const std::shared_ptr& cmd_ptr) { + std::vector keys = cmd_ptr->current_key(); + // slotkey shouldn't add binlog + if (cmd_ptr->name() == kCmdNameSAdd && !keys.empty() && + (keys[0].compare(0, SlotKeyPrefix.length(), SlotKeyPrefix) == 0 || keys[0].compare(0, SlotTagPrefix.length(), SlotTagPrefix) == 0)) { + return Status::OK(); + } + + // make sure stable log and mem log consistent + Status s = InternalAppendLog(cmd_ptr); + if (!s.ok()) { + return s; + } + + g_pika_server->SignalAuxiliary(); + return Status::OK(); +} + +Status ConsensusCoordinator::InternalAppendLog(const std::shared_ptr& cmd_ptr) { + return InternalAppendBinlog(cmd_ptr); +} + +// precheck if prev_offset match && drop this log if this log exist +Status ConsensusCoordinator::ProcessLeaderLog(const std::shared_ptr& cmd_ptr, const BinlogItem& attribute) { + LogOffset last_index = mem_logger_->last_offset(); + if (attribute.logic_id() < last_index.l_offset.index) { + LOG(WARNING) << DBInfo(db_name_).ToString() << "Drop log from leader logic_id " + << attribute.logic_id() << " cur last index " << last_index.l_offset.index; + return Status::OK(); + } + 
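
In the flushdb branch just below, ProcessLeaderLog must let all previously submitted async DB writes drain before applying the flush, and it does so by polling with an exponential backoff capped at 3 seconds. The same pattern in isolation; the predicate is a hypothetical stand-in for the GetUnfinishedAsyncWriteDBTaskCount check:

#include <algorithm>
#include <chrono>
#include <functional>
#include <thread>

// Poll until done() is true, doubling the sleep interval up to a 3s cap,
// mirroring the wait_ms logic in the flushdb branch below.
void WaitWithCappedBackoff(const std::function<bool()>& done) {
  int wait_ms = 250;
  while (!done()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(wait_ms));
    wait_ms = std::min(wait_ms * 2, 3000);
  }
}
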
+ auto opt = cmd_ptr->argv()[0]; + if (pstd::StringToLower(opt) != kCmdNameFlushdb) { + // apply binlog in sync way + Status s = InternalAppendLog(cmd_ptr); + // apply db in async way + InternalApplyFollower(cmd_ptr); + } else { + // this is a flushdb-binlog, both apply binlog and apply db are in sync way + // ensure all writeDB task that submitted before has finished before we exec this flushdb + int32_t wait_ms = 250; + while (g_pika_rm->GetUnfinishedAsyncWriteDBTaskCount(db_name_) > 0) { + std::this_thread::sleep_for(std::chrono::milliseconds(wait_ms)); + wait_ms *= 2; + wait_ms = wait_ms < 3000 ? wait_ms : 3000; + } + // apply flushdb-binlog in sync way + Status s = InternalAppendLog(cmd_ptr); + // applyDB in sync way + PikaReplBgWorker::WriteDBInSyncWay(cmd_ptr); + } + return Status::OK(); +} + +Status ConsensusCoordinator::UpdateSlave(const std::string& ip, int port, const LogOffset& start, + const LogOffset& end) { + LogOffset committed_index; + Status s = sync_pros_.Update(ip, port, start, end, &committed_index); + if (!s.ok()) { + return s; + } + + return Status::OK(); +} + +Status ConsensusCoordinator::InternalAppendBinlog(const std::shared_ptr& cmd_ptr) { + std::string content = cmd_ptr->ToRedisProtocol(); + Status s = stable_logger_->Logger()->Put(content); + if (!s.ok()) { + std::string db_name = cmd_ptr->db_name().empty() ? g_pika_conf->default_db() : cmd_ptr->db_name(); + std::shared_ptr db = g_pika_server->GetDB(db_name); + if (db) { + db->SetBinlogIoError(); + } + return s; + } + return stable_logger_->Logger()->IsOpened(); +} + +Status ConsensusCoordinator::AddSlaveNode(const std::string& ip, int port, int session_id) { + Status s = sync_pros_.AddSlaveNode(ip, port, db_name_, session_id); + if (!s.ok()) { + return s; + } + return Status::OK(); +} + +Status ConsensusCoordinator::RemoveSlaveNode(const std::string& ip, int port) { + Status s = sync_pros_.RemoveSlaveNode(ip, port); + if (!s.ok()) { + return s; + } + return Status::OK(); +} + +void ConsensusCoordinator::UpdateTerm(uint32_t term) { + stable_logger_->Logger()->Lock(); + std::lock_guard l(term_rwlock_); + term_ = term; + stable_logger_->Logger()->SetTerm(term); + stable_logger_->Logger()->Unlock(); +} + +uint32_t ConsensusCoordinator::term() { + std::shared_lock l(term_rwlock_); + return term_; +} + +void ConsensusCoordinator::InternalApplyFollower(const std::shared_ptr& cmd_ptr) { + g_pika_rm->ScheduleWriteDBTask(cmd_ptr, db_name_); +} + +int ConsensusCoordinator::InitCmd(net::RedisParser* parser, const net::RedisCmdArgsType& argv) { + auto db_name = static_cast(parser->data); + std::string opt = argv[0]; + std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(pstd::StringToLower(opt)); + if (!c_ptr) { + LOG(WARNING) << "Command " << opt << " not in the command table"; + return -1; + } + // Initial + c_ptr->Initial(argv, *db_name); + if (!c_ptr->res().ok()) { + LOG(WARNING) << "Fail to initial command from binlog: " << opt; + return -1; + } + parser->data = static_cast(new CmdPtrArg(c_ptr)); + return 0; +} + +Status ConsensusCoordinator::TruncateTo(const LogOffset& offset) { + LOG(INFO) << DBInfo(db_name_).ToString() << "Truncate to " << offset.ToString(); + LogOffset founded_offset; + Status s = FindLogicOffset(offset.b_offset, offset.l_offset.index, &founded_offset); + if (!s.ok()) { + return s; + } + LOG(INFO) << DBInfo(db_name_).ToString() << " Founded truncate pos " + << founded_offset.ToString(); + LogOffset committed = committed_index(); + stable_logger_->Logger()->Lock(); + if 
(founded_offset.l_offset.index == committed.l_offset.index) { + mem_logger_->Reset(committed); + } else { + Status s = mem_logger_->TruncateTo(founded_offset); + if (!s.ok()) { + stable_logger_->Logger()->Unlock(); + return s; + } + } + s = stable_logger_->TruncateTo(founded_offset); + if (!s.ok()) { + stable_logger_->Logger()->Unlock(); + return s; + } + stable_logger_->Logger()->Unlock(); + return Status::OK(); +} + +Status ConsensusCoordinator::GetBinlogOffset(const BinlogOffset& start_offset, LogOffset* log_offset) { + PikaBinlogReader binlog_reader; + int res = binlog_reader.Seek(stable_logger_->Logger(), start_offset.filenum, start_offset.offset); + if (res != 0) { + return Status::Corruption("Binlog reader init failed"); + } + std::string binlog; + BinlogOffset offset; + Status s = binlog_reader.Get(&binlog, &(offset.filenum), &(offset.offset)); + if (!s.ok()) { + return Status::Corruption("Binlog reader get failed"); + } + BinlogItem item; + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog, &item)) { + return Status::Corruption("Binlog item decode failed"); + } + log_offset->b_offset = offset; + log_offset->l_offset.term = item.term_id(); + log_offset->l_offset.index = item.logic_id(); + return Status::OK(); +} + +// get binlog offset range [start_offset, end_offset] +// start_offset 0,0 end_offset 1,129, result will include binlog (1,129) +// start_offset 0,0 end_offset 1,0, result will NOT include binlog (1,xxx) +// start_offset 0,0 end_offset 0,0, resulet will NOT include binlog(0,xxx) +Status ConsensusCoordinator::GetBinlogOffset(const BinlogOffset& start_offset, const BinlogOffset& end_offset, + std::vector* log_offset) { + PikaBinlogReader binlog_reader; + int res = binlog_reader.Seek(stable_logger_->Logger(), start_offset.filenum, start_offset.offset); + if (res != 0) { + return Status::Corruption("Binlog reader init failed"); + } + while (true) { + BinlogOffset b_offset; + std::string binlog; + Status s = binlog_reader.Get(&binlog, &(b_offset.filenum), &(b_offset.offset)); + if (s.IsEndFile()) { + return Status::OK(); + } else if (s.IsCorruption() || s.IsIOError()) { + return Status::Corruption("Read Binlog error"); + } + BinlogItem item; + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog, &item)) { + return Status::Corruption("Binlog item decode failed"); + } + LogOffset offset; + offset.b_offset = b_offset; + offset.l_offset.term = item.term_id(); + offset.l_offset.index = item.logic_id(); + if (offset.b_offset > end_offset) { + return Status::OK(); + } + log_offset->push_back(offset); + } + return Status::OK(); +} + +Status ConsensusCoordinator::FindBinlogFileNum(const std::map& binlogs, uint64_t target_index, + uint32_t start_filenum, uint32_t* founded_filenum) { + // low boundary & high boundary + uint32_t lb_binlogs = binlogs.begin()->first; + uint32_t hb_binlogs = binlogs.rbegin()->first; + bool first_time_left = false; + bool first_time_right = false; + uint32_t filenum = start_filenum; + while (true) { + LogOffset first_offset; + Status s = GetBinlogOffset(BinlogOffset(filenum, 0), &first_offset); + if (!s.ok()) { + return s; + } + if (target_index < first_offset.l_offset.index) { + if (first_time_right) { + // last filenum + filenum = filenum - 1; + break; + } + // move left + first_time_left = true; + if (filenum == 0 || filenum - 1 < lb_binlogs) { + return Status::NotFound(std::to_string(target_index) + " hit low boundary"); + } + filenum = filenum - 1; + } else if (target_index > 
first_offset.l_offset.index) { + if (first_time_left) { + break; + } + // move right + first_time_right = true; + if (filenum + 1 > hb_binlogs) { + break; + } + filenum = filenum + 1; + } else { + break; + } + } + *founded_filenum = filenum; + return Status::OK(); +} + +Status ConsensusCoordinator::FindLogicOffsetBySearchingBinlog(const BinlogOffset& hint_offset, uint64_t target_index, + LogOffset* found_offset) { + LOG(INFO) << DBInfo(db_name_).ToString() << "FindLogicOffsetBySearchingBinlog hint offset " + << hint_offset.ToString() << " target_index " << target_index; + BinlogOffset start_offset; + std::map binlogs; + if (!stable_logger_->GetBinlogFiles(&binlogs)) { + return Status::Corruption("Get binlog files failed"); + } + if (binlogs.empty()) { + return Status::NotFound("Binlogs is empty"); + } + if (binlogs.find(hint_offset.filenum) == binlogs.end()) { + start_offset = BinlogOffset(binlogs.crbegin()->first, 0); + } else { + start_offset = hint_offset; + } + + uint32_t found_filenum; + Status s = FindBinlogFileNum(binlogs, target_index, start_offset.filenum, &found_filenum); + if (!s.ok()) { + return s; + } + + LOG(INFO) << DBInfo(db_name_).ToString() << "FindBinlogFilenum res " // NOLINT + << found_filenum; + BinlogOffset traversal_start(found_filenum, 0); + BinlogOffset traversal_end(found_filenum + 1, 0); + std::vector offsets; + s = GetBinlogOffset(traversal_start, traversal_end, &offsets); + if (!s.ok()) { + return s; + } + for (auto& offset : offsets) { + if (offset.l_offset.index == target_index) { + LOG(INFO) << DBInfo(db_name_).ToString() << "Founded " << target_index << " " + << offset.ToString(); + *found_offset = offset; + return Status::OK(); + } + } + return Status::NotFound("Logic index not found"); +} + +Status ConsensusCoordinator::FindLogicOffset(const BinlogOffset& start_offset, uint64_t target_index, + LogOffset* found_offset) { + LogOffset possible_offset; + Status s = GetBinlogOffset(start_offset, &possible_offset); + if (!s.ok() || possible_offset.l_offset.index != target_index) { + if (!s.ok()) { + LOG(INFO) << DBInfo(db_name_).ToString() << "GetBinlogOffset res: " << s.ToString(); + } else { + LOG(INFO) << DBInfo(db_name_).ToString() << "GetBInlogOffset res: " << s.ToString() + << " possible_offset " << possible_offset.ToString() << " target_index " << target_index; + } + return FindLogicOffsetBySearchingBinlog(start_offset, target_index, found_offset); + } + *found_offset = possible_offset; + return Status::OK(); +} + +Status ConsensusCoordinator::GetLogsBefore(const BinlogOffset& start_offset, std::vector* hints) { + BinlogOffset traversal_end = start_offset; + BinlogOffset traversal_start(traversal_end.filenum, 0); + traversal_start.filenum = traversal_start.filenum == 0 ? 
0 : traversal_start.filenum - 1;
+  std::map<uint32_t, std::string> binlogs;
+  if (!stable_logger_->GetBinlogFiles(&binlogs)) {
+    return Status::Corruption("Get binlog files failed");
+  }
+  if (binlogs.find(traversal_start.filenum) == binlogs.end()) {
+    traversal_start.filenum = traversal_end.filenum;
+  }
+  std::vector<LogOffset> res;
+  Status s = GetBinlogOffset(traversal_start, traversal_end, &res);
+  if (!s.ok()) {
+    return s;
+  }
+  if (res.size() > 100) {
+    res.assign(res.end() - 100, res.end());
+  }
+  *hints = res;
+  return Status::OK();
+}
+
+Status ConsensusCoordinator::LeaderNegotiate(const LogOffset& f_last_offset, bool* reject,
+                                             std::vector<LogOffset>* hints) {
+  uint64_t f_index = f_last_offset.l_offset.index;
+  LOG(INFO) << DBInfo(db_name_).ToString() << "LeaderNegotiate follower last offset "
+            << f_last_offset.ToString() << " first_offset " << stable_logger_->first_offset().ToString()
+            << " last_offset " << mem_logger_->last_offset().ToString();
+  *reject = true;
+  if (f_index > mem_logger_->last_offset().l_offset.index) {
+    // hints start from last_offset() - 100
+    Status s = GetLogsBefore(mem_logger_->last_offset().b_offset, hints);
+    if (!s.ok()) {
+      LOG(WARNING) << f_index << " is larger than last index " << mem_logger_->last_offset().ToString()
+                   << " get logs before last index failed " << s.ToString();
+      return s;
+    }
+    LOG(INFO) << DBInfo(db_name_).ToString()
+              << "follower index larger than last_offset index, get logs before "
+              << mem_logger_->last_offset().ToString();
+    return Status::OK();
+  }
+  if (f_index < stable_logger_->first_offset().l_offset.index) {
+    // need full sync
+    LOG(INFO) << DBInfo(db_name_).ToString() << f_index << " is smaller than current first index "
+              << stable_logger_->first_offset().ToString();
+    return Status::NotFound("logic index");
+  }
+  if (f_last_offset.l_offset.index == 0) {
+    *reject = false;
+    return Status::OK();
+  }
+
+  LogOffset found_offset;
+  Status s = FindLogicOffset(f_last_offset.b_offset, f_index, &found_offset);
+  if (!s.ok()) {
+    if (s.IsNotFound()) {
+      LOG(INFO) << DBInfo(db_name_).ToString() << f_last_offset.ToString() << " not found "
+                << s.ToString();
+      return s;
+    } else {
+      LOG(WARNING) << DBInfo(db_name_).ToString() << "find logic offset failed "
+                   << s.ToString();
+      return s;
+    }
+  }
+
+  if (found_offset.l_offset.term != f_last_offset.l_offset.term || !(f_last_offset.b_offset == found_offset.b_offset)) {
+    Status s = GetLogsBefore(found_offset.b_offset, hints);
+    if (!s.ok()) {
+      LOG(WARNING) << DBInfo(db_name_).ToString() << "Try to get logs before "
+                   << found_offset.ToString() << " failed";
+      return s;
+    }
+    return Status::OK();
+  }
+
+  LOG(INFO) << DBInfo(db_name_).ToString() << "Found equal offset " << found_offset.ToString();
+  *reject = false;
+  return Status::OK();
+}
+
+// memlog order: committed_index, [committed_index + 1, memlogger.end()]
+Status ConsensusCoordinator::FollowerNegotiate(const std::vector<LogOffset>& hints, LogOffset* reply_offset) {
+  if (hints.empty()) {
+    return Status::Corruption("hints empty");
+  }
+  LOG(INFO) << DBInfo(db_name_).ToString() << "FollowerNegotiate from " << hints[0].ToString()
+            << " to " << hints[hints.size() - 1].ToString();
+  if (mem_logger_->last_offset().l_offset.index < hints[0].l_offset.index) {
+    *reply_offset = mem_logger_->last_offset();
+    return Status::OK();
+  }
+  if (committed_index().l_offset.index > hints[hints.size() - 1].l_offset.index) {
+    return Status::Corruption("invalid hints all smaller than committed_index");
+  }
+  if (mem_logger_->last_offset().l_offset.index > hints[hints.size() - 1].l_offset.index) {
+    const auto& truncate_offset = hints[hints.size() - 1];
+    // truncate to hints end
+    Status s = TruncateTo(truncate_offset);
+    if (!s.ok()) {
+      return s;
+    }
+  }
+
+  LogOffset committed = committed_index();
+  // iterate hints from newest to oldest; use a signed index so the loop
+  // terminates (an unsigned counter would wrap past zero)
+  for (int i = static_cast<int>(hints.size()) - 1; i >= 0; i--) {
+    if (hints[i].l_offset.index < committed.l_offset.index) {
+      return Status::Corruption("hints less than committed index");
+    }
+    if (hints[i].l_offset.index == committed.l_offset.index) {
+      if (hints[i].l_offset.term == committed.l_offset.term) {
+        Status s = TruncateTo(hints[i]);
+        if (!s.ok()) {
+          return s;
+        }
+        *reply_offset = mem_logger_->last_offset();
+        return Status::OK();
+      }
+    }
+    LogOffset found_offset;
+    bool res = mem_logger_->FindLogItem(hints[i], &found_offset);
+    if (!res) {
+      return Status::Corruption("hints not found " + hints[i].ToString());
+    }
+    if (found_offset.l_offset.term == hints[i].l_offset.term) {
+      // truncate to found_offset
+      Status s = TruncateTo(found_offset);
+      if (!s.ok()) {
+        return s;
+      }
+      *reply_offset = mem_logger_->last_offset();
+      return Status::OK();
+    }
+  }
+
+  Status s = TruncateTo(hints[0]);
+  if (!s.ok()) {
+    return s;
+  }
+  *reply_offset = mem_logger_->last_offset();
+  return Status::OK();
+}
diff --git a/tools/pika_migrate/src/pika_data_distribution.cc b/tools/pika_migrate/src/pika_data_distribution.cc
index e5e55dc51a..49d6af125e 100644
--- a/tools/pika_migrate/src/pika_data_distribution.cc
+++ b/tools/pika_migrate/src/pika_data_distribution.cc
@@ -5,44 +5,7 @@
 
 #include "include/pika_data_distribution.h"
 
-void HashModulo::Init() {
-}
+void HashModulo::Init() {}
 
-uint32_t HashModulo::Distribute(const std::string& str, uint32_t partition_num) {
-  return std::hash<std::string>()(str) % partition_num;
-}
-void Crc32::Init() {
-  Crc32TableInit(IEEE_POLY);
-}
-void Crc32::Crc32TableInit(uint32_t poly) {
-  int i, j;
-  for (i = 0; i < 256; i ++) {
-    uint32_t crc = i;
-    for (j = 0; j < 8; j ++) {
-      if (crc & 1) {
-        crc = (crc >> 1) ^ poly;
-      } else {
-        crc = (crc >> 1);
-      }
-    }
-    crc32tab[i] = crc;
-  }
-}
-
-uint32_t Crc32::Distribute(const std::string &str, uint32_t partition_num) {
-  uint32_t crc = Crc32Update(0, str.data(), (int)str.size());
-  // partition_num need to minus 1
-  assert(partition_num > 1);
-  return (int)(crc & (partition_num == 0 ? 0 : (partition_num - 1)));
-}
-
-uint32_t Crc32::Crc32Update(uint32_t crc, const char* buf, int len) {
-  int i;
-  crc = ~crc;
-  for (i = 0; i < len; i ++) {
-    crc = crc32tab[(uint8_t)((char)crc ^ buf[i])] ^ (crc >> 8);
-  }
-  return ~crc;
-}
diff --git a/tools/pika_migrate/src/pika_db.cc b/tools/pika_migrate/src/pika_db.cc
new file mode 100644
index 0000000000..6830b63571
--- /dev/null
+++ b/tools/pika_migrate/src/pika_db.cc
@@ -0,0 +1,643 @@
+// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
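
For reference, Context::StableSave and Context::Init in pika_consensus.cc above persist the applied index as a fixed 24-byte record: filenum (4 bytes), offset (8), term (4), index (8). A standalone encode sketch with the same field order and byte offsets; the function name is illustrative:

#include <cstdint>
#include <cstring>

// filenum(4) | offset(8) | term(4) | index(8) -> 24 bytes, matching the
// 0/4/12/16 byte offsets read back by Context::Init.
void EncodeAppliedIndex(char* p, uint32_t filenum, uint64_t offset,
                        uint32_t term, uint64_t index) {
  memcpy(p, &filenum, sizeof(filenum));
  memcpy(p + 4, &offset, sizeof(offset));
  memcpy(p + 12, &term, sizeof(term));
  memcpy(p + 16, &index, sizeof(index));
}
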
+ +#include +#include + +#include "include/pika_db.h" + +#include "include/pika_cmd_table_manager.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "mutex_impl.h" + +using pstd::Status; +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +std::string DBPath(const std::string& path, const std::string& db_name) { + char buf[100]; + snprintf(buf, sizeof(buf), "%s/", db_name.data()); + return path + buf; +} + +std::string DbSyncPath(const std::string& sync_path, const std::string& db_name) { + char buf[256]; + snprintf(buf, sizeof(buf), "%s/", db_name.data()); + return sync_path + buf; +} + +DB::DB(std::string db_name, const std::string& db_path, + const std::string& log_path) + : db_name_(db_name), bgsave_engine_(nullptr) { + db_path_ = DBPath(db_path, db_name_); + bgsave_sub_path_ = db_name; + dbsync_path_ = DbSyncPath(g_pika_conf->db_sync_path(), db_name); + log_path_ = DBPath(log_path, "log_" + db_name_); + storage_ = std::make_shared(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); + rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), db_path_); + pstd::CreatePath(db_path_); + pstd::CreatePath(log_path_); + lock_mgr_ = std::make_shared(1000, 0, std::make_shared()); + binlog_io_error_.store(false); + opened_ = s.ok(); + assert(storage_); + assert(s.ok()); + LOG(INFO) << db_name_ << " DB Success"; +} + +DB::~DB() { + StopKeyScan(); +} + +bool DB::WashData() { + rocksdb::ReadOptions read_options; + rocksdb::Status s; + auto suffix_len = storage::ParsedBaseDataValue::GetkBaseDataValueSuffixLength(); + for (int i = 0; i < g_pika_conf->db_instance_num(); i++) { + rocksdb::WriteBatch batch; + auto handle = storage_->GetHashCFHandles(i)[1]; + auto db = storage_->GetDBByIndex(i); + auto it(db->NewIterator(read_options, handle)); + for (it->SeekToFirst(); it->Valid(); it->Next()) { + std::string key = it->key().ToString(); + std::string value = it->value().ToString(); + if (value.size() < suffix_len) { + // need to wash + storage::BaseDataValue internal_value(value); + batch.Put(handle, key, internal_value.Encode()); + } + } + delete it; + s = db->Write(storage_->GetDefaultWriteOptions(i), &batch); + if (!s.ok()) { + return false; + } + } + return true; +} + +std::string DB::GetDBName() { return db_name_; } + +void DB::BgSaveDB() { + std::shared_lock l(dbs_rw_); + std::lock_guard ml(bgsave_protector_); + if (bgsave_info_.bgsaving) { + return; + } + bgsave_info_.bgsaving = true; + auto bg_task_arg = new BgTaskArg(); + bg_task_arg->db = shared_from_this(); + g_pika_server->BGSaveTaskSchedule(&DoBgSave, static_cast(bg_task_arg)); +} + +void DB::SetBinlogIoError() { return binlog_io_error_.store(true); } +void DB::SetBinlogIoErrorrelieve() { return binlog_io_error_.store(false); } +bool DB::IsBinlogIoError() { return binlog_io_error_.load(); } +std::shared_ptr DB::LockMgr() { return lock_mgr_; } +std::shared_ptr DB::cache() const { return cache_; } +std::shared_ptr DB::storage() const { return storage_; } + +void DB::KeyScan() { + std::lock_guard ml(key_scan_protector_); + if (key_scan_info_.key_scaning_) { + return; + } + + key_scan_info_.duration = -2; // duration -2 mean the task in waiting status, + // has not been scheduled for exec + auto bg_task_arg = new BgTaskArg(); + bg_task_arg->db = shared_from_this(); + g_pika_server->KeyScanTaskSchedule(&DoKeyScan, reinterpret_cast(bg_task_arg)); +} + +bool DB::IsKeyScaning() { + std::lock_guard 
ml(key_scan_protector_); + return key_scan_info_.key_scaning_; +} + +void DB::RunKeyScan() { + Status s; + std::vector new_key_infos; + + InitKeyScan(); + std::shared_lock l(dbs_rw_); + s = GetKeyNum(&new_key_infos); + key_scan_info_.duration = static_cast(time(nullptr) - key_scan_info_.start_time); + + std::lock_guard lm(key_scan_protector_); + if (s.ok()) { + key_scan_info_.key_infos = new_key_infos; + } + key_scan_info_.key_scaning_ = false; +} + +Status DB::GetKeyNum(std::vector* key_info) { + std::lock_guard l(key_info_protector_); + if (key_scan_info_.key_scaning_) { + *key_info = key_scan_info_.key_infos; + return Status::OK(); + } + InitKeyScan(); + key_scan_info_.key_scaning_ = true; + key_scan_info_.duration = -2; // duration -2 mean the task in waiting status, + // has not been scheduled for exec + rocksdb::Status s = storage_->GetKeyNum(key_info); + key_scan_info_.key_scaning_ = false; + if (!s.ok()) { + return Status::Corruption(s.ToString()); + } + key_scan_info_.key_infos = *key_info; + key_scan_info_.duration = static_cast(time(nullptr) - key_scan_info_.start_time); + return Status::OK(); +} + +void DB::StopKeyScan() { + std::shared_lock rwl(dbs_rw_); + std::lock_guard ml(key_scan_protector_); + + if (!key_scan_info_.key_scaning_) { + return; + } + storage_->StopScanKeyNum(); + key_scan_info_.key_scaning_ = false; +} + +void DB::ScanDatabase(const storage::DataType& type) { + std::shared_lock l(dbs_rw_); + storage_->ScanDatabase(type); +} + +KeyScanInfo DB::GetKeyScanInfo() { + std::lock_guard lm(key_scan_protector_); + return key_scan_info_; +} + +void DB::Compact(const storage::DataType& type) { + std::lock_guard rwl(dbs_rw_); + if (!opened_) { + return; + } + storage_->Compact(type); +} + +void DB::CompactRange(const storage::DataType& type, const std::string& start, const std::string& end) { + std::lock_guard rwl(dbs_rw_); + if (!opened_) { + return; + } + storage_->CompactRange(type, start, end); +} + +void DB::LongestNotCompactionSstCompact(const storage::DataType& type) { + std::lock_guard rwl(dbs_rw_); + if (!opened_) { + return; + } + storage_->LongestNotCompactionSstCompact(type); +} + +void DB::DoKeyScan(void* arg) { + std::unique_ptr bg_task_arg(static_cast(arg)); + bg_task_arg->db->RunKeyScan(); +} + +void DB::InitKeyScan() { + key_scan_info_.start_time = time(nullptr); + char s_time[32]; + size_t len = strftime(s_time, sizeof(s_time), "%Y-%m-%d %H:%M:%S", localtime(&key_scan_info_.start_time)); + key_scan_info_.s_start_time.assign(s_time, len); + key_scan_info_.duration = -1; // duration -1 mean the task in processing +} + +void DB::SetCompactRangeOptions(const bool is_canceled) { + if (!opened_) { + return; + } + storage_->SetCompactRangeOptions(is_canceled); +} + +DisplayCacheInfo DB::GetCacheInfo() { + std::lock_guard l(cache_info_rwlock_); + return cache_info_; +} + +bool DB::FlushDBWithoutLock() { + std::lock_guard l(bgsave_protector_); + if (bgsave_info_.bgsaving) { + return false; + } + + LOG(INFO) << db_name_ << " Delete old db..."; + storage_.reset(); + + std::string dbpath = db_path_; + if (dbpath[dbpath.length() - 1] == '/') { + dbpath.erase(dbpath.length() - 1); + } + std::string delete_suffix("_deleting_"); + delete_suffix.append(std::to_string(NowMicros())); + delete_suffix.append("/"); + dbpath.append(delete_suffix); + auto rename_success = pstd::RenameFile(db_path_, dbpath); + storage_ = std::make_shared(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); + rocksdb::Status s = 
storage_->Open(g_pika_server->storage_options(), db_path_); + assert(storage_); + assert(s.ok()); + if (rename_success == -1) { + //the storage_->Open actually opened old RocksDB instance, so flushdb failed + LOG(WARNING) << db_name_ << " FlushDB failed due to rename old db_path_ failed"; + return false; + } + LOG(INFO) << db_name_ << " Open new db success"; + + g_pika_server->PurgeDir(dbpath); + return true; +} + +void DB::DoBgSave(void* arg) { + std::unique_ptr bg_task_arg(static_cast(arg)); + + // Do BgSave + bool success = bg_task_arg->db->RunBgsaveEngine(); + + // Some output + BgSaveInfo info = bg_task_arg->db->bgsave_info(); + std::stringstream info_content; + std::ofstream out; + out.open(info.path + "/" + kBgsaveInfoFile, std::ios::in | std::ios::trunc); + if (out.is_open()) { + info_content << (time(nullptr) - info.start_time) << "s\n" + << g_pika_server->host() << "\n" + << g_pika_server->port() << "\n" + << info.offset.b_offset.filenum << "\n" + << info.offset.b_offset.offset << "\n"; + bg_task_arg->db->snapshot_uuid_ = md5(info_content.str()); + out << info_content.rdbuf(); + out.close(); + } + if (!success) { + std::string fail_path = info.path + "_FAILED"; + pstd::RenameFile(info.path, fail_path); + } + bg_task_arg->db->FinishBgsave(); +} + +bool DB::RunBgsaveEngine() { + // Prepare for Bgsaving + if (!InitBgsaveEnv() || !InitBgsaveEngine()) { + ClearBgsave(); + return false; + } + LOG(INFO) << db_name_ << " after prepare bgsave"; + + BgSaveInfo info = bgsave_info(); + LOG(INFO) << db_name_ << " bgsave_info: path=" << info.path << ", filenum=" << info.offset.b_offset.filenum + << ", offset=" << info.offset.b_offset.offset; + + // Backup to tmp dir + rocksdb::Status s = bgsave_engine_->CreateNewBackup(info.path); + + if (!s.ok()) { + LOG(WARNING) << db_name_ << " create new backup failed :" << s.ToString(); + return false; + } + LOG(INFO) << db_name_ << " create new backup finished."; + + return true; +} + +BgSaveInfo DB::bgsave_info() { + std::lock_guard l(bgsave_protector_); + return bgsave_info_; +} + +void DB::FinishBgsave() { + std::lock_guard l(bgsave_protector_); + bgsave_info_.bgsaving = false; + g_pika_server->UpdateLastSave(time(nullptr)); +} + +// Prepare engine, need bgsave_protector protect +bool DB::InitBgsaveEnv() { + std::lock_guard l(bgsave_protector_); + // Prepare for bgsave dir + bgsave_info_.start_time = time(nullptr); + char s_time[32]; + int len = static_cast(strftime(s_time, sizeof(s_time), "%Y%m%d%H%M%S", localtime(&bgsave_info_.start_time))); + bgsave_info_.s_start_time.assign(s_time, len); + std::string time_sub_path = g_pika_conf->bgsave_prefix() + std::string(s_time, 8); + bgsave_info_.path = g_pika_conf->bgsave_path() + time_sub_path + "/" + bgsave_sub_path_; + if (!pstd::DeleteDirIfExist(bgsave_info_.path)) { + LOG(WARNING) << db_name_ << " remove exist bgsave dir failed"; + return false; + } + pstd::CreatePath(bgsave_info_.path, 0755); + // Prepare for failed dir + if (!pstd::DeleteDirIfExist(bgsave_info_.path + "_FAILED")) { + LOG(WARNING) << db_name_ << " remove exist fail bgsave dir failed :"; + return false; + } + return true; +} + +// Prepare bgsave env, need bgsave_protector protect +bool DB::InitBgsaveEngine() { + bgsave_engine_.reset(); + rocksdb::Status s = storage::BackupEngine::Open(storage().get(), bgsave_engine_, g_pika_conf->db_instance_num()); + if (!s.ok()) { + LOG(WARNING) << db_name_ << " open backup engine failed " << s.ToString(); + return false; + } + + std::shared_ptr db = + 
g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_)); + if (!db) { + LOG(WARNING) << db_name_ << " not found"; + return false; + } + + { + std::lock_guard lock(dbs_rw_); + LogOffset bgsave_offset; + // term, index are 0 + db->Logger()->GetProducerStatus(&(bgsave_offset.b_offset.filenum), &(bgsave_offset.b_offset.offset)); + { + std::lock_guard l(bgsave_protector_); + bgsave_info_.offset = bgsave_offset; + } + s = bgsave_engine_->SetBackupContent(); + if (!s.ok()) { + LOG(WARNING) << db_name_ << " set backup content failed " << s.ToString(); + return false; + } + } + return true; +} + +void DB::Init() { + cache_ = std::make_shared(g_pika_conf->zset_cache_start_direction(), g_pika_conf->zset_cache_field_num_per_key()); + // Create cache + cache::CacheConfig cache_cfg; + g_pika_server->CacheConfigInit(cache_cfg); + cache_->Init(g_pika_conf->GetCacheNum(), &cache_cfg); +} + +void DB::GetBgSaveMetaData(std::vector* fileNames, std::string* snapshot_uuid) { + const std::string dbPath = bgsave_info().path; + + int db_instance_num = g_pika_conf->db_instance_num(); + for (int index = 0; index < db_instance_num; index++) { + std::string instPath = dbPath + ((dbPath.back() != '/') ? "/" : "") + std::to_string(index); + if (!pstd::FileExists(instPath)) { + continue ; + } + + std::vector tmpFileNames; + int ret = pstd::GetChildren(instPath, tmpFileNames); + if (ret) { + LOG(WARNING) << dbPath << " read dump meta files failed, path " << instPath; + return; + } + + for (const std::string fileName : tmpFileNames) { + fileNames -> push_back(std::to_string(index) + "/" + fileName); + } + } + fileNames->push_back(kBgsaveInfoFile); + pstd::Status s = GetBgSaveUUID(snapshot_uuid); + if (!s.ok()) { + LOG(WARNING) << "read dump meta info failed! error:" << s.ToString(); + return; + } +} + +Status DB::GetBgSaveUUID(std::string* snapshot_uuid) { + if (snapshot_uuid_.empty()) { + std::string info_data; + const std::string infoPath = bgsave_info().path + "/info"; + //TODO: using file read function to replace rocksdb::ReadFileToString + rocksdb::Status s = rocksdb::ReadFileToString(rocksdb::Env::Default(), infoPath, &info_data); + if (!s.ok()) { + LOG(WARNING) << "read dump meta info failed! 
error:" << s.ToString();
+      return Status::IOError("read dump meta info failed", infoPath);
+    }
+    pstd::MD5 md5 = pstd::MD5(info_data);
+    snapshot_uuid_ = md5.hexdigest();
+  }
+  *snapshot_uuid = snapshot_uuid_;
+  return Status::OK();
+}
+
+// Try to update master offset
+// This may happen when dbsync from the master has finished.
+// Here we do:
+// 1, Check dbsync finished, get the new binlog offset
+// 2, Replace the old db
+// 3, Update master offset, and the PikaAuxiliaryThread cron will connect and run the slaveof task with the master
+bool DB::TryUpdateMasterOffset() {
+  std::shared_ptr<SyncSlaveDB> slave_db =
+      g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name_));
+  if (!slave_db) {
+    // slave_db is null here, so there is no repl state to update
+    LOG(ERROR) << "Slave DB: " << db_name_ << " not exist";
+    return false;
+  }
+
+  std::string info_path = dbsync_path_ + kBgsaveInfoFile;
+  if (!pstd::FileExists(info_path)) {
+    LOG(WARNING) << "info path: " << info_path << " not exist, Slave DB:" << GetDBName() << " will restart the sync process...";
+    // May have failed in RsyncClient, so the snapshot dir was deleted
+    slave_db->SetReplState(ReplState::kTryConnect);
+    return false;
+  }
+
+  // Got new binlog offset
+  std::ifstream is(info_path);
+  if (!is) {
+    LOG(WARNING) << "DB: " << db_name_ << ", Failed to open info file after db sync";
+    slave_db->SetReplState(ReplState::kError);
+    return false;
+  }
+  std::string line;
+  std::string master_ip;
+  int lineno = 0;
+  int64_t filenum = 0;
+  int64_t offset = 0;
+  int64_t term = 0;
+  int64_t index = 0;
+  int64_t tmp = 0;
+  int64_t master_port = 0;
+  while (std::getline(is, line)) {
+    lineno++;
+    if (lineno == 2) {
+      master_ip = line;
+    } else if (lineno > 2 && lineno < 8) {
+      if ((pstd::string2int(line.data(), line.size(), &tmp) == 0) || tmp < 0) {
+        LOG(WARNING) << "DB: " << db_name_
+                     << ", Format of info file after db sync error, line : " << line;
+        is.close();
+        slave_db->SetReplState(ReplState::kError);
+        return false;
+      }
+      if (lineno == 3) {
+        master_port = tmp;
+      } else if (lineno == 4) {
+        filenum = tmp;
+      } else if (lineno == 5) {
+        offset = tmp;
+      } else if (lineno == 6) {
+        term = tmp;
+      } else if (lineno == 7) {
+        index = tmp;
+      }
+    } else if (lineno > 8) {
+      LOG(WARNING) << "DB: " << db_name_ << ", Format of info file after db sync error, line : " << line;
+      is.close();
+      slave_db->SetReplState(ReplState::kError);
+      return false;
+    }
+  }
+  is.close();
+
+  LOG(INFO) << "DB: " << db_name_ << " Information from dbsync info"
+            << ", master_ip: " << master_ip << ", master_port: " << master_port << ", filenum: " << filenum
+            << ", offset: " << offset << ", term: " << term << ", index: " << index;
+
+  // Retransmit data to the target redis
+  g_pika_server->RetransmitData(dbsync_path_);
+
+  pstd::DeleteFile(info_path);
+  if (!ChangeDb(dbsync_path_)) {
+    LOG(WARNING) << "DB: " << db_name_ << ", Failed to change db";
+    slave_db->SetReplState(ReplState::kError);
+    return false;
+  }
+
+  // Update master offset
+  std::shared_ptr<SyncMasterDB> master_db =
+      g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_));
+  if (!master_db) {
+    LOG(WARNING) << "Master DB: " << db_name_ << " not exist";
+    slave_db->SetReplState(ReplState::kError);
+    return false;
+  }
+  master_db->Logger()->SetProducerStatus(filenum, offset);
+  slave_db->SetReplState(ReplState::kTryConnect);
+
+  // now full sync is finished, remove the unfinished full sync count
+  g_pika_conf->RemoveInternalUsedUnfinishedFullSync(slave_db->DBName());
+
+  return true;
+}
+
+void DB::PrepareRsync() {
+  pstd::DeleteDirIfExist(dbsync_path_);
+  int db_instance_num
= g_pika_conf->db_instance_num(); + for (int index = 0; index < db_instance_num; index++) { + pstd::CreatePath(dbsync_path_ + std::to_string(index)); + } +} + +bool DB::IsBgSaving() { + std::lock_guard ml(bgsave_protector_); + return bgsave_info_.bgsaving; +} + +/* + * Change a new db locate in new_path + * return true when change success + * db remain the old one if return false + */ +bool DB::ChangeDb(const std::string& new_path) { + std::string tmp_path(db_path_); + if (tmp_path.back() == '/') { + tmp_path.resize(tmp_path.size() - 1); + } + tmp_path += "_bak"; + pstd::DeleteDirIfExist(tmp_path); + + std::lock_guard l(dbs_rw_); + LOG(INFO) << "DB: " << db_name_ << ", Prepare change db from: " << tmp_path; + storage_.reset(); + + if (0 != pstd::RenameFile(db_path_, tmp_path)) { + LOG(WARNING) << "DB: " << db_name_ + << ", Failed to rename db path when change db, error: " << strerror(errno); + return false; + } + + if (0 != pstd::RenameFile(new_path, db_path_)) { + LOG(WARNING) << "DB: " << db_name_ + << ", Failed to rename new db path when change db, error: " << strerror(errno); + return false; + } + + storage_ = std::make_shared(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); + rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), db_path_); + assert(storage_); + assert(s.ok()); + pstd::DeleteDirIfExist(tmp_path); + LOG(INFO) << "DB: " << db_name_ << ", Change db success"; + return true; +} + +void DB::ClearBgsave() { + std::lock_guard l(bgsave_protector_); + bgsave_info_.Clear(); +} + +void DB::UpdateCacheInfo(CacheInfo& cache_info) { + std::unique_lock lock(cache_info_rwlock_); + + cache_info_.status = cache_info.status; + cache_info_.cache_num = cache_info.cache_num; + cache_info_.keys_num = cache_info.keys_num; + cache_info_.used_memory = cache_info.used_memory; + cache_info_.waitting_load_keys_num = cache_info.waitting_load_keys_num; + cache_usage_ = cache_info.used_memory; + + uint64_t all_cmds = cache_info.hits + cache_info.misses; + cache_info_.hitratio_all = (0 >= all_cmds) ? 0.0 : (cache_info.hits * 100.0) / all_cmds; + + uint64_t cur_time_us = pstd::NowMicros(); + uint64_t delta_time = cur_time_us - cache_info_.last_time_us + 1; + uint64_t delta_hits = cache_info.hits - cache_info_.hits; + cache_info_.hits_per_sec = delta_hits * 1000000 / delta_time; + + uint64_t delta_all_cmds = all_cmds - (cache_info_.hits + cache_info_.misses); + cache_info_.read_cmd_per_sec = delta_all_cmds * 1000000 / delta_time; + + cache_info_.hitratio_per_sec = (0 >= delta_all_cmds) ? 
0.0 : (delta_hits * 100.0) / delta_all_cmds; + + uint64_t delta_load_keys = cache_info.async_load_keys_num - cache_info_.last_load_keys_num; + cache_info_.load_keys_per_sec = delta_load_keys * 1000000 / delta_time; + + cache_info_.hits = cache_info.hits; + cache_info_.misses = cache_info.misses; + cache_info_.last_time_us = cur_time_us; + cache_info_.last_load_keys_num = cache_info.async_load_keys_num; +} + +void DB::ResetDisplayCacheInfo(int status) { + std::unique_lock lock(cache_info_rwlock_); + cache_info_.status = status; + cache_info_.cache_num = 0; + cache_info_.keys_num = 0; + cache_info_.used_memory = 0; + cache_info_.hits = 0; + cache_info_.misses = 0; + cache_info_.hits_per_sec = 0; + cache_info_.read_cmd_per_sec = 0; + cache_info_.hitratio_per_sec = 0.0; + cache_info_.hitratio_all = 0.0; + cache_info_.load_keys_per_sec = 0; + cache_info_.waitting_load_keys_num = 0; + cache_usage_ = 0; +} diff --git a/tools/pika_migrate/src/pika_dispatch_thread.cc b/tools/pika_migrate/src/pika_dispatch_thread.cc index 676f34843e..bc892e23e4 100644 --- a/tools/pika_migrate/src/pika_dispatch_thread.cc +++ b/tools/pika_migrate/src/pika_dispatch_thread.cc @@ -9,15 +9,15 @@ #include "include/pika_conf.h" #include "include/pika_server.h" +#include "net/src/dispatch_thread.h" +#include "pstd/include/testutil.h" -extern PikaConf* g_pika_conf; extern PikaServer* g_pika_server; -PikaDispatchThread::PikaDispatchThread(std::set &ips, int port, int work_num, - int cron_interval, int queue_limit) - : handles_(this) { - thread_rep_ = pink::NewDispatchThread(ips, port, work_num, &conn_factory_, - cron_interval, queue_limit, &handles_); +PikaDispatchThread::PikaDispatchThread(std::set& ips, int port, int work_num, int cron_interval, + int queue_limit, int max_conn_rbuf_size) + : conn_factory_(max_conn_rbuf_size), handles_(this) { + thread_rep_ = net::NewDispatchThread(ips, port, work_num, &conn_factory_, cron_interval, queue_limit, &handles_); thread_rep_->set_thread_name("Dispatcher"); } @@ -27,32 +27,40 @@ PikaDispatchThread::~PikaDispatchThread() { delete thread_rep_; } -int PikaDispatchThread::StartThread() { - return thread_rep_->StartThread(); -} +int PikaDispatchThread::StartThread() { return thread_rep_->StartThread(); } -int64_t PikaDispatchThread::ThreadClientList(std::vector *clients) { - std::vector conns_info = - thread_rep_->conns_info(); - if (clients != nullptr) { +uint64_t PikaDispatchThread::ThreadClientList(std::vector* clients) { + std::vector conns_info = thread_rep_->conns_info(); + if (clients) { for (auto& info : conns_info) { clients->push_back({ - info.fd, - info.ip_port, - info.last_interaction.tv_sec, - nullptr /* PinkConn pointer, doesn't need here */ - }); + info.fd, info.ip_port, info.last_interaction.tv_sec, nullptr /* NetConn pointer, doesn't need here */ + }); } } return conns_info.size(); } -bool PikaDispatchThread::ClientKill(const std::string& ip_port) { - return thread_rep_->KillConn(ip_port); +bool PikaDispatchThread::ClientKill(const std::string& ip_port) { return thread_rep_->KillConn(ip_port); } + +void PikaDispatchThread::ClientKillAll() { thread_rep_->KillAllConns(); } + +void PikaDispatchThread::UnAuthUserAndKillClient(const std::set& users, + const std::shared_ptr& defaultUser) { + auto dispatchThread = dynamic_cast(thread_rep_); + if (dispatchThread) { + dispatchThread->AllConn([&](const std::shared_ptr& conn) { + auto pikaClientConn = std::dynamic_pointer_cast(conn); + if (pikaClientConn && users.count(pikaClientConn->UserName())) { + 
pikaClientConn->UnAuth(defaultUser); + conn->SetClose(true); + } + }); + } } -void PikaDispatchThread::ClientKillAll() { - thread_rep_->KillAllConns(); +void PikaDispatchThread::StopThread() { + thread_rep_->StopThread(); } bool PikaDispatchThread::Handles::AccessHandle(std::string& ip) const { @@ -61,18 +69,17 @@ bool PikaDispatchThread::Handles::AccessHandle(std::string& ip) const { } int client_num = pika_disptcher_->thread_rep_->conn_num(); - if ((client_num >= g_pika_conf->maxclients() + g_pika_conf->root_connection_num()) - || (client_num >= g_pika_conf->maxclients() && ip != g_pika_server->host())) { + if ((client_num >= g_pika_conf->maxclients() + g_pika_conf->root_connection_num()) || + (client_num >= g_pika_conf->maxclients() && ip != g_pika_server->host())) { LOG(WARNING) << "Max connections reach, Deny new comming: " << ip; return false; } - DLOG(INFO) << "new clinet comming, ip: " << ip; + DLOG(INFO) << "new client comming, ip: " << ip; g_pika_server->incr_accumulative_connections(); return true; } void PikaDispatchThread::Handles::CronHandle() const { pika_disptcher_->thread_rep_->set_keepalive_timeout(g_pika_conf->timeout()); - g_pika_server->ResetLastSecQuerynum(); } diff --git a/tools/pika_migrate/src/pika_geo.cc b/tools/pika_migrate/src/pika_geo.cc index 64005920c3..7e7575eca1 100644 --- a/tools/pika_migrate/src/pika_geo.cc +++ b/tools/pika_migrate/src/pika_geo.cc @@ -7,9 +7,10 @@ #include -#include "slash/include/slash_string.h" +#include "pstd/include/pstd_string.h" #include "include/pika_geohash_helper.h" +#include "rocksdb/status.h" void GeoAddCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -24,13 +25,14 @@ void GeoAddCmd::DoInitial() { key_ = argv_[1]; pos_.clear(); struct GeoPoint point; - double longitude, latitude; + double longitude; + double latitude; for (size_t index = 2; index < argc; index += 3) { - if (!slash::string2d(argv_[index].data(), argv_[index].size(), &longitude)) { + if (pstd::string2d(argv_[index].data(), argv_[index].size(), &longitude) == 0) { res_.SetRes(CmdRes::kInvalidFloat); return; } - if (!slash::string2d(argv_[index + 1].data(), argv_[index + 1].size(), &latitude)) { + if (pstd::string2d(argv_[index + 1].data(), argv_[index + 1].size(), &latitude) == 0) { res_.SetRes(CmdRes::kInvalidFloat); return; } @@ -39,11 +41,10 @@ void GeoAddCmd::DoInitial() { point.latitude = latitude; pos_.push_back(point); } - return; } -void GeoAddCmd::Do(std::shared_ptr partition) { - std::vector score_members; +void GeoAddCmd::Do() { + std::vector score_members; for (const auto& geo_point : pos_) { // Convert coordinates to geohash GeoHashBits hash; @@ -52,17 +53,18 @@ void GeoAddCmd::Do(std::shared_ptr partition) { // Convert uint64 to double double score; std::string str_bits = std::to_string(bits); - slash::string2d(str_bits.data(), str_bits.size(), &score); + pstd::string2d(str_bits.data(), str_bits.size(), &score); score_members.push_back({score, geo_point.member}); } int32_t count = 0; - rocksdb::Status s = partition->db()->ZAdd(key_, score_members, &count); + rocksdb::Status s = db_->storage()->ZAdd(key_, score_members, &count); if (s.ok()) { res_.AppendInteger(count); + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } void GeoPosCmd::DoInitial() { @@ -78,38 +80,41 @@ void GeoPosCmd::DoInitial() { } } -void GeoPosCmd::Do(std::shared_ptr partition) { - double score; - res_.AppendArrayLen(members_.size()); +void GeoPosCmd::Do() { + double score = 0.0; + 
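
All of the geo commands in this file follow the same decode recipe: the zset score is reinterpreted as a 52-bit geohash and expanded to WGS84 longitude/latitude. That step in isolation, using the same helpers the code below calls (GeoHashBits, GEO_STEP_MAX, geohashDecodeToLongLatWGS84); the wrapper function name is illustrative:

#include "include/pika_geohash_helper.h"

// Decode a zset score into xy[0]=longitude, xy[1]=latitude, exactly as
// GeoPosCmd/GeoDistCmd/GeoHashCmd do below; returns false on decode failure.
bool ScoreToLongLat(double score, double xy[2]) {
  GeoHashBits hash = {.bits = static_cast<uint64_t>(score), .step = GEO_STEP_MAX};
  return geohashDecodeToLongLatWGS84(hash, xy) != 0;
}
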
res_.AppendArrayLenUint64(members_.size()); for (const auto& member : members_) { - rocksdb::Status s = partition->db()->ZScore(key_, member, &score); + rocksdb::Status s = db_->storage()->ZScore(key_, member, &score); if (s.ok()) { double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)score, .step = GEO_STEP_MAX }; + GeoHashBits hash = {.bits = static_cast(score), .step = GEO_STEP_MAX}; geohashDecodeToLongLatWGS84(hash, xy); res_.AppendArrayLen(2); char longitude[32]; - int64_t len = slash::d2string(longitude, sizeof(longitude), xy[0]); + int64_t len = pstd::d2string(longitude, sizeof(longitude), xy[0]); res_.AppendStringLen(len); res_.AppendContent(longitude); char latitude[32]; - len = slash::d2string(latitude, sizeof(latitude), xy[1]); + len = pstd::d2string(latitude, sizeof(latitude), xy[1]); res_.AppendStringLen(len); res_.AppendContent(latitude); - + } else if (s.IsNotFound()) { res_.AppendStringLen(-1); continue; + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + continue; } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); - continue; + continue; } } } -static double length_converter(double meters, const std::string & unit) { +static double length_converter(double meters, const std::string& unit) { if (unit == "m") { return meters; } else if (unit == "km") { @@ -123,12 +128,8 @@ static double length_converter(double meters, const std::string & unit) { } } -static bool check_unit(const std::string & unit) { - if (unit == "m" || unit == "km" || unit == "ft" || unit == "mi") { - return true; - } else { - return false; - } +static bool check_unit(const std::string& unit) { + return unit == "m" || unit == "km" || unit == "ft" || unit == "mi"; } void GeoDistCmd::DoInitial() { @@ -157,37 +158,44 @@ void GeoDistCmd::DoInitial() { } } -void GeoDistCmd::Do(std::shared_ptr partition) { - double first_score, second_score, first_xy[2], second_xy[2]; - rocksdb::Status s = partition->db()->ZScore(key_, first_pos_, &first_score); +void GeoDistCmd::Do() { + double first_score = 0.0; + double second_score = 0.0; + double first_xy[2]; + double second_xy[2]; + rocksdb::Status s = db_->storage()->ZScore(key_, first_pos_, &first_score); + if (s.ok()) { - GeoHashBits hash = { .bits = (uint64_t)first_score, .step = GEO_STEP_MAX }; + GeoHashBits hash = {.bits = static_cast(first_score), .step = GEO_STEP_MAX}; geohashDecodeToLongLatWGS84(hash, first_xy); } else if (s.IsNotFound()) { res_.AppendStringLen(-1); return; + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; + return; } - s = partition->db()->ZScore(key_, second_pos_, &second_score); + s = db_->storage()->ZScore(key_, second_pos_, &second_score); if (s.ok()) { - GeoHashBits hash = { .bits = (uint64_t)second_score, .step = GEO_STEP_MAX }; + GeoHashBits hash = {.bits = static_cast(second_score), .step = GEO_STEP_MAX}; geohashDecodeToLongLatWGS84(hash, second_xy); } else if (s.IsNotFound()) { res_.AppendStringLen(-1); return; } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; + return; } double distance = geohashGetDistance(first_xy[0], first_xy[1], second_xy[0], second_xy[1]); distance = length_converter(distance, unit_); char buf[32]; - sprintf(buf, "%.4f", distance); - res_.AppendStringLen(strlen(buf)); + snprintf(buf, sizeof(buf), "%.4f", distance); + res_.AppendStringLenUint64(strlen(buf)); res_.AppendContent(buf); } @@ -204,15 +212,15 @@ void GeoHashCmd::DoInitial() { } } -void GeoHashCmd::Do(std::shared_ptr 
partition) { - const char * geoalphabet= "0123456789bcdefghjkmnpqrstuvwxyz"; - res_.AppendArrayLen(members_.size()); +void GeoHashCmd::Do() { + const char* geoalphabet = "0123456789bcdefghjkmnpqrstuvwxyz"; + res_.AppendArrayLenUint64(members_.size()); for (const auto& member : members_) { - double score; - rocksdb::Status s = partition->db()->ZScore(key_, member, &score); + double score = 0.0; + rocksdb::Status s = db_->storage()->ZScore(key_, member, &score); if (s.ok()) { double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)score, .step = GEO_STEP_MAX }; + GeoHashBits hash = {.bits = static_cast(score), .step = GEO_STEP_MAX}; geohashDecodeToLongLatWGS84(hash, xy); GeoHashRange r[2]; GeoHashBits encode_hash; @@ -225,7 +233,7 @@ void GeoHashCmd::Do(std::shared_ptr partition) { char buf[12]; int i; for (i = 0; i < 11; i++) { - int idx = (encode_hash.bits >> (52-((i+1)*5))) & 0x1f; + uint64_t idx = (encode_hash.bits >> (52 - ((i + 1) * 5))) & 0x1f; buf[i] = geoalphabet[idx]; } buf[11] = '\0'; @@ -235,24 +243,29 @@ void GeoHashCmd::Do(std::shared_ptr partition) { } else if (s.IsNotFound()) { res_.AppendStringLen(-1); continue; + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + continue; } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); - continue; + continue; } } } -static bool sort_distance_asc(const NeighborPoint & pos1, const NeighborPoint & pos2) { +static bool sort_distance_asc(const NeighborPoint& pos1, const NeighborPoint& pos2) { return pos1.distance < pos2.distance; } -static bool sort_distance_desc(const NeighborPoint & pos1, const NeighborPoint & pos2) { +static bool sort_distance_desc(const NeighborPoint& pos1, const NeighborPoint& pos2) { return pos1.distance > pos2.distance; } -static void GetAllNeighbors(std::shared_ptr partition, std::string & key, GeoRange & range, CmdRes & res) { +static void GetAllNeighbors(const std::shared_ptr& db, std::string& key, GeoRange& range, CmdRes& res) { rocksdb::Status s; - double longitude = range.longitude, latitude = range.latitude, distance = range.distance; + double longitude = range.longitude; + double latitude = range.latitude; + double distance = range.distance; int count_limit = 0; // Convert other units to meters if (range.unit == "m") { @@ -282,113 +295,133 @@ static void GetAllNeighbors(std::shared_ptr partition, std::string & // For each neighbor, get all the matching // members and add them to the potential result list. 
std::vector result; - int last_processed = 0; + size_t last_processed = 0; for (size_t i = 0; i < sizeof(neighbors) / sizeof(*neighbors); i++) { - GeoHashFix52Bits min, max; - if (HASHISZERO(neighbors[i])) + GeoHashFix52Bits min; + GeoHashFix52Bits max; + if (HASHISZERO(neighbors[i])) { continue; + } + min = geohashAlign52Bits(neighbors[i]); neighbors[i].bits++; max = geohashAlign52Bits(neighbors[i]); // When a huge Radius (in the 5000 km range or more) is used, // adjacent neighbors can be the same, so need to remove duplicated elements - if(last_processed && neighbors[i].bits == neighbors[last_processed].bits && neighbors[i].step == neighbors[last_processed].step) { - continue; + if ((last_processed != 0) && neighbors[i].bits == neighbors[last_processed].bits && + neighbors[i].step == neighbors[last_processed].step) { + continue; } - std::vector score_members; - s = partition->db()->ZRangebyscore(key, (double)min, (double)max, true, true, &score_members); + std::vector score_members; + s = db->storage()->ZRangebyscore(key, static_cast(min), static_cast(max), true, true, &score_members); if (!s.ok() && !s.IsNotFound()) { - res.SetRes(CmdRes::kErrOther, s.ToString()); - return; + if (s.IsInvalidArgument()) { + res.SetRes(CmdRes::kMultiKey); + return; + } else { + res.SetRes(CmdRes::kErrOther, s.ToString()); + return; + } } // Insert into result only if the point is within the search area. - for (size_t i = 0; i < score_members.size(); ++i) { - double xy[2], real_distance; - GeoHashBits hash = { .bits = (uint64_t)score_members[i].score, .step = GEO_STEP_MAX }; + for (auto & score_member : score_members) { + double xy[2]; + double real_distance = 0.0; + GeoHashBits hash = {.bits = static_cast(score_member.score), .step = GEO_STEP_MAX}; geohashDecodeToLongLatWGS84(hash, xy); - if(geohashGetDistanceIfInRadiusWGS84(longitude, latitude, xy[0], xy[1], distance, &real_distance)) { + if (geohashGetDistanceIfInRadiusWGS84(longitude, latitude, xy[0], xy[1], distance, &real_distance) != 0) { NeighborPoint item; - item.member = score_members[i].member; - item.score = score_members[i].score; + item.member = score_member.member; + item.score = score_member.score; item.distance = real_distance; result.push_back(item); } } last_processed = i; } - + // If using the count opiton if (range.count) { - count_limit = static_cast(result.size()) < range.count_limit ? result.size() : range.count_limit; + count_limit = static_cast(result.size() < range.count_limit ? result.size() : range.count_limit); } else { - count_limit = result.size(); + count_limit = static_cast(result.size()); } // If using sort option - if (range.sort == Asc) { - std::sort(result.begin(), result.end(), sort_distance_asc); - } else if(range.sort == Desc) { - std::sort(result.begin(), result.end(), sort_distance_desc); + if (range.sort != Unsort) { + if (range.sort == Asc) { + std::sort(result.begin(), result.end(), sort_distance_asc); + } else if (range.sort == Desc) { + std::sort(result.begin(), result.end(), sort_distance_desc); + } } if (range.store || range.storedist) { // Target key, create a sorted set with the results. - std::vector score_members; + std::vector score_members; for (int i = 0; i < count_limit; ++i) { double distance = length_converter(result[i].distance, range.unit); double score = range.store ? 
result[i].score : distance; score_members.push_back({score, result[i].member}); } int32_t count = 0; - s = partition->db()->ZAdd(range.storekey, score_members, &count); + int32_t card = db->storage()->Exists({range.storekey}); + if (card) { + if (db->storage()->Del({range.storekey}) > 0) { + db->cache()->Del({range.storekey}); + } + } + s = db->storage()->ZAdd(range.storekey, score_members, &count); if (!s.ok()) { res.SetRes(CmdRes::kErrOther, s.ToString()); return; + } else { + s = db->cache()->ZAdd(range.storekey, score_members); } res.AppendInteger(count_limit); return; } else { // No target key, return results to user. - + // For each the result res.AppendArrayLen(count_limit); for (int i = 0; i < count_limit; ++i) { if (range.option_num != 0) { - res.AppendArrayLen(range.option_num+1); + res.AppendArrayLen(range.option_num + 1); } // Member - res.AppendStringLen(result[i].member.size()); + res.AppendStringLenUint64(result[i].member.size()); res.AppendContent(result[i].member); - + // If using withdist option - if (range.withdist) { + if (range.withdist) { double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)result[i].score, .step = GEO_STEP_MAX }; + GeoHashBits hash = {.bits = static_cast(result[i].score), .step = GEO_STEP_MAX}; geohashDecodeToLongLatWGS84(hash, xy); double distance = geohashGetDistance(longitude, latitude, xy[0], xy[1]); distance = length_converter(distance, range.unit); char buf[32]; - sprintf(buf, "%.4f", distance); - res.AppendStringLen(strlen(buf)); + snprintf(buf, sizeof(buf), "%.4f", distance); + res.AppendStringLenUint64(strlen(buf)); res.AppendContent(buf); } // If using withhash option if (range.withhash) { - res.AppendInteger(result[i].score); + res.AppendInteger(static_cast(result[i].score)); } // If using withcoord option if (range.withcoord) { - res.AppendArrayLen(2); + res.AppendArrayLen(2); double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)result[i].score, .step = GEO_STEP_MAX }; + GeoHashBits hash = {.bits = static_cast(result[i].score), .step = GEO_STEP_MAX}; geohashDecodeToLongLatWGS84(hash, xy); char longitude[32]; - int64_t len = slash::d2string(longitude, sizeof(longitude), xy[0]); + int64_t len = pstd::d2string(longitude, sizeof(longitude), xy[0]); res.AppendStringLen(len); res.AppendContent(longitude); char latitude[32]; - len = slash::d2string(latitude, sizeof(latitude), xy[1]); + len = pstd::d2string(latitude, sizeof(latitude), xy[1]); res.AppendStringLen(len); res.AppendContent(latitude); } @@ -402,57 +435,58 @@ void GeoRadiusCmd::DoInitial() { return; } key_ = argv_[1]; - slash::string2d(argv_[2].data(), argv_[2].size(), &range_.longitude); - slash::string2d(argv_[3].data(), argv_[3].size(), &range_.latitude); - slash::string2d(argv_[4].data(), argv_[4].size(), &range_.distance); + pstd::string2d(argv_[2].data(), argv_[2].size(), &range_.longitude); + pstd::string2d(argv_[3].data(), argv_[3].size(), &range_.latitude); + pstd::string2d(argv_[4].data(), argv_[4].size(), &range_.distance); range_.unit = argv_[5]; if (!check_unit(range_.unit)) { res_.SetRes(CmdRes::kErrOther, "unsupported unit provided. 
please use m, km, ft, mi"); return; } size_t pos = 6; + range_.sort = Asc; while (pos < argv_.size()) { - if (!strcasecmp(argv_[pos].c_str(), "withdist")) { + if (strcasecmp(argv_[pos].c_str(), "withdist") == 0) { range_.withdist = true; range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "withhash")) { - range_.withhash = true; + } else if (strcasecmp(argv_[pos].c_str(), "withhash") == 0) { + range_.withhash = true; range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "withcoord")) { - range_.withcoord = true; + } else if (strcasecmp(argv_[pos].c_str(), "withcoord") == 0) { + range_.withcoord = true; range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "count")) { - range_.count = true; - if (argv_.size() < (pos+2)) { + } else if (strcasecmp(argv_[pos].c_str(), "count") == 0) { + range_.count = true; + if (argv_.size() < (pos + 2)) { res_.SetRes(CmdRes::kSyntaxErr); - return; + return; } std::string str_count = argv_[++pos]; for (auto s : str_count) { - if (!isdigit(s)) { + if (isdigit(s) == 0) { res_.SetRes(CmdRes::kErrOther, "value is not an integer or out of range"); return; } - } + } range_.count_limit = std::stoi(str_count); - } else if (!strcasecmp(argv_[pos].c_str(), "store")) { + } else if (strcasecmp(argv_[pos].c_str(), "store") == 0) { range_.store = true; - if (argv_.size() < (pos+2)) { + if (argv_.size() < (pos + 2)) { res_.SetRes(CmdRes::kSyntaxErr); - return; + return; } range_.storekey = argv_[++pos]; - } else if (!strcasecmp(argv_[pos].c_str(), "storedist")) { + } else if (strcasecmp(argv_[pos].c_str(), "storedist") == 0) { range_.storedist = true; - if (argv_.size() < (pos+2)) { + if (argv_.size() < (pos + 2)) { res_.SetRes(CmdRes::kSyntaxErr); - return; + return; } range_.storekey = argv_[++pos]; - } else if (!strcasecmp(argv_[pos].c_str(), "asc")) { - range_.sort = Asc; - } else if (!strcasecmp(argv_[pos].c_str(), "desc")) { - range_.sort = Desc; + } else if (strcasecmp(argv_[pos].c_str(), "asc") == 0) { + range_.sort = Asc; + } else if (strcasecmp(argv_[pos].c_str(), "desc") == 0) { + range_.sort = Desc; } else { res_.SetRes(CmdRes::kSyntaxErr); return; @@ -460,14 +494,13 @@ void GeoRadiusCmd::DoInitial() { pos++; } if (range_.store && (range_.withdist || range_.withcoord || range_.withhash)) { - res_.SetRes(CmdRes::kErrOther, "STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options"); + res_.SetRes(CmdRes::kErrOther, + "STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options"); return; } } -void GeoRadiusCmd::Do(std::shared_ptr partition) { - GetAllNeighbors(partition, key_, range_, this->res_); -} +void GeoRadiusCmd::Do() { GetAllNeighbors(db_, key_, range_, this->res_); } void GeoRadiusByMemberCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -476,7 +509,7 @@ void GeoRadiusByMemberCmd::DoInitial() { } key_ = argv_[1]; range_.member = argv_[2]; - slash::string2d(argv_[3].data(), argv_[3].size(), &range_.distance); + pstd::string2d(argv_[3].data(), argv_[3].size(), &range_.distance); range_.unit = argv_[4]; if (!check_unit(range_.unit)) { res_.SetRes(CmdRes::kErrOther, "unsupported unit provided. 
please use m, km, ft, mi"); @@ -484,47 +517,47 @@ void GeoRadiusByMemberCmd::DoInitial() { } size_t pos = 5; while (pos < argv_.size()) { - if (!strcasecmp(argv_[pos].c_str(), "withdist")) { + if (strcasecmp(argv_[pos].c_str(), "withdist") == 0) { range_.withdist = true; range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "withhash")) { - range_.withhash = true; + } else if (strcasecmp(argv_[pos].c_str(), "withhash") == 0) { + range_.withhash = true; range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "withcoord")) { - range_.withcoord = true; + } else if (strcasecmp(argv_[pos].c_str(), "withcoord") == 0) { + range_.withcoord = true; range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "count")) { - range_.count = true; - if (argv_.size() < (pos+2)) { + } else if (strcasecmp(argv_[pos].c_str(), "count") == 0) { + range_.count = true; + if (argv_.size() < (pos + 2)) { res_.SetRes(CmdRes::kSyntaxErr); - return; + return; } std::string str_count = argv_[++pos]; for (auto s : str_count) { - if (!isdigit(s)) { + if (isdigit(s) == 0) { res_.SetRes(CmdRes::kErrOther, "value is not an integer or out of range"); return; } - } + } range_.count_limit = std::stoi(str_count); - } else if (!strcasecmp(argv_[pos].c_str(), "store")) { + } else if (strcasecmp(argv_[pos].c_str(), "store") == 0) { range_.store = true; - if (argv_.size() < (pos+2)) { + if (argv_.size() < (pos + 2)) { res_.SetRes(CmdRes::kSyntaxErr); - return; + return; } range_.storekey = argv_[++pos]; - } else if (!strcasecmp(argv_[pos].c_str(), "storedist")) { + } else if (strcasecmp(argv_[pos].c_str(), "storedist") == 0) { range_.storedist = true; - if (argv_.size() < (pos+2)) { + if (argv_.size() < (pos + 2)) { res_.SetRes(CmdRes::kSyntaxErr); - return; + return; } range_.storekey = argv_[++pos]; - } else if (!strcasecmp(argv_[pos].c_str(), "asc")) { - range_.sort = Asc; - } else if (!strcasecmp(argv_[pos].c_str(), "desc")) { - range_.sort = Desc; + } else if (strcasecmp(argv_[pos].c_str(), "asc") == 0) { + range_.sort = Asc; + } else if (strcasecmp(argv_[pos].c_str(), "desc") == 0) { + range_.sort = Desc; } else { res_.SetRes(CmdRes::kSyntaxErr); return; @@ -532,20 +565,25 @@ void GeoRadiusByMemberCmd::DoInitial() { pos++; } if (range_.store && (range_.withdist || range_.withcoord || range_.withhash)) { - res_.SetRes(CmdRes::kErrOther, "STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options"); + res_.SetRes(CmdRes::kErrOther, + "STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options"); return; } } -void GeoRadiusByMemberCmd::Do(std::shared_ptr partition) { - double score; - rocksdb::Status s = partition->db()->ZScore(key_, range_.member, &score); +void GeoRadiusByMemberCmd::Do() { + double score = 0.0; + rocksdb::Status s = db_->storage()->ZScore(key_, range_.member, &score); + if (s.IsNotFound() && !s.ToString().compare("NotFound: Invalid member")) { + res_.SetRes(CmdRes::kErrOther, "could not decode requested zset member"); + return; + } if (s.ok()) { double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)score, .step = GEO_STEP_MAX }; + GeoHashBits hash = {.bits = static_cast(score), .step = GEO_STEP_MAX}; geohashDecodeToLongLatWGS84(hash, xy); range_.longitude = xy[0]; range_.latitude = xy[1]; } - GetAllNeighbors(partition, key_, range_, this->res_); + GetAllNeighbors(db_, key_, range_, this->res_); } diff --git a/tools/pika_migrate/src/pika_geohash.cc b/tools/pika_migrate/src/pika_geohash.cc index 2ad66314b8..a59d0cf1cb 
100644 --- a/tools/pika_migrate/src/pika_geohash.cc +++ b/tools/pika_migrate/src/pika_geohash.cc @@ -50,246 +50,238 @@ * From: https://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN */ static inline uint64_t interleave64(uint32_t xlo, uint32_t ylo) { - static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, - 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, - 0x0000FFFF0000FFFFULL}; - static const unsigned int S[] = {1, 2, 4, 8, 16}; + static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, + 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL}; + static const unsigned int S[] = {1, 2, 4, 8, 16}; - uint64_t x = xlo; - uint64_t y = ylo; + uint64_t x = xlo; + uint64_t y = ylo; - x = (x | (x << S[4])) & B[4]; - y = (y | (y << S[4])) & B[4]; + x = (x | (x << S[4])) & B[4]; + y = (y | (y << S[4])) & B[4]; - x = (x | (x << S[3])) & B[3]; - y = (y | (y << S[3])) & B[3]; + x = (x | (x << S[3])) & B[3]; + y = (y | (y << S[3])) & B[3]; - x = (x | (x << S[2])) & B[2]; - y = (y | (y << S[2])) & B[2]; + x = (x | (x << S[2])) & B[2]; + y = (y | (y << S[2])) & B[2]; - x = (x | (x << S[1])) & B[1]; - y = (y | (y << S[1])) & B[1]; + x = (x | (x << S[1])) & B[1]; + y = (y | (y << S[1])) & B[1]; - x = (x | (x << S[0])) & B[0]; - y = (y | (y << S[0])) & B[0]; + x = (x | (x << S[0])) & B[0]; + y = (y | (y << S[0])) & B[0]; - return x | (y << 1); + return x | (y << 1); } /* reverse the interleave process * derived from http://stackoverflow.com/questions/4909263 */ static inline uint64_t deinterleave64(uint64_t interleaved) { - static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, - 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, - 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; - static const unsigned int S[] = {0, 1, 2, 4, 8, 16}; + static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, + 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; + static const unsigned int S[] = {0, 1, 2, 4, 8, 16}; - uint64_t x = interleaved; - uint64_t y = interleaved >> 1; + uint64_t x = interleaved; + uint64_t y = interleaved >> 1; - x = (x | (x >> S[0])) & B[0]; - y = (y | (y >> S[0])) & B[0]; + x = (x | (x >> S[0])) & B[0]; + y = (y | (y >> S[0])) & B[0]; - x = (x | (x >> S[1])) & B[1]; - y = (y | (y >> S[1])) & B[1]; + x = (x | (x >> S[1])) & B[1]; + y = (y | (y >> S[1])) & B[1]; - x = (x | (x >> S[2])) & B[2]; - y = (y | (y >> S[2])) & B[2]; + x = (x | (x >> S[2])) & B[2]; + y = (y | (y >> S[2])) & B[2]; - x = (x | (x >> S[3])) & B[3]; - y = (y | (y >> S[3])) & B[3]; + x = (x | (x >> S[3])) & B[3]; + y = (y | (y >> S[3])) & B[3]; - x = (x | (x >> S[4])) & B[4]; - y = (y | (y >> S[4])) & B[4]; + x = (x | (x >> S[4])) & B[4]; + y = (y | (y >> S[4])) & B[4]; - x = (x | (x >> S[5])) & B[5]; - y = (y | (y >> S[5])) & B[5]; + x = (x | (x >> S[5])) & B[5]; + y = (y | (y >> S[5])) & B[5]; - return x | (y << 32); + return x | (y << 32); } -void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range) { - /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ - /* We can't geocode at the north/south pole. */ - long_range->max = GEO_LONG_MAX; - long_range->min = GEO_LONG_MIN; - lat_range->max = GEO_LAT_MAX; - lat_range->min = GEO_LAT_MIN; +void geohashGetCoordRange(GeoHashRange* long_range, GeoHashRange* lat_range) { + /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ + /* We can't geocode at the north/south pole. 
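
For intuition about interleave64() below: it builds a Morton code, placing bit i of x at even position 2i and bit i of y at odd position 2i+1. A tiny worked example using the naive bit loop rather than the magic-constant version (illustrative only; the constants-based version computes the same thing faster):

#include <cstdint>
#include <cstdio>

// Naive reference implementation of the same interleaving.
static uint64_t interleave_naive(uint32_t x, uint32_t y) {
  uint64_t out = 0;
  for (int i = 0; i < 32; i++) {
    out |= (uint64_t)((x >> i) & 1) << (2 * i);      // x bits -> even slots
    out |= (uint64_t)((y >> i) & 1) << (2 * i + 1);  // y bits -> odd slots
  }
  return out;
}

int main() {
  // x = 0b101, y = 0b011: the bits interleave to 0b011011 == 27.
  printf("%llu\n", (unsigned long long)interleave_naive(0b101, 0b011));
  return 0;
}
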
*/ + long_range->max = GEO_LONG_MAX; + long_range->min = GEO_LONG_MIN; + lat_range->max = GEO_LAT_MAX; + lat_range->min = GEO_LAT_MIN; } -int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range, - double longitude, double latitude, uint8_t step, - GeoHashBits *hash) { - /* Check basic arguments sanity. */ - if (hash == NULL || step > 32 || step == 0 || - RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) return 0; - - /* Return an error when trying to index outside the supported - * constraints. */ - if (longitude > 180 || longitude < -180 || - latitude > 85.05112878 || latitude < -85.05112878) return 0; - - hash->bits = 0; - hash->step = step; - - if (latitude < lat_range->min || latitude > lat_range->max || - longitude < long_range->min || longitude > long_range->max) { - return 0; - } - - double lat_offset = - (latitude - lat_range->min) / (lat_range->max - lat_range->min); - double long_offset = - (longitude - long_range->min) / (long_range->max - long_range->min); - - /* convert to fixed point based on the step size */ - lat_offset *= (1ULL << step); - long_offset *= (1ULL << step); - hash->bits = interleave64(lat_offset, long_offset); - return 1; +int geohashEncode(const GeoHashRange* long_range, const GeoHashRange* lat_range, double longitude, double latitude, + uint8_t step, GeoHashBits* hash) { + /* Check basic arguments sanity. */ + if (!hash || step > 32 || step == 0 || RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) { + return 0; + } + + /* Return an error when trying to index outside the supported + * constraints. */ + if (longitude > 180 || longitude < -180 || latitude > 85.05112878 || latitude < -85.05112878) { + return 0; + } + + hash->bits = 0; + hash->step = step; + + if (latitude < lat_range->min || latitude > lat_range->max || longitude < long_range->min || + longitude > long_range->max) { + return 0; + } + + double lat_offset = (latitude - lat_range->min) / (lat_range->max - lat_range->min); + double long_offset = (longitude - long_range->min) / (long_range->max - long_range->min); + + /* convert to fixed point based on the step size */ + auto lat_offset_step = static_cast(lat_offset * static_cast(1ULL << step)); + auto long_offset_step = static_cast(long_offset * static_cast(1ULL << step)); + hash->bits = interleave64(lat_offset_step, long_offset_step); + return 1; } -int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits *hash) { - GeoHashRange r[2] = {{0}}; - geohashGetCoordRange(&r[0], &r[1]); - return geohashEncode(&r[0], &r[1], longitude, latitude, step, hash); +int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits* hash) { + GeoHashRange r[2] = {{0}}; + geohashGetCoordRange(&r[0], &r[1]); + return geohashEncode(&r[0], &r[1], longitude, latitude, step, hash); } -int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, - GeoHashBits *hash) { - return geohashEncodeType(longitude, latitude, step, hash); +int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, GeoHashBits* hash) { + return geohashEncodeType(longitude, latitude, step, hash); } -int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, - const GeoHashBits hash, GeoHashArea *area) { - if (HASHISZERO(hash) || NULL == area || RANGEISZERO(lat_range) || - RANGEISZERO(long_range)) { - return 0; - } - - area->hash = hash; - uint8_t step = hash.step; - uint64_t hash_sep = deinterleave64(hash.bits); /* hash = [LAT][LONG] */ - - double lat_scale = lat_range.max - 
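
To make the fixed-point step in geohashEncode() concrete: each coordinate is normalized into [0, 1) against its range and scaled by 2^step before interleaving. A small standalone sketch; the range bounds come straight from the encode function's own checks (±180 for longitude, ±85.05112878 for latitude):

#include <cstdint>
#include <cstdio>

// Normalize a coordinate into [0, 1) over its range, then scale by 2^step,
// mirroring the body of geohashEncode() above.
static uint32_t ToFixedPoint(double v, double min, double max, uint8_t step) {
  double offset = (v - min) / (max - min);
  return (uint32_t)(offset * (double)(1ULL << step));
}

int main() {
  // Longitude 116.40 against [-180, 180], step 26:
  uint32_t ilon = ToFixedPoint(116.40, -180.0, 180.0, 26);
  // Latitude 39.90 against [-85.05112878, 85.05112878]:
  uint32_t ilat = ToFixedPoint(39.90, -85.05112878, 85.05112878, 26);
  printf("%u %u\n", ilon, ilat);  // the two halves that interleave64() merges
  return 0;
}
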
lat_range.min; - double long_scale = long_range.max - long_range.min; - - uint32_t ilato = hash_sep; /* get lat part of deinterleaved hash */ - uint32_t ilono = hash_sep >> 32; /* shift over to get long part of hash */ - - /* divide by 2**step. - * Then, for 0-1 coordinate, multiply times scale and add - to the min to get the absolute coordinate. */ - area->latitude.min = - lat_range.min + (ilato * 1.0 / (1ull << step)) * lat_scale; - area->latitude.max = - lat_range.min + ((ilato + 1) * 1.0 / (1ull << step)) * lat_scale; - area->longitude.min = - long_range.min + (ilono * 1.0 / (1ull << step)) * long_scale; - area->longitude.max = - long_range.min + ((ilono + 1) * 1.0 / (1ull << step)) * long_scale; - - return 1; -} +int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, const GeoHashBits hash, + GeoHashArea* area) { + if (HASHISZERO(hash) || nullptr == area || RANGEISZERO(lat_range) || RANGEISZERO(long_range)) { + return 0; + } -int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area) { - GeoHashRange r[2] = {{0}}; - geohashGetCoordRange(&r[0], &r[1]); - return geohashDecode(r[0], r[1], hash, area); -} + area->hash = hash; + uint8_t step = hash.step; + uint64_t hash_sep = deinterleave64(hash.bits); /* hash = [LAT][LONG] */ + + double lat_scale = lat_range.max - lat_range.min; + double long_scale = long_range.max - long_range.min; + + uint32_t ilato = hash_sep; /* get lat part of deinterleaved hash */ + uint32_t ilono = hash_sep >> 32; /* shift over to get long part of hash */ -int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area) { - return geohashDecodeType(hash, area); + /* divide by 2**step. + * Then, for 0-1 coordinate, multiply times scale and add + to the min to get the absolute coordinate. */ + area->latitude.min = lat_range.min + (ilato * 1.0 / static_cast(1ULL << step)) * lat_scale; + area->latitude.max = lat_range.min + ((ilato + 1) * 1.0 / static_cast(1ULL << step)) * lat_scale; + area->longitude.min = long_range.min + (ilono * 1.0 / static_cast(1ULL << step)) * long_scale; + area->longitude.max = long_range.min + ((ilono + 1) * 1.0 / static_cast(1ULL << step)) * long_scale; + + return 1; } -int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy) { - if (!xy) return 0; - xy[0] = (area->longitude.min + area->longitude.max) / 2; - xy[1] = (area->latitude.min + area->latitude.max) / 2; - return 1; +int geohashDecodeType(const GeoHashBits hash, GeoHashArea* area) { + GeoHashRange r[2] = {{0}}; + geohashGetCoordRange(&r[0], &r[1]); + return geohashDecode(r[0], r[1], hash, area); } -int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy) { - GeoHashArea area = {{0}}; - if (!xy || !geohashDecodeType(hash, &area)) - return 0; - return geohashDecodeAreaToLongLat(&area, xy); +int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea* area) { return geohashDecodeType(hash, area); } + +int geohashDecodeAreaToLongLat(const GeoHashArea* area, double* xy) { + if (!xy) { + return 0; + } + xy[0] = (area->longitude.min + area->longitude.max) / 2; + xy[1] = (area->latitude.min + area->latitude.max) / 2; + return 1; } -int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy) { - return geohashDecodeToLongLatType(hash, xy); +int geohashDecodeToLongLatType(const GeoHashBits hash, double* xy) { + GeoHashArea area = {{0}}; + if (!xy || !(geohashDecodeType(hash, &area))) { + return 0; + } + return geohashDecodeAreaToLongLat(&area, xy); } -static void geohash_move_x(GeoHashBits *hash, int8_t d) { - if (d == 0) - return; +int 
geohashDecodeToLongLatWGS84(const GeoHashBits hash, double* xy) { return geohashDecodeToLongLatType(hash, xy); } + +static void geohash_move_x(GeoHashBits* hash, int8_t d) { + if (d == 0) { + return; + } - uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; - uint64_t y = hash->bits & 0x5555555555555555ULL; + uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; + uint64_t y = hash->bits & 0x5555555555555555ULL; - uint64_t zz = 0x5555555555555555ULL >> (64 - hash->step * 2); + uint64_t zz = 0x5555555555555555ULL >> (64 - hash->step * 2); - if (d > 0) { - x = x + (zz + 1); - } else { - x = x | zz; - x = x - (zz + 1); - } + if (d > 0) { + x = x + (zz + 1); + } else { + x = x | zz; + x = x - (zz + 1); + } - x &= (0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2)); - hash->bits = (x | y); + x &= (0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2)); + hash->bits = (x | y); } -static void geohash_move_y(GeoHashBits *hash, int8_t d) { - if (d == 0) - return; - - uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; - uint64_t y = hash->bits & 0x5555555555555555ULL; - - uint64_t zz = 0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2); - if (d > 0) { - y = y + (zz + 1); - } else { - y = y | zz; - y = y - (zz + 1); - } - y &= (0x5555555555555555ULL >> (64 - hash->step * 2)); - hash->bits = (x | y); +static void geohash_move_y(GeoHashBits* hash, int8_t d) { + if (d == 0) { + return; + } + + uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; + uint64_t y = hash->bits & 0x5555555555555555ULL; + + uint64_t zz = 0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2); + if (d > 0) { + y = y + (zz + 1); + } else { + y = y | zz; + y = y - (zz + 1); + } + y &= (0x5555555555555555ULL >> (64 - hash->step * 2)); + hash->bits = (x | y); } -void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors) { - neighbors->east = *hash; - neighbors->west = *hash; - neighbors->north = *hash; - neighbors->south = *hash; - neighbors->south_east = *hash; - neighbors->south_west = *hash; - neighbors->north_east = *hash; - neighbors->north_west = *hash; +void geohashNeighbors(const GeoHashBits* hash, GeoHashNeighbors* neighbors) { + neighbors->east = *hash; + neighbors->west = *hash; + neighbors->north = *hash; + neighbors->south = *hash; + neighbors->south_east = *hash; + neighbors->south_west = *hash; + neighbors->north_east = *hash; + neighbors->north_west = *hash; - geohash_move_x(&neighbors->east, 1); - geohash_move_y(&neighbors->east, 0); + geohash_move_x(&neighbors->east, 1); + geohash_move_y(&neighbors->east, 0); - geohash_move_x(&neighbors->west, -1); - geohash_move_y(&neighbors->west, 0); + geohash_move_x(&neighbors->west, -1); + geohash_move_y(&neighbors->west, 0); - geohash_move_x(&neighbors->south, 0); - geohash_move_y(&neighbors->south, -1); + geohash_move_x(&neighbors->south, 0); + geohash_move_y(&neighbors->south, -1); - geohash_move_x(&neighbors->north, 0); - geohash_move_y(&neighbors->north, 1); + geohash_move_x(&neighbors->north, 0); + geohash_move_y(&neighbors->north, 1); - geohash_move_x(&neighbors->north_west, -1); - geohash_move_y(&neighbors->north_west, 1); + geohash_move_x(&neighbors->north_west, -1); + geohash_move_y(&neighbors->north_west, 1); - geohash_move_x(&neighbors->north_east, 1); - geohash_move_y(&neighbors->north_east, 1); + geohash_move_x(&neighbors->north_east, 1); + geohash_move_y(&neighbors->north_east, 1); - geohash_move_x(&neighbors->south_east, 1); - geohash_move_y(&neighbors->south_east, -1); + geohash_move_x(&neighbors->south_east, 1); + geohash_move_y(&neighbors->south_east, -1); - 
geohash_move_x(&neighbors->south_west, -1); - geohash_move_y(&neighbors->south_west, -1); + geohash_move_x(&neighbors->south_west, -1); + geohash_move_y(&neighbors->south_west, -1); } diff --git a/tools/pika_migrate/src/pika_geohash_helper.cc b/tools/pika_migrate/src/pika_geohash_helper.cc index a2f18d7090..bc671de7dc 100644 --- a/tools/pika_migrate/src/pika_geohash_helper.cc +++ b/tools/pika_migrate/src/pika_geohash_helper.cc @@ -34,16 +34,15 @@ * https://github.com/yinqiwen/ardb/blob/d42503/src/geo/geohash_helper.cpp */ -//#include "fmacros.h" +// #include "fmacros.h" #include "include/pika_geohash_helper.h" -//#include "debugmacro.h" -#include - +// #include "debugmacro.h" +#include #define D_R (M_PI / 180.0) #define R_MAJOR 6378137.0 #define R_MINOR 6356752.3142 #define RATIO (R_MINOR / R_MAJOR) -#define ECCENT (sqrt(1.0 - (RATIO *RATIO))) +#define ECCENT (sqrt(1.0 - (RATIO * RATIO))) #define COM (0.5 * ECCENT) /// @brief The usual PI/180 constant @@ -60,26 +59,33 @@ static inline double rad_deg(double ang) { return ang / D_R; } /* This function is used in order to estimate the step (bits precision) * of the 9 search area boxes during radius queries. */ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { - if (range_meters == 0) return 26; - int step = 1; - while (range_meters < MERCATOR_MAX) { - range_meters *= 2; - step++; - } - step -= 2; /* Make sure range is included in most of the base cases. */ - - /* Wider range torwards the poles... Note: it is possible to do better - * than this approximation by computing the distance between meridians - * at this latitude, but this does the trick for now. */ - if (lat > 66 || lat < -66) { - step--; - if (lat > 80 || lat < -80) step--; - } + if (range_meters == 0) { + return 26; + } + int step = 1; + while (range_meters < MERCATOR_MAX) { + range_meters *= 2; + step++; + } + step -= 2; /* Make sure range is included in most of the base cases. */ - /* Frame to valid range. */ - if (step < 1) step = 1; - if (step > 26) step = 26; - return step; + /* Wider range towards the poles... Note: it is possible to do better + * than this approximation by computing the distance between meridians + * at this latitude, but this does the trick for now. */ + if (lat > 66 || lat < -66) { + step--; + if (lat > 80 || lat < -80) { + step--; + } + } + /* Frame to valid range. */ + if (step < 1) { + step = 1; + } + if (step > 26) { + step = 26; + } + return step; } /* Return the bounding box of the search area centered at latitude,longitude @@ -100,136 +106,160 @@ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { * Since this function is currently only used as an optimization, the * optimization is not used for very big radiuses, however the function * should be fixed. 
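
As a sanity check of the doubling loop in geohashEstimateStepsByRadius(): the loop counts how many doublings the radius needs to reach MERCATOR_MAX, then backs off. A standalone copy of the logic; the MERCATOR_MAX value (20037726.37 m) lives in the companion header, not in this diff, so treat it as an assumption here:

#include <cstdio>

static const double kMercatorMax = 20037726.37;  // assumed MERCATOR_MAX

static int EstimateStep(double range_meters, double lat) {
  if (range_meters == 0) return 26;
  int step = 1;
  while (range_meters < kMercatorMax) {
    range_meters *= 2;
    step++;
  }
  step -= 2;
  if (lat > 66 || lat < -66) {
    step--;
    if (lat > 80 || lat < -80) step--;
  }
  if (step < 1) step = 1;
  if (step > 26) step = 26;
  return step;
}

int main() {
  printf("%d\n", EstimateStep(200.0, 39.9));  // 16: 200 m doubles 17 times
  printf("%d\n", EstimateStep(200.0, 70.0));  // 15: one step fewer near the poles
  return 0;
}
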
*/ -int geohashBoundingBox(double longitude, double latitude, double radius_meters, - double *bounds) { - if (!bounds) return 0; - - bounds[0] = longitude - rad_deg(radius_meters/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude))); - bounds[2] = longitude + rad_deg(radius_meters/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude))); - bounds[1] = latitude - rad_deg(radius_meters/EARTH_RADIUS_IN_METERS); - bounds[3] = latitude + rad_deg(radius_meters/EARTH_RADIUS_IN_METERS); - return 1; +int geohashBoundingBox(double longitude, double latitude, double radius_meters, double* bounds) { + if (!bounds) { + return 0; + } + double height = radius_meters; + double width = radius_meters; + + const double lat_delta = rad_deg(height/EARTH_RADIUS_IN_METERS); + const double long_delta_top = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude+lat_delta))); + const double long_delta_bottom = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude-lat_delta))); + + int southern_hemisphere = latitude < 0 ? 1 : 0; + bounds[0] = southern_hemisphere ? longitude-long_delta_bottom : longitude-long_delta_top; + bounds[2] = southern_hemisphere ? longitude+long_delta_bottom : longitude+long_delta_top; + bounds[1] = latitude - lat_delta; + bounds[3] = latitude + lat_delta; + + return 1; } /* Return a set of areas (center + 8) that are able to cover a range query * for the specified position and radius. */ GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters) { - GeoHashRange long_range, lat_range; - GeoHashRadius radius; - GeoHashBits hash; - GeoHashNeighbors neighbors; - GeoHashArea area; - double min_lon, max_lon, min_lat, max_lat; - double bounds[4]; - int steps; - - geohashBoundingBox(longitude, latitude, radius_meters, bounds); - min_lon = bounds[0]; - min_lat = bounds[1]; - max_lon = bounds[2]; - max_lat = bounds[3]; - - steps = geohashEstimateStepsByRadius(radius_meters,latitude); - - geohashGetCoordRange(&long_range,&lat_range); - geohashEncode(&long_range,&lat_range,longitude,latitude,steps,&hash); - geohashNeighbors(&hash,&neighbors); - geohashDecode(long_range,lat_range,hash,&area); - - /* Check if the step is enough at the limits of the covered area. - * Sometimes when the search area is near an edge of the - * area, the estimated step is not small enough, since one of the - * north / south / west / east square is too near to the search area - * to cover everything. 
*/ - int decrease_step = 0; - { - GeoHashArea north, south, east, west; - - geohashDecode(long_range, lat_range, neighbors.north, &north); - geohashDecode(long_range, lat_range, neighbors.south, &south); - geohashDecode(long_range, lat_range, neighbors.east, &east); - geohashDecode(long_range, lat_range, neighbors.west, &west); - - if (geohashGetDistance(longitude,latitude,longitude,north.latitude.max) - < radius_meters) decrease_step = 1; - if (geohashGetDistance(longitude,latitude,longitude,south.latitude.min) - < radius_meters) decrease_step = 1; - if (geohashGetDistance(longitude,latitude,east.longitude.max,latitude) - < radius_meters) decrease_step = 1; - if (geohashGetDistance(longitude,latitude,west.longitude.min,latitude) - < radius_meters) decrease_step = 1; - } + GeoHashRange long_range; + GeoHashRange lat_range; + GeoHashRadius radius; + GeoHashBits hash; + GeoHashNeighbors neighbors; + GeoHashArea area; + double min_lon; + double max_lon; + double min_lat; + double max_lat; + double bounds[4]; + int steps; + + geohashBoundingBox(longitude, latitude, radius_meters, bounds); + min_lon = bounds[0]; + min_lat = bounds[1]; + max_lon = bounds[2]; + max_lat = bounds[3]; + steps = geohashEstimateStepsByRadius(radius_meters, latitude); + + geohashGetCoordRange(&long_range, &lat_range); + geohashEncode(&long_range, &lat_range, longitude, latitude, steps, &hash); + geohashNeighbors(&hash, &neighbors); + geohashDecode(long_range, lat_range, hash, &area); + /* Check if the step is enough at the limits of the covered area. + * Sometimes when the search area is near an edge of the + * area, the estimated step is not small enough, since one of the + * north / south / west / east square is too near to the search area + * to cover everything. */ + int decrease_step = 0; + { + GeoHashArea north; + GeoHashArea south; + GeoHashArea east; + GeoHashArea west; + + geohashDecode(long_range, lat_range, neighbors.north, &north); + geohashDecode(long_range, lat_range, neighbors.south, &south); + geohashDecode(long_range, lat_range, neighbors.east, &east); + geohashDecode(long_range, lat_range, neighbors.west, &west); - if (steps > 1 && decrease_step) { - steps--; - geohashEncode(&long_range,&lat_range,longitude,latitude,steps,&hash); - geohashNeighbors(&hash,&neighbors); - geohashDecode(long_range,lat_range,hash,&area); + if (north.latitude.max < max_lat) { + decrease_step = 1; + } + if (south.latitude.min > min_lat) { + decrease_step = 1; + } + if (east.longitude.max < max_lon) { + decrease_step = 1; + } + if (west.longitude.min > min_lon) { + decrease_step = 1; } + } + if (steps > 1 && (decrease_step != 0)) { + steps--; + geohashEncode(&long_range, &lat_range, longitude, latitude, steps, &hash); + geohashNeighbors(&hash, &neighbors); + geohashDecode(long_range, lat_range, hash, &area); + } - /* Exclude the search areas that are useless. */ - if (steps >= 2) { - if (area.latitude.min < min_lat) { - GZERO(neighbors.south); - GZERO(neighbors.south_west); - GZERO(neighbors.south_east); - } - if (area.latitude.max > max_lat) { - GZERO(neighbors.north); - GZERO(neighbors.north_east); - GZERO(neighbors.north_west); - } - if (area.longitude.min < min_lon) { - GZERO(neighbors.west); - GZERO(neighbors.south_west); - GZERO(neighbors.north_west); - } - if (area.longitude.max > max_lon) { - GZERO(neighbors.east); - GZERO(neighbors.south_east); - GZERO(neighbors.north_east); - } + /* Exclude the search areas that are useless. 
*/ + if (steps >= 2) { + if (area.latitude.min < min_lat) { + GZERO(neighbors.south); + GZERO(neighbors.south_west); + GZERO(neighbors.south_east); } - radius.hash = hash; - radius.neighbors = neighbors; - radius.area = area; - return radius; + if (area.latitude.max > max_lat) { + GZERO(neighbors.north); + GZERO(neighbors.north_east); + GZERO(neighbors.north_west); + } + if (area.longitude.min < min_lon) { + GZERO(neighbors.west); + GZERO(neighbors.south_west); + GZERO(neighbors.north_west); + } + if (area.longitude.max > max_lon) { + GZERO(neighbors.east); + GZERO(neighbors.south_east); + GZERO(neighbors.north_east); + } + } + radius.hash = hash; + radius.neighbors = neighbors; + radius.area = area; + return radius; } -GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, - double radius_meters) { - return geohashGetAreasByRadius(longitude, latitude, radius_meters); +GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters) { + return geohashGetAreasByRadius(longitude, latitude, radius_meters); } -GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) { - uint64_t bits = hash.bits; - bits <<= (52 - hash.step * 2); - return bits; +GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits& hash) { + uint64_t bits = hash.bits; + bits <<= (52 - hash.step * 2); + return bits; } - -/* Calculate distance using haversin great circle distance formula. */ +/* Calculate distance using simplified haversine great circle distance formula. + * When the longitude diff is 0, the asin(sqrt(a)) in the haversine reduces to asin(sin(abs(u))). + * arcsin(sin(x)) equals x when x ∈ [−π/2, π/2]. Since latitude lies within [−π/2, π/2], + * we can simplify arcsin(sin(x)) to x. + */ +double geohashGetLatDistance(double lat1d, double lat2d) { + return EARTH_RADIUS_IN_METERS * fabs(deg_rad(lat2d) - deg_rad(lat1d)); +} +/* Calculate distance using haversine great circle distance formula. 
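
A compact check of the shortcut introduced above: when the longitudes match, the distance degenerates to R * |Δlat| in radians, which is exactly what geohashGetLatDistance() computes; the full formula follows in geohashGetDistance(). A self-contained sketch — the Earth radius value (6372797.560856 m) comes from the companion header and is an assumption here:

#include <cmath>
#include <cstdio>

static const double kEarthRadiusM = 6372797.560856;  // assumed EARTH_RADIUS_IN_METERS

static double DegRad(double d) { return d * M_PI / 180.0; }

// Full haversine with the same same-longitude fast path as geohashGetDistance().
static double Haversine(double lon1, double lat1, double lon2, double lat2) {
  double v = sin((DegRad(lon2) - DegRad(lon1)) / 2);
  if (v == 0.0) {  // same longitude: cheap great-circle arc along the meridian
    return kEarthRadiusM * fabs(DegRad(lat2) - DegRad(lat1));
  }
  double u = sin((DegRad(lat2) - DegRad(lat1)) / 2);
  double a = u * u + cos(DegRad(lat1)) * cos(DegRad(lat2)) * v * v;
  return 2.0 * kEarthRadiusM * asin(sqrt(a));
}

int main() {
  // One degree of latitude along a meridian is roughly 111.2 km:
  printf("%.1f\n", Haversine(116.4, 39.0, 116.4, 40.0));
  return 0;
}
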
*/ double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d) { - double lat1r, lon1r, lat2r, lon2r, u, v; - lat1r = deg_rad(lat1d); + double lat1r, lon1r, lat2r, lon2r, u, v, a; lon1r = deg_rad(lon1d); - lat2r = deg_rad(lat2d); lon2r = deg_rad(lon2d); - u = sin((lat2r - lat1r) / 2); v = sin((lon2r - lon1r) / 2); - return 2.0 * EARTH_RADIUS_IN_METERS * - asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v)); + /* if v == 0 we can avoid doing expensive math when lons are practically the same */ + if (v == 0.0) + return geohashGetLatDistance(lat1d, lat2d); + lat1r = deg_rad(lat1d); + lat2r = deg_rad(lat2d); + u = sin((lat2r - lat1r) / 2); + a = u * u + cos(lat1r) * cos(lat2r) * v * v; + return 2.0 * EARTH_RADIUS_IN_METERS * asin(sqrt(a)); } -int geohashGetDistanceIfInRadius(double x1, double y1, - double x2, double y2, double radius, - double *distance) { - *distance = geohashGetDistance(x1, y1, x2, y2); - if (*distance > radius) return 0; - return 1; +int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double* distance) { + *distance = geohashGetDistance(x1, y1, x2, y2); + if (*distance > radius) { + return 0; + } + return 1; } -int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, - double y2, double radius, - double *distance) { - return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance); +int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double y2, double radius, double* distance) { + return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance); } diff --git a/tools/pika_migrate/src/pika_hash.cc b/tools/pika_migrate/src/pika_hash.cc index 2f0e4fdf7d..d1b7498bcb 100644 --- a/tools/pika_migrate/src/pika_hash.cc +++ b/tools/pika_migrate/src/pika_hash.cc @@ -5,11 +5,13 @@ #include "include/pika_hash.h" -#include "slash/include/slash_string.h" +#include "pstd/include/pstd_string.h" #include "include/pika_conf.h" +#include "include/pika_slot_command.h" +#include "include/pika_cache.h" -extern PikaConf *g_pika_conf; +extern std::unique_ptr g_pika_conf; void HDelCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -17,22 +19,32 @@ void HDelCmd::DoInitial() { return; } key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); - iter++; + auto iter = argv_.begin(); + iter++; iter++; fields_.assign(iter, argv_.end()); - return; } -void HDelCmd::Do(std::shared_ptr partition) { - int32_t num = 0; - rocksdb::Status s = partition->db()->HDel(key_, fields_, &num); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(num); +void HDelCmd::Do() { + s_ = db_->storage()->HDel(key_, fields_, &deleted_); + + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(deleted_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HDelCmd::DoThroughDB() { + Do(); +} + +void HDelCmd::DoUpdateCache() { + if (s_.ok() && deleted_ > 0) { + db_->cache()->HDel(key_, fields_); } - return; } void HSetCmd::DoInitial() { @@ -43,18 +55,33 @@ void HSetCmd::DoInitial() { key_ = argv_[1]; field_ = argv_[2]; value_ = argv_[3]; - return; } -void HSetCmd::Do(std::shared_ptr partition) { +void HSetCmd::Do() { int32_t ret = 0; - rocksdb::Status s = partition->db()->HSet(key_, field_, value_, &ret); - if (s.ok()) { + s_ = db_->storage()->HSet(key_, field_, value_, &ret); + if (s_.ok()) { res_.AppendContent(":" + std::to_string(ret)); + AddSlotKey("h", key_, db_); + } 
else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HSetCmd::DoThroughDB() { + Do(); +} + +void HSetCmd::DoUpdateCache() { + // HSetIfKeyExist() can avoid storing a large key, but checking IsTooLargeKey() first speeds that up + if (IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) { + return; + } + if (s_.ok()) { + db_->cache()->HSetIfKeyExist(key_, field_, value_); } - return; } void HGetCmd::DoInitial() { @@ -64,71 +91,125 @@ } key_ = argv_[1]; field_ = argv_[2]; - return; } -void HGetCmd::Do(std::shared_ptr partition) { +void HGetCmd::Do() { + std::string value; + s_ = db_->storage()->HGet(key_, field_, &value); + if (s_.ok()) { + res_.AppendStringLenUint64(value.size()); + res_.AppendContent(value); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { + res_.AppendContent("$-1"); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HGetCmd::ReadCache() { std::string value; - rocksdb::Status s = partition->db()->HGet(key_, field_, &value); + auto s = db_->cache()->HGet(key_, field_, &value); if (s.ok()) { res_.AppendStringLen(value.size()); res_.AppendContent(value); } else if (s.IsNotFound()) { - res_.AppendContent("$-1"); + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void HGetCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void HGetCmd::DoUpdateCache() { + if (IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) { + return; + } + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_); + } +} + void HGetallCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameHGetall); return; } key_ = argv_[1]; - return; } -void HGetallCmd::Do(std::shared_ptr partition) { +void HGetallCmd::Do() { int64_t total_fv = 0; - int64_t cursor = 0, next_cursor = 0; + int64_t cursor = 0; + int64_t next_cursor = 0; size_t raw_limit = g_pika_conf->max_client_response_size(); std::string raw; - rocksdb::Status s; - std::vector fvs; + std::vector fvs; do { fvs.clear(); - s = partition->db()->HScan(key_, cursor, "*", PIKA_SCAN_STEP_LENGTH, &fvs, &next_cursor); - if (!s.ok()) { + s_ = db_->storage()->HScan(key_, cursor, "*", PIKA_SCAN_STEP_LENGTH, &fvs, &next_cursor); + if (!s_.ok()) { raw.clear(); total_fv = 0; break; } else { for (const auto& fv : fvs) { - RedisAppendLen(raw, fv.field.size(), "$"); + RedisAppendLenUint64(raw, fv.field.size(), "$"); RedisAppendContent(raw, fv.field); - RedisAppendLen(raw, fv.value.size(), "$"); + RedisAppendLenUint64(raw, fv.value.size(), "$"); RedisAppendContent(raw, fv.value); } if (raw.size() >= raw_limit) { res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit"); return; } - total_fv += fvs.size(); + total_fv += static_cast(fvs.size()); cursor = next_cursor; } } while (cursor != 0); - if (s.ok() || s.IsNotFound()) { + if (s_.ok() || s_.IsNotFound()) { res_.AppendArrayLen(total_fv * 2); res_.AppendStringRaw(raw); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HGetallCmd::ReadCache() { + std::vector fvs; + auto s = db_->cache()->HGetall(key_, &fvs); + if (s.ok()) { + res_.AppendArrayLen(fvs.size() * 2); + for (const auto& fv : fvs) { + res_.AppendStringLen(fv.field.size()); + res_.AppendContent(fv.field); + res_.AppendStringLen(fv.value.size()); + res_.AppendContent(fv.value); + } + } 
else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } +void HGetallCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void HGetallCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_); + } +} void HExistsCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -137,20 +218,43 @@ void HExistsCmd::DoInitial() { } key_ = argv_[1]; field_ = argv_[2]; - return; } -void HExistsCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->HExists(key_, field_); +void HExistsCmd::Do() { + s_ = db_->storage()->HExists(key_, field_); + if (s_.ok()) { + res_.AppendContent(":1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { + res_.AppendContent(":0"); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HExistsCmd::ReadCache() { + auto s = db_->cache()->HExists(key_, field_); if (s.ok()) { res_.AppendContent(":1"); } else if (s.IsNotFound()) { - res_.AppendContent(":0"); + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void HExistsCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void HExistsCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_); + } +} + void HIncrbyCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameHIncrby); @@ -158,26 +262,37 @@ void HIncrbyCmd::DoInitial() { } key_ = argv_[1]; field_ = argv_[2]; - if (argv_[3].find(" ") != std::string::npos || !slash::string2l(argv_[3].data(), argv_[3].size(), &by_)) { + if (argv_[3].find(' ') != std::string::npos || (pstd::string2int(argv_[3].data(), argv_[3].size(), &by_) == 0)) { res_.SetRes(CmdRes::kInvalidInt); return; } - return; } -void HIncrbyCmd::Do(std::shared_ptr partition) { - int64_t new_value; - rocksdb::Status s = partition->db()->HIncrby(key_, field_, by_, &new_value); - if (s.ok() || s.IsNotFound()) { +void HIncrbyCmd::Do() { + int64_t new_value = 0; + s_ = db_->storage()->HIncrby(key_, field_, by_, &new_value); + if (s_.ok() || s_.IsNotFound()) { res_.AppendContent(":" + std::to_string(new_value)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: hash value is not an integer") { + AddSlotKey("h", key_, db_); + } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: hash value is not an integer") { res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { + } else if (s_.IsInvalidArgument()) { res_.SetRes(CmdRes::kOverFlow); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HIncrbyCmd::DoThroughDB() { + Do(); +} + +void HIncrbyCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->HIncrbyxx(key_, field_, by_); } - return; } void HIncrbyfloatCmd::DoInitial() { @@ -188,23 +303,37 @@ void HIncrbyfloatCmd::DoInitial() { key_ = argv_[1]; field_ = argv_[2]; by_ = argv_[3]; - return; } -void HIncrbyfloatCmd::Do(std::shared_ptr partition) { +void HIncrbyfloatCmd::Do() { std::string new_value; - rocksdb::Status s = partition->db()->HIncrbyfloat(key_, field_, by_, &new_value); - if (s.ok()) { - res_.AppendStringLen(new_value.size()); + s_ = db_->storage()->HIncrbyfloat(key_, field_, by_, &new_value); + if (s_.ok()) { + 
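
The ReadCache/DoThroughDB/DoUpdateCache triple repeated across these hash commands implements a read-through cache: serve from the cache on a hit, fall back to storage on kCacheMiss, then schedule an asynchronous cache reload. A minimal sketch of the control flow — the method names match this patch, but the driver and the HitCache() helper are invented for illustration; Pika's real dispatch lives in its command executor:

// Hypothetical driver showing the read-through order of operations.
struct ReadThroughCmd {
  void Execute() {
    ReadCache();        // 1. try the cache; sets kCacheMiss on a miss
    if (!HitCache()) {
      DoThroughDB();    // 2. res_.clear() + Do() against RocksDB storage
      DoUpdateCache();  // 3. push the key onto the async cache-load queue
    }
  }
  virtual void ReadCache() = 0;
  virtual void DoThroughDB() = 0;
  virtual void DoUpdateCache() = 0;
  virtual bool HitCache() const = 0;  // invented helper, not in the patch
  virtual ~ReadThroughCmd() = default;
};
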
res_.AppendStringLenUint64(new_value.size()); res_.AppendContent(new_value); - } else if (s.IsCorruption() && s.ToString() == "Corruption: value is not a vaild float") { + AddSlotKey("h", key_, db_); + } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: value is not a vaild float") { res_.SetRes(CmdRes::kInvalidFloat); - } else if (s.IsInvalidArgument()) { + } else if (s_.IsInvalidArgument()) { res_.SetRes(CmdRes::kOverFlow); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HIncrbyfloatCmd::DoThroughDB() { + Do(); +} + +void HIncrbyfloatCmd::DoUpdateCache() { + if (s_.ok()) { + long double long_double_by; + if (storage::StrToLongDouble(by_.data(), by_.size(), &long_double_by) != -1) { + db_->cache()->HIncrbyfloatxx(key_, field_, long_double_by); + } } - return; } void HKeysCmd::DoInitial() { @@ -213,21 +342,47 @@ void HKeysCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void HKeysCmd::Do(std::shared_ptr partition) { +void HKeysCmd::Do() { std::vector fields; - rocksdb::Status s = partition->db()->HKeys(key_, &fields); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->HKeys(key_, &fields); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(fields.size()); + for (const auto& field : fields) { + res_.AppendString(field); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HKeysCmd::ReadCache() { + std::vector fields; + auto s = db_->cache()->HKeys(key_, &fields); + if (s.ok()) { res_.AppendArrayLen(fields.size()); for (const auto& field : fields) { res_.AppendString(field); } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; +} + +void HKeysCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void HKeysCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_); + } } void HLenCmd::DoInitial() { @@ -236,18 +391,41 @@ void HLenCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void HLenCmd::Do(std::shared_ptr partition) { +void HLenCmd::Do() { int32_t len = 0; - rocksdb::Status s = partition->db()->HLen(key_, &len); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->HLen(key_, &len); + if (s_.ok() || s_.IsNotFound()) { res_.AppendInteger(len); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, "something wrong in hlen"); } - return; +} + +void HLenCmd::ReadCache() { + uint64_t len = 0; + auto s = db_->cache()->HLen(key_, &len); + if (s.ok()) { + res_.AppendInteger(len); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, "something wrong in hlen"); + } +} + +void HLenCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void HLenCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_); + } } void HMgetCmd::DoInitial() { @@ -256,17 +434,36 @@ void HMgetCmd::DoInitial() { return; } key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); + auto iter = argv_.begin(); iter++; iter++; - fields_.assign(iter, argv_.end()); - return; + fields_.assign(iter, argv_.end()); } -void HMgetCmd::Do(std::shared_ptr 
partition) { - std::vector vss; - rocksdb::Status s = partition->db()->HMGet(key_, fields_, &vss); - if (s.ok() || s.IsNotFound()) { +void HMgetCmd::Do() { + std::vector vss; + s_ = db_->storage()->HMGet(key_, fields_, &vss); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(vss.size()); + for (const auto& vs : vss) { + if (vs.status.ok()) { + res_.AppendStringLenUint64(vs.value.size()); + res_.AppendContent(vs.value); + } else { + res_.AppendContent("$-1"); + } + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HMgetCmd::ReadCache() { + std::vector vss; + auto s = db_->cache()->HMGet(key_, fields_, &vss); + if (s.ok()) { res_.AppendArrayLen(vss.size()); for (const auto& vs : vss) { if (vs.status.ok()) { @@ -276,10 +473,22 @@ void HMgetCmd::Do(std::shared_ptr partition) { res_.AppendContent("$-1"); } } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; +} + +void HMgetCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void HMgetCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_); + } } void HMsetCmd::DoInitial() { @@ -298,17 +507,28 @@ void HMsetCmd::DoInitial() { for (; index < argc; index += 2) { fvs_.push_back({argv_[index], argv_[index + 1]}); } - return; } -void HMsetCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->HMSet(key_, fvs_); - if (s.ok()) { +void HMsetCmd::Do() { + s_ = db_->storage()->HMSet(key_, fvs_); + if (s_.ok()) { res_.SetRes(CmdRes::kOk); + AddSlotKey("h", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HMsetCmd::DoThroughDB() { + Do(); +} + +void HMsetCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->HMSetxx(key_, fvs_); } - return; } void HSetnxCmd::DoInitial() { @@ -319,16 +539,28 @@ void HSetnxCmd::DoInitial() { key_ = argv_[1]; field_ = argv_[2]; value_ = argv_[3]; - return; } -void HSetnxCmd::Do(std::shared_ptr partition) { +void HSetnxCmd::Do() { int32_t ret = 0; - rocksdb::Status s = partition->db()->HSetnx(key_, field_, value_, &ret); - if (s.ok()) { + s_ = db_->storage()->HSetnx(key_, field_, value_, &ret); + if (s_.ok()) { res_.AppendContent(":" + std::to_string(ret)); + AddSlotKey("h", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HSetnxCmd::DoThroughDB() { + Do(); +} + +void HSetnxCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->HSetIfKeyExistAndFieldNotExist(key_, field_, value_); } } @@ -339,42 +571,93 @@ void HStrlenCmd::DoInitial() { } key_ = argv_[1]; field_ = argv_[2]; - return; } -void HStrlenCmd::Do(std::shared_ptr partition) { +void HStrlenCmd::Do() { int32_t len = 0; - rocksdb::Status s = partition->db()->HStrlen(key_, field_, &len); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->HStrlen(key_, field_, &len); + if (s_.ok() || s_.IsNotFound()) { res_.AppendInteger(len); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, "something wrong in hstrlen"); + } +} + +void HStrlenCmd::ReadCache() { + uint64_t len = 0; + auto s = db_->cache()->HStrlen(key_, field_, &len); + if 
(s.ok()) { + res_.AppendInteger(len); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, "something wrong in hstrlen"); } return; } +void HStrlenCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void HStrlenCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_); + } +} + void HValsCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameHVals); return; } key_ = argv_[1]; - return; } -void HValsCmd::Do(std::shared_ptr partition) { +void HValsCmd::Do() { std::vector values; - rocksdb::Status s = partition->db()->HVals(key_, &values); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->HVals(key_, &values); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(values.size()); + for (const auto& value : values) { + res_.AppendStringLenUint64(value.size()); + res_.AppendContent(value); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void HValsCmd::ReadCache() { + std::vector values; + auto s = db_->cache()->HVals(key_, &values); + if (s.ok()) { res_.AppendArrayLen(values.size()); for (const auto& value : values) { res_.AppendStringLen(value.size()); res_.AppendContent(value); } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; +} + +void HValsCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void HValsCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_); + } } void HScanCmd::DoInitial() { @@ -383,24 +666,24 @@ void HScanCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &cursor_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &cursor_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - size_t index = 3, argc = argv_.size(); + size_t index = 3; + size_t argc = argv_.size(); while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_)) { + } else if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -414,30 +697,30 @@ void HScanCmd::DoInitial() { res_.SetRes(CmdRes::kSyntaxErr); return; } - return; } -void HScanCmd::Do(std::shared_ptr partition) { +void HScanCmd::Do() { int64_t next_cursor = 0; - std::vector field_values; - rocksdb::Status s = partition->db()->HScan(key_, cursor_, pattern_, count_, &field_values, &next_cursor); + std::vector field_values; + auto s = db_->storage()->HScan(key_, cursor_, pattern_, count_, &field_values, &next_cursor); if (s.ok() || s.IsNotFound()) { res_.AppendContent("*2"); char buf[32]; - int32_t len = slash::ll2string(buf, sizeof(buf), next_cursor); + int32_t len = pstd::ll2string(buf, sizeof(buf), next_cursor); res_.AppendStringLen(len); res_.AppendContent(buf); - res_.AppendArrayLen(field_values.size()*2); + res_.AppendArrayLenUint64(field_values.size() * 2); for (const auto& field_value : field_values) { 
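
For reference, the MATCH/COUNT parsing above yields the usual Redis scan contract: the reply is [next_cursor, [field, value, ...]], and a caller iterates until the server hands back cursor 0. A hypothetical client-side loop (RunHScan and ScanAll are invented stand-ins; a real client would issue the command over the wire and parse the reply):

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Stub standing in for one HSCAN round-trip.
static int64_t RunHScan(const std::string& /*key*/, int64_t /*cursor*/,
                        const std::string& /*pattern*/, int64_t /*count*/,
                        std::vector<std::pair<std::string, std::string>>* /*out*/) {
  return 0;  // pretend the keyspace was exhausted in one pass
}

// Drives the cursor to completion, mirroring HScanCmd's contract above.
static void ScanAll(const std::string& key,
                    std::vector<std::pair<std::string, std::string>>* out) {
  int64_t cursor = 0;
  do {
    cursor = RunHScan(key, cursor, "*", 10, out);
  } while (cursor != 0);
}
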
res_.AppendString(field_value.field); res_.AppendString(field_value.value); } + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } void HScanxCmd::DoInitial() { @@ -448,19 +731,19 @@ key_ = argv_[1]; start_field_ = argv_[2]; - size_t index = 3, argc = argv_.size(); + size_t index = 3; + size_t argc = argv_.size(); while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_)) { + } else if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -474,28 +757,28 @@ res_.SetRes(CmdRes::kSyntaxErr); return; } - return; } -void HScanxCmd::Do(std::shared_ptr partition) { +void HScanxCmd::Do() { std::string next_field; - std::vector field_values; - rocksdb::Status s = partition->db()->HScanx(key_, start_field_, pattern_, count_, &field_values, &next_field); + std::vector field_values; + rocksdb::Status s = db_->storage()->HScanx(key_, start_field_, pattern_, count_, &field_values, &next_field); if (s.ok() || s.IsNotFound()) { res_.AppendArrayLen(2); - res_.AppendStringLen(next_field.size()); + res_.AppendStringLenUint64(next_field.size()); res_.AppendContent(next_field); - res_.AppendArrayLen(2 * field_values.size()); + res_.AppendArrayLenUint64(2 * field_values.size()); for (const auto& field_value : field_values) { res_.AppendString(field_value.field); res_.AppendString(field_value.value); } + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } void PKHScanRangeCmd::DoInitial() { @@ -507,19 +790,19 @@ key_ = argv_[1]; field_start_ = argv_[2]; field_end_ = argv_[3]; - size_t index = 4, argc = argv_.size(); + size_t index = 4; + size_t argc = argv_.size(); while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "limit")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "limit") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &limit_) || limit_ <= 0) { + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &limit_) == 0) || limit_ <= 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -529,28 +812,28 @@ } index++; } - return; } -void PKHScanRangeCmd::Do(std::shared_ptr partition) { +void PKHScanRangeCmd::Do() { std::string next_field; - std::vector field_values; - rocksdb::Status s = partition->db()->PKHScanRange(key_, field_start_, field_end_, - pattern_, limit_, &field_values, &next_field); + std::vector field_values; + rocksdb::Status s = + db_->storage()->PKHScanRange(key_, field_start_, field_end_, pattern_, static_cast(limit_), &field_values, 
&next_field); if (s.ok() || s.IsNotFound()) { res_.AppendArrayLen(2); res_.AppendString(next_field); - res_.AppendArrayLen(2 * field_values.size()); + res_.AppendArrayLenUint64(2 * field_values.size()); for (const auto& field_value : field_values) { res_.AppendString(field_value.field); res_.AppendString(field_value.value); } + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } void PKHRScanRangeCmd::DoInitial() { @@ -562,19 +845,19 @@ key_ = argv_[1]; field_start_ = argv_[2]; field_end_ = argv_[3]; - size_t index = 4, argc = argv_.size(); + size_t index = 4; + size_t argc = argv_.size(); while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "limit")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "limit") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &limit_) || limit_ <= 0) { + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &limit_) == 0) || limit_ <= 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -584,26 +867,26 @@ } index++; } - return; } -void PKHRScanRangeCmd::Do(std::shared_ptr partition) { +void PKHRScanRangeCmd::Do() { std::string next_field; - std::vector field_values; - rocksdb::Status s = partition->db()->PKHRScanRange(key_, field_start_, field_end_, - pattern_, limit_, &field_values, &next_field); + std::vector field_values; + rocksdb::Status s = + db_->storage()->PKHRScanRange(key_, field_start_, field_end_, pattern_, static_cast(limit_), &field_values, &next_field); if (s.ok() || s.IsNotFound()) { res_.AppendArrayLen(2); res_.AppendString(next_field); - res_.AppendArrayLen(2 * field_values.size()); + res_.AppendArrayLenUint64(2 * field_values.size()); for (const auto& field_value : field_values) { res_.AppendString(field_value.field); res_.AppendString(field_value.value); } + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } diff --git a/tools/pika_migrate/src/pika_hyperloglog.cc b/tools/pika_migrate/src/pika_hyperloglog.cc index e36cff7d81..5b333934cc 100644 --- a/tools/pika_migrate/src/pika_hyperloglog.cc +++ b/tools/pika_migrate/src/pika_hyperloglog.cc @@ -19,13 +19,15 @@ void PfAddCmd::DoInitial() { } } -void PfAddCmd::Do(std::shared_ptr partition) { +void PfAddCmd::Do() { bool update = false; - rocksdb::Status s = partition->db()->PfAdd(key_, values_, &update); + rocksdb::Status s = db_->storage()->PfAdd(key_, values_, &update); if (s.ok() && update) { res_.AppendInteger(1); } else if (s.ok() && !update) { res_.AppendInteger(0); + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } @@ -42,11 +44,13 @@ } } -void PfCountCmd::Do(std::shared_ptr partition) { +void PfCountCmd::Do() { int64_t value_ = 0; - rocksdb::Status s = partition->db()->PfCount(keys_, &value_); + rocksdb::Status s = db_->storage()->PfCount(keys_, &value_); if (s.ok()) { res_.AppendInteger(value_); + } else if 
(s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } @@ -63,11 +67,25 @@ void PfMergeCmd::DoInitial() { } } -void PfMergeCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->PfMerge(keys_); +void PfMergeCmd::Do() { + rocksdb::Status s = db_->storage()->PfMerge(keys_, value_to_dest_); if (s.ok()) { res_.SetRes(CmdRes::kOk); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void PfMergeCmd::DoBinlog() { + PikaCmdArgsType set_args; + // use the lowercase "set" instead of "SET" so this binlog can be distinguished from SetCmd's + set_args.emplace_back("set"); + set_args.emplace_back(keys_[0]); + set_args.emplace_back(value_to_dest_); + set_cmd_->Initial(set_args, db_name_); + set_cmd_->SetConn(GetConn()); + set_cmd_->SetResp(resp_.lock()); + // the value of this binlog might look strange: it is a string with a size of 128KB + set_cmd_->DoBinlog(); +} diff --git a/tools/pika_migrate/src/pika_inner_message.proto b/tools/pika_migrate/src/pika_inner_message.proto index 713c05077a..9e2a3ef04c 100644 --- a/tools/pika_migrate/src/pika_inner_message.proto +++ b/tools/pika_migrate/src/pika_inner_message.proto @@ -1,3 +1,4 @@ +syntax = "proto2"; package InnerMessage; enum Type { @@ -12,11 +13,15 @@ enum Type { enum StatusCode { kOk = 1; kError = 2; + kOther = 3; } message BinlogOffset { required uint32 filenum = 1; required uint64 offset = 2; + // consensus use + optional uint32 term = 3; + optional uint64 index = 4; } message Node { @@ -24,19 +29,29 @@ message Node { required int32 port = 2; } -message Partition { - required string table_name = 1; - required uint32 partition_id = 2; +message Slot { + required string db_name = 1; + required uint32 slot_id = 2; } -message TableInfo { - required string table_name = 1; - required uint32 partition_num = 2; - repeated uint32 partition_ids = 3; +message DBInfo { + required string db_name = 1; + required uint32 slot_num = 2; + repeated uint32 slot_ids = 3; } message PikaMeta { - repeated TableInfo table_infos = 1; + repeated DBInfo db_infos = 1; +} + +message ConsensusMeta { + optional uint32 term = 1; + // Leader -> Follower prev_log_offset + // Follower -> Leader last_log_offset + optional BinlogOffset log_offset = 2; + optional BinlogOffset commit = 3; + optional bool reject = 4; + repeated BinlogOffset hint = 5; } // Request message @@ -50,21 +65,21 @@ message InnerRequest { // slave to master message TrySync { required Node node = 1; - required Partition partition = 2; + required Slot slot = 2; required BinlogOffset binlog_offset = 3; } // slave to master message DBSync { required Node node = 1; - required Partition partition = 2; + required Slot slot = 2; required BinlogOffset binlog_offset = 3; } message BinlogSync { required Node node = 1; - required string table_name = 2; - required uint32 partition_id = 3; + required string db_name = 2; + required uint32 slot_id = 3; required BinlogOffset ack_range_start = 4; required BinlogOffset ack_range_end = 5; required int32 session_id = 6; @@ -73,7 +88,7 @@ message InnerRequest { message RemoveSlaveNode { required Node node = 1; - required Partition partition = 2; + required Slot slot = 2; } required Type type = 1; @@ -82,10 +97,11 @@ optional DBSync db_sync = 4; optional BinlogSync binlog_sync = 5; repeated RemoveSlaveNode remove_slave_node = 6; + optional ConsensusMeta consensus_meta = 7; } -message PartitionInfo { - required uint32 partition_id = 1;
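+// SlotInfo supersedes the old PartitionInfo message: the replication protocol now addresses a slot of a DB instead of a partition of a table.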
+message SlotInfo { + required uint32 slot_id = 1; required Node master = 2; repeated Node slaves = 3; } @@ -94,12 +110,15 @@ message PartitionInfo { message InnerResponse { // master to slave message MetaSync { - message TableInfo { - required string table_name = 1; - required int32 partition_num = 2; + message DBInfo { + required string db_name = 1; + required int32 slot_num = 2; + required int32 db_instance_num = 3; } required bool classic_mode = 1; - repeated TableInfo tables_info = 2; + repeated DBInfo dbs_info = 2; + required string run_id = 3; + optional string replication_id = 4; } // master to slave @@ -111,27 +130,27 @@ message InnerResponse { kError = 4; } required ReplyCode reply_code = 1; - required Partition partition = 2; + required Slot slot = 2; optional BinlogOffset binlog_offset = 3; optional int32 session_id = 4; } message DBSync { - required Partition partition = 1; + required Slot slot = 1; required int32 session_id = 2; } // master to slave message BinlogSync { - required Partition partition = 1; + required Slot slot = 1; required BinlogOffset binlog_offset = 2; required bytes binlog = 3; required int32 session_id = 4; } message RemoveSlaveNode { - required Node node = 1; - required Partition partition = 2; + required Node node = 1; + required Slot slot = 2; } required Type type = 1; @@ -142,4 +161,6 @@ message InnerResponse { optional TrySync try_sync = 6; repeated BinlogSync binlog_sync = 7; repeated RemoveSlaveNode remove_slave_node = 8; + // consensus use + optional ConsensusMeta consensus_meta = 9; } diff --git a/tools/pika_migrate/src/pika_instant.cc b/tools/pika_migrate/src/pika_instant.cc new file mode 100644 index 0000000000..b2e33287fb --- /dev/null +++ b/tools/pika_migrate/src/pika_instant.cc @@ -0,0 +1,40 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include "../include/pika_instant.h" + +/* Return the mean of all the samples. */ +double Instant::getInstantaneousMetric(std::string metric) { + size_t j; + size_t sum = 0; + + for (j = 0; j < STATS_METRIC_SAMPLES; j++) + sum += inst_metrics_[metric].samples[j]; + + return sum / STATS_METRIC_SAMPLES; +} + +/* ======================= Cron: called every 5 s ======================== */ + +/* Add a sample to the instantaneous metric. This function computes the quotient + * of the increment of value and base, which is useful to record operation count + * per second, or the average time consumption of an operation. + * + * current_value - The dividend + * current_base - The divisor + * */ +void Instant::trackInstantaneousMetric(std::string metric, size_t current_value, size_t current_base, size_t factor) { + if (inst_metrics_[metric].last_sample_base > 0) { + size_t base = current_base - inst_metrics_[metric].last_sample_base; + size_t value = current_value - inst_metrics_[metric].last_sample_value; + size_t avg = base > 0 ? 
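+ // e.g. to sample a rate such as ops/sec: value is the operation-count delta and base is the elapsed milliseconds since the last sample, + // so with factor = 1000 the avg computed below works out to operations per second over this sampling window.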
(value * factor / base) : 0; + inst_metrics_[metric].samples[inst_metrics_[metric].idx] = avg; + inst_metrics_[metric].idx++; + inst_metrics_[metric].idx %= STATS_METRIC_SAMPLES; + } + inst_metrics_[metric].last_sample_base = current_base; + inst_metrics_[metric].last_sample_value = current_value; +} \ No newline at end of file diff --git a/tools/pika_migrate/src/pika_kv.cc b/tools/pika_migrate/src/pika_kv.cc index 732878b05b..4c9c459184 100644 --- a/tools/pika_migrate/src/pika_kv.cc +++ b/tools/pika_migrate/src/pika_kv.cc @@ -4,14 +4,15 @@ // of patent rights can be found in the PATENTS file in the same directory. #include "include/pika_kv.h" +#include -#include "slash/include/slash_string.h" - +#include "include/pika_command.h" +#include "include/pika_slot_command.h" +#include "include/pika_cache.h" #include "include/pika_conf.h" -#include "include/pika_binlog_transverter.h" - -extern PikaConf *g_pika_conf; +#include "pstd/include/pstd_string.h" +extern std::unique_ptr g_pika_conf; /* SET key value [NX] [XX] [EX ] [PX ] */ void SetCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -21,15 +22,15 @@ void SetCmd::DoInitial() { key_ = argv_[1]; value_ = argv_[2]; condition_ = SetCmd::kNONE; - sec_ = 0; + ttl_millsec = 0; size_t index = 3; while (index != argv_.size()) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "xx")) { + if (strcasecmp(opt.data(), "xx") == 0) { condition_ = SetCmd::kXX; - } else if (!strcasecmp(opt.data(), "nx")) { + } else if (strcasecmp(opt.data(), "nx") == 0) { condition_ = SetCmd::kNX; - } else if (!strcasecmp(opt.data(), "vx")) { + } else if (strcasecmp(opt.data(), "vx") == 0) { condition_ = SetCmd::kVX; index++; if (index == argv_.size()) { @@ -38,75 +39,86 @@ void SetCmd::DoInitial() { } else { target_ = argv_[index]; } - } else if (!strcasecmp(opt.data(), "ex") || !strcasecmp(opt.data(), "px")) { + } else if ((strcasecmp(opt.data(), "ex") == 0) || (strcasecmp(opt.data(), "px") == 0)) { condition_ = (condition_ == SetCmd::kNONE) ? 
SetCmd::kEXORPX : condition_; index++; if (index == argv_.size()) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!slash::string2l(argv_[index].data(), argv_[index].size(), &sec_)) { + if (pstd::string2int(argv_[index].data(), argv_[index].size(), &ttl_millsec) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; - } else if (sec_ <= 0) { - res_.SetRes(CmdRes::kErrOther, "invalid expire time in set"); - return; } - if (!strcasecmp(opt.data(), "px")) { - sec_ /= 1000; + if (strcasecmp(opt.data(), "ex") == 0) { + ttl_millsec *= 1000; } + has_ttl_ = true; } else { res_.SetRes(CmdRes::kSyntaxErr); return; } index++; } - return; } -void SetCmd::Do(std::shared_ptr partition) { - rocksdb::Status s; +void SetCmd::Do() { int32_t res = 1; switch (condition_) { case SetCmd::kXX: - s = partition->db()->Setxx(key_, value_, &res, sec_); + s_ = db_->storage()->Setxx(key_, value_, &res, ttl_millsec); break; case SetCmd::kNX: - s = partition->db()->Setnx(key_, value_, &res, sec_); + s_ = db_->storage()->Setnx(key_, value_, &res, ttl_millsec); break; case SetCmd::kVX: - s = partition->db()->Setvx(key_, target_, value_, &success_, sec_); + s_ = db_->storage()->Setvx(key_, target_, value_, &success_, ttl_millsec); break; case SetCmd::kEXORPX: - s = partition->db()->Setex(key_, value_, sec_); + s_ = db_->storage()->Setex(key_, value_, ttl_millsec); break; default: - s = partition->db()->Set(key_, value_); + s_ = db_->storage()->Set(key_, value_); break; } - if (s.ok() || s.IsNotFound()) { + if (s_.ok() || s_.IsNotFound()) { if (condition_ == SetCmd::kVX) { res_.AppendInteger(success_); } else { if (res == 1) { res_.SetRes(CmdRes::kOk); + AddSlotKey("k", key_, db_); } else { - res_.AppendArrayLen(-1);; + res_.AppendStringLen(-1); } } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SetCmd::DoThroughDB() { + Do(); +} + +void SetCmd::DoUpdateCache() { + if (SetCmd::kNX == condition_ || IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) { + return; + } + if (s_.ok()) { + if (has_ttl_) { + db_->cache()->Setxx(key_, value_, ttl_millsec / 1000); + } else { + db_->cache()->SetxxWithoutTTL(key_, value_); + } } } -std::string SetCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { +std::string SetCmd::ToRedisProtocol() { if (condition_ == SetCmd::kEXORPX) { std::string content; content.reserve(RAW_ARGS_LEN); @@ -114,31 +126,26 @@ std::string SetCmd::ToBinlog( // to pksetexat cmd std::string pksetexat_cmd("pksetexat"); - RedisAppendLen(content, pksetexat_cmd.size(), "$"); + RedisAppendLenUint64(content, pksetexat_cmd.size(), "$"); RedisAppendContent(content, pksetexat_cmd); // key - RedisAppendLen(content, key_.size(), "$"); + RedisAppendLenUint64(content, key_.size(), "$"); RedisAppendContent(content, key_); // time_stamp char buf[100]; - int32_t time_stamp = time(nullptr) + sec_; - slash::ll2string(buf, 100, time_stamp); + + // TODO: precision loss, the millisecond TTL is truncated to whole seconds here + auto time_stamp = time(nullptr) + ttl_millsec / 1000; + pstd::ll2string(buf, 100, time_stamp); std::string at(buf); - RedisAppendLen(content, at.size(), "$"); + RedisAppendLenUint64(content, at.size(), "$"); RedisAppendContent(content, at); // value - RedisAppendLen(content, value_.size(), "$"); + RedisAppendLenUint64(content, value_.size(), "$"); RedisAppendContent(content, value_); - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time,
- std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); + return content; } else { - return Cmd::ToBinlog(exec_time, server_id, logic_id, filenum, offset); + return Cmd::ToRedisProtocol(); } } @@ -148,41 +155,100 @@ void GetCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void GetCmd::Do(std::shared_ptr partition) { - std::string value; - rocksdb::Status s = partition->db()->Get(key_, &value); - if (s.ok()) { - res_.AppendStringLen(value.size()); - res_.AppendContent(value); - } else if (s.IsNotFound()) { +void GetCmd::Do() { + s_ = db_->storage()->GetWithTTL(key_, &value_, &ttl_millsec_); + if (s_.ok()) { + res_.AppendStringLenUint64(value_.size()); + res_.AppendContent(value_); + } else if (s_.IsNotFound()) { res_.AppendStringLen(-1); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void GetCmd::ReadCache() { + auto s = db_->cache()->Get(key_, &value_); + if (s.ok()) { + res_.AppendStringLen(value_.size()); + res_.AppendContent(value_); + } else { + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void GetCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void GetCmd::DoUpdateCache() { + if (IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) { + return; + } + if (s_.ok()) { + db_->cache()->WriteKVToCache(key_, value_, ttl_millsec_ / 1000); } } void DelCmd::DoInitial() { if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameDel); + res_.SetRes(CmdRes::kWrongNum, name()); return; } - std::vector::iterator iter = argv_.begin(); + auto iter = argv_.begin(); keys_.assign(++iter, argv_.end()); - return; } -void DelCmd::Do(std::shared_ptr partition) { - std::map type_status; - int64_t count = partition->db()->Del(keys_, &type_status); +void DelCmd::Do() { + int64_t count = db_->storage()->Del(keys_); if (count >= 0) { res_.AppendInteger(count); + s_ = rocksdb::Status::OK(); + std::vector::const_iterator it; + for (it = keys_.begin(); it != keys_.end(); it++) { + RemSlotKey(*it, db_); + } + } else { + res_.SetRes(CmdRes::kErrOther, "delete error"); + s_ = rocksdb::Status::Corruption("delete error"); + } +} + +void DelCmd::DoThroughDB() { + Do(); +} + +void DelCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Del(keys_); + } +} + +void DelCmd::Split(const HintKeys& hint_keys) { + std::map type_status; + int64_t count = db_->storage()->Del(hint_keys.keys); + if (count >= 0) { + split_res_ += count; } else { res_.SetRes(CmdRes::kErrOther, "delete error"); } - return; +} + +void DelCmd::Merge() { res_.AppendInteger(split_res_); } + +void DelCmd::DoBinlog() { + std::string opt = argv_.at(0); + for(auto& key: keys_) { + argv_.clear(); + argv_.emplace_back(opt); + argv_.emplace_back(key); + Cmd::DoBinlog(); + } } void IncrCmd::DoInitial() { @@ -191,21 +257,58 @@ void IncrCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void IncrCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Incrby(key_, 1, &new_value_); - if (s.ok()) { - res_.AppendContent(":" + std::to_string(new_value_)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { +void IncrCmd::Do() { + s_ = db_->storage()->Incrby(key_, 1, &new_value_, &expired_timestamp_millsec_); + if (s_.ok()) { + res_.AppendContent(":" + std::to_string(new_value_)); + AddSlotKey("k", key_, db_); + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a integer") { 
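+ // the storage engine reports a non-numeric old value with exactly this Corruption message, + // so the comparison above has to match the string verbatim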
res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { + } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsInvalidArgument()) { res_.SetRes(CmdRes::kOverFlow); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; +} + +void IncrCmd::DoThroughDB() { + Do(); +} + +void IncrCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Incrxx(key_); + } +} + +std::string IncrCmd::ToRedisProtocol() { + std::string content; + content.reserve(RAW_ARGS_LEN); + RedisAppendLen(content, 4, "*"); + + // to pksetexat cmd + std::string pksetexat_cmd("pksetexat"); + RedisAppendLenUint64(content, pksetexat_cmd.size(), "$"); + RedisAppendContent(content, pksetexat_cmd); + // key + RedisAppendLenUint64(content, key_.size(), "$"); + RedisAppendContent(content, key_); + // time_stamp + char buf[100]; + auto time_stamp = expired_timestamp_millsec_ > 0 ? expired_timestamp_millsec_ / 1000 : expired_timestamp_millsec_; + pstd::ll2string(buf, sizeof(buf), time_stamp); + std::string at(buf); + RedisAppendLenUint64(content, at.size(), "$"); + RedisAppendContent(content, at); + // value + std::string new_value_str = std::to_string(new_value_); + RedisAppendLenUint64(content, new_value_str.size(), "$"); + RedisAppendContent(content, new_value_str); + return content; } void IncrbyCmd::DoInitial() { @@ -214,25 +317,62 @@ void IncrbyCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &by_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &by_) == 0) { res_.SetRes(CmdRes::kInvalidInt, kCmdNameIncrby); return; } - return; } -void IncrbyCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Incrby(key_, by_, &new_value_); - if (s.ok()) { +void IncrbyCmd::Do() { + s_ = db_->storage()->Incrby(key_, by_, &new_value_, &expired_timestamp_millsec_); + if (s_.ok()) { res_.AppendContent(":" + std::to_string(new_value_)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { + AddSlotKey("k", key_, db_); + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a integer") { res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { + } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsInvalidArgument()) { res_.SetRes(CmdRes::kOverFlow); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; +} + +void IncrbyCmd::DoThroughDB() { + Do(); +} + +void IncrbyCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->IncrByxx(key_, by_); + } +} + +std::string IncrbyCmd::ToRedisProtocol() { + std::string content; + content.reserve(RAW_ARGS_LEN); + RedisAppendLen(content, 4, "*"); + + // to pksetexat cmd + std::string pksetexat_cmd("pksetexat"); + RedisAppendLenUint64(content, pksetexat_cmd.size(), "$"); + RedisAppendContent(content, pksetexat_cmd); + // key + RedisAppendLenUint64(content, key_.size(), "$"); + RedisAppendContent(content, key_); + // time_stamp + char buf[100]; + auto time_stamp = expired_timestamp_millsec_ > 0 ? 
expired_timestamp_millsec_ / 1000 : expired_timestamp_millsec_; + pstd::ll2string(buf, sizeof(buf), time_stamp); + std::string at(buf); + RedisAppendLenUint64(content, at.size(), "$"); + RedisAppendContent(content, at); + // value + std::string new_value_str = std::to_string(new_value_); + RedisAppendLenUint64(content, new_value_str.size(), "$"); + RedisAppendContent(content, new_value_str); + return content; } void IncrbyfloatCmd::DoInitial() { @@ -242,49 +382,99 @@ void IncrbyfloatCmd::DoInitial() { } key_ = argv_[1]; value_ = argv_[2]; - if (!slash::string2d(argv_[2].data(), argv_[2].size(), &by_)) { + if (pstd::string2d(argv_[2].data(), argv_[2].size(), &by_) == 0) { res_.SetRes(CmdRes::kInvalidFloat); return; } - return; } -void IncrbyfloatCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Incrbyfloat(key_, value_, &new_value_); - if (s.ok()) { - res_.AppendStringLen(new_value_.size()); +void IncrbyfloatCmd::Do() { + s_ = db_->storage()->Incrbyfloat(key_, value_, &new_value_, &expired_timestamp_millsec_); + if (s_.ok()) { + res_.AppendStringLenUint64(new_value_.size()); res_.AppendContent(new_value_); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a vaild float"){ + AddSlotKey("k", key_, db_); + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a vaild float") { res_.SetRes(CmdRes::kInvalidFloat); - } else if (s.IsInvalidArgument()) { - res_.SetRes(CmdRes::kOverFlow); + } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::KIncrByOverFlow); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; } +void IncrbyfloatCmd::DoThroughDB() { + Do(); +} + +void IncrbyfloatCmd::DoUpdateCache() { + if (s_.ok()) { + long double long_double_by; + if (storage::StrToLongDouble(value_.data(), value_.size(), &long_double_by) != -1) { + db_->cache()->Incrbyfloatxx(key_, long_double_by); + } + } +} + +std::string IncrbyfloatCmd::ToRedisProtocol() { + std::string content; + content.reserve(RAW_ARGS_LEN); + RedisAppendLen(content, 4, "*"); + + // to pksetexat cmd + std::string pksetexat_cmd("pksetexat"); + RedisAppendLenUint64(content, pksetexat_cmd.size(), "$"); + RedisAppendContent(content, pksetexat_cmd); + // key + RedisAppendLenUint64(content, key_.size(), "$"); + RedisAppendContent(content, key_); + // time_stamp + char buf[100]; + auto time_stamp = expired_timestamp_millsec_ > 0 ? 
expired_timestamp_millsec_ / 1000 : expired_timestamp_millsec_; + pstd::ll2string(buf, sizeof(buf), time_stamp); + std::string at(buf); + RedisAppendLenUint64(content, at.size(), "$"); + RedisAppendContent(content, at); + // value + RedisAppendLenUint64(content, new_value_.size(), "$"); + RedisAppendContent(content, new_value_); + return content; +} + + void DecrCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameDecr); return; } key_ = argv_[1]; - return; } -void DecrCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Decrby(key_, 1, &new_value_); - if (s.ok()) { - res_.AppendContent(":" + std::to_string(new_value_)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { +void DecrCmd::Do() { + s_= db_->storage()->Decrby(key_, 1, &new_value_); + if (s_.ok()) { + res_.AppendContent(":" + std::to_string(new_value_)); + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a integer") { res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { + } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsInvalidArgument()) { res_.SetRes(CmdRes::kOverFlow); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void DecrCmd::DoThroughDB() { + Do(); +} + +void DecrCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Decrxx(key_); } - return; } void DecrbyCmd::DoInitial() { @@ -293,25 +483,36 @@ void DecrbyCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &by_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &by_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - return; } -void DecrbyCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Decrby(key_, by_, &new_value_); - if (s.ok()) { +void DecrbyCmd::Do() { + s_ = db_->storage()->Decrby(key_, by_, &new_value_); + if (s_.ok()) { + AddSlotKey("k", key_, db_); res_.AppendContent(":" + std::to_string(new_value_)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a integer") { res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { + } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsInvalidArgument()) { res_.SetRes(CmdRes::kOverFlow); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void DecrbyCmd::DoThroughDB() { + Do(); +} + +void DecrbyCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->DecrByxx(key_, by_); } - return; } void GetsetCmd::DoInitial() { @@ -321,23 +522,34 @@ void GetsetCmd::DoInitial() { } key_ = argv_[1]; new_value_ = argv_[2]; - return; } -void GetsetCmd::Do(std::shared_ptr partition) { +void GetsetCmd::Do() { std::string old_value; - rocksdb::Status s = partition->db()->GetSet(key_, new_value_, &old_value); - if (s.ok()) { + s_ = db_->storage()->GetSet(key_, new_value_, &old_value); + if (s_.ok()) { if (old_value.empty()) { res_.AppendContent("$-1"); } else { - res_.AppendStringLen(old_value.size()); + res_.AppendStringLenUint64(old_value.size()); res_.AppendContent(old_value); } + AddSlotKey("k", key_, db_); + 
} else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void GetsetCmd::DoThroughDB() { + Do(); +} + +void GetsetCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->SetxxWithoutTTL(key_, new_value_); } - return; } void AppendCmd::DoInitial() { @@ -347,18 +559,54 @@ void AppendCmd::DoInitial() { } key_ = argv_[1]; value_ = argv_[2]; - return; } -void AppendCmd::Do(std::shared_ptr partition) { +void AppendCmd::Do() { int32_t new_len = 0; - rocksdb::Status s = partition->db()->Append(key_, value_, &new_len); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->Append(key_, value_, &new_len, &expired_timestamp_millsec_, new_value_); + if (s_.ok() || s_.IsNotFound()) { res_.AppendInteger(new_len); + AddSlotKey("k", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; +} + +void AppendCmd::DoThroughDB() { + Do(); +} + +void AppendCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Appendxx(key_, value_); + } +} + +std::string AppendCmd::ToRedisProtocol() { + std::string content; + content.reserve(RAW_ARGS_LEN); + RedisAppendLen(content, 4, "*"); + + // to pksetexat cmd + std::string pksetexat_cmd("pksetexat"); + RedisAppendLenUint64(content, pksetexat_cmd.size(), "$"); + RedisAppendContent(content, pksetexat_cmd); + // key + RedisAppendLenUint64(content, key_.size(), "$"); + RedisAppendContent(content, key_); + // time_stamp + char buf[100]; + auto time_stamp = expired_timestamp_millsec_ > 0 ? expired_timestamp_millsec_ / 1000 : expired_timestamp_millsec_; + pstd::ll2string(buf, sizeof(buf), time_stamp); + std::string at(buf); + RedisAppendLenUint64(content, at.size(), "$"); + RedisAppendContent(content, at); + // value + RedisAppendLenUint64(content, new_value_.size(), "$"); + RedisAppendContent(content, new_value_); + return content; } void MgetCmd::DoInitial() { @@ -368,28 +616,135 @@ void MgetCmd::DoInitial() { } keys_ = argv_; keys_.erase(keys_.begin()); - return; + split_res_.resize(keys_.size()); + cache_miss_keys_.clear(); +} + +void MgetCmd::AssembleResponseFromCache() { + res_.AppendArrayLenUint64(keys_.size()); + for (const auto& key : keys_) { + auto it = cache_hit_values_.find(key); + if (it != cache_hit_values_.end()) { + res_.AppendStringLen(it->second.size()); + res_.AppendContent(it->second); + } else { + res_.SetRes(CmdRes::kErrOther, "Internal error during cache assembly"); + return; + } + } } -void MgetCmd::Do(std::shared_ptr partition) { - std::vector vss; - rocksdb::Status s = partition->db()->MGet(keys_, &vss); +void MgetCmd::Do() { + // Without using the cache and querying only the DB, we need to use keys_. + // This line will only be assigned when querying the DB directly. 
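+ // Illustrative example: for "MGET a b c", if ReadCache() hit only {a}, then cache_miss_keys_ = {b, c}, + // only those two keys are fetched from the DB, and MergeCachedAndDbResults() reassembles the reply in the original key order.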
+ if (cache_miss_keys_.size() == 0) { + cache_miss_keys_ = keys_; + } + db_value_status_array_.clear(); + s_ = db_->storage()->MGetWithTTL(cache_miss_keys_, &db_value_status_array_); + if (!s_.ok()) { + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } + return; + } + + MergeCachedAndDbResults(); +} + +void MgetCmd::Split(const HintKeys& hint_keys) { + std::vector vss; + const std::vector& keys = hint_keys.keys; + rocksdb::Status s = db_->storage()->MGet(keys, &vss); if (s.ok()) { - res_.AppendArrayLen(vss.size()); - for (const auto& vs : vss) { - if (vs.status.ok()) { - res_.AppendStringLen(vs.value.size()); - res_.AppendContent(vs.value); + if (hint_keys.hints.size() != vss.size()) { + res_.SetRes(CmdRes::kErrOther, "internal Mget return size invalid"); + } + const std::vector& hints = hint_keys.hints; + for (size_t i = 0; i < vss.size(); ++i) { + split_res_[hints[i]] = vss[i]; + } + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void MgetCmd::Merge() { + res_.AppendArrayLenUint64(split_res_.size()); + for (const auto& vs : split_res_) { + if (vs.status.ok()) { + res_.AppendStringLenUint64(vs.value.size()); + res_.AppendContent(vs.value); + } else { + res_.AppendContent("$-1"); + } + } +} + +void MgetCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void MgetCmd::ReadCache() { + for (const auto key : keys_) { + std::string value; + auto s = db_->cache()->Get(const_cast(key), &value); + if (s.ok()) { + cache_hit_values_[key] = value; + } else { + cache_miss_keys_.push_back(key); + } + } + if (cache_miss_keys_.empty()) { + AssembleResponseFromCache(); + } else { + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void MgetCmd::DoUpdateCache() { + size_t db_index = 0; + for (const auto key : cache_miss_keys_) { + if (db_index < db_value_status_array_.size() && db_value_status_array_[db_index].status.ok()) { + int64_t ttl_millsec = db_value_status_array_[db_index].ttl_millsec; + db_->cache()->WriteKVToCache(const_cast(key), db_value_status_array_[db_index].value, ttl_millsec > 0 ? 
ttl_millsec / 1000 : ttl_millsec); + } + db_index++; + } +} + +void MgetCmd::MergeCachedAndDbResults() { + res_.AppendArrayLenUint64(keys_.size()); + + std::unordered_map db_results_map; + for (size_t i = 0; i < cache_miss_keys_.size(); ++i) { + if (db_value_status_array_[i].status.ok()) { + db_results_map[cache_miss_keys_[i]] = db_value_status_array_[i].value; + } + } + + for (const auto& key : keys_) { + auto cache_it = cache_hit_values_.find(key); + + if (cache_it != cache_hit_values_.end()) { + res_.AppendStringLen(cache_it->second.size()); + res_.AppendContent(cache_it->second); + } else { + auto db_it = db_results_map.find(key); + if (db_it != db_results_map.end()) { + res_.AppendStringLen(db_it->second.size()); + res_.AppendContent(db_it->second); } else { res_.AppendContent("$-1"); } } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } + void KeysCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameKeys); @@ -398,26 +753,27 @@ void KeysCmd::DoInitial() { pattern_ = argv_[1]; if (argv_.size() == 3) { std::string opt = argv_[2]; - if (!strcasecmp(opt.data(), "string")) { - type_ = blackwidow::DataType::kStrings; - } else if (!strcasecmp(opt.data(), "zset")) { - type_ = blackwidow::DataType::kZSets; - } else if (!strcasecmp(opt.data(), "set")) { - type_ = blackwidow::DataType::kSets; - } else if (!strcasecmp(opt.data(), "list")) { - type_ = blackwidow::DataType::kLists; - } else if (!strcasecmp(opt.data(), "hash")) { - type_ = blackwidow::DataType::kHashes; + if (strcasecmp(opt.data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(opt.data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(opt.data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(opt.data(), "list") == 0) { + type_ = storage::DataType::kLists; + } else if (strcasecmp(opt.data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else if (strcasecmp(opt.data(), "stream") == 0) { + type_ = storage::DataType::kStreams; } else { res_.SetRes(CmdRes::kSyntaxErr); } } else if (argv_.size() > 3) { res_.SetRes(CmdRes::kSyntaxErr); } - return; } -void KeysCmd::Do(std::shared_ptr partition) { +void KeysCmd::Do() { int64_t total_key = 0; int64_t cursor = 0; size_t raw_limit = g_pika_conf->max_client_response_size(); @@ -425,21 +781,20 @@ void KeysCmd::Do(std::shared_ptr partition) { std::vector keys; do { keys.clear(); - cursor = partition->db()->Scan(type_, cursor, pattern_, PIKA_SCAN_STEP_LENGTH, &keys); + cursor = db_->storage()->Scan(type_, cursor, pattern_, PIKA_SCAN_STEP_LENGTH, &keys); for (const auto& key : keys) { - RedisAppendLen(raw, key.size(), "$"); + RedisAppendLenUint64(raw, key.size(), "$"); RedisAppendContent(raw, key); } if (raw.size() >= raw_limit) { res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit"); return; } - total_key += keys.size(); + total_key += static_cast(keys.size()); } while (cursor != 0); res_.AppendArrayLen(total_key); res_.AppendStringRaw(raw); - return; } void SetnxCmd::DoInitial() { @@ -449,51 +804,37 @@ void SetnxCmd::DoInitial() { } key_ = argv_[1]; value_ = argv_[2]; - return; } -void SetnxCmd::Do(std::shared_ptr partition) { +void SetnxCmd::Do() { success_ = 0; - rocksdb::Status s = partition->db()->Setnx(key_, value_, &success_); - if (s.ok()) { + s_ = db_->storage()->Setnx(key_, value_, &success_); + if (s_.ok()) { res_.AppendInteger(success_); + AddSlotKey("k", key_, db_); + } else if 
(s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; } -std::string SetnxCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { +std::string SetnxCmd::ToRedisProtocol() { std::string content; - if (success_) { - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, 3, "*"); - - // to set cmd - std::string set_cmd("set"); - RedisAppendLen(content, set_cmd.size(), "$"); - RedisAppendContent(content, set_cmd); - // key - RedisAppendLen(content, key_.size(), "$"); - RedisAppendContent(content, key_); - // value - RedisAppendLen(content, value_.size(), "$"); - RedisAppendContent(content, value_); + content.reserve(RAW_ARGS_LEN); + RedisAppendLen(content, 3, "*"); - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); - } + // don't check variable 'success_', because if 'success_' was false, an empty binlog will be saved into file. + // to setnx cmd + std::string set_cmd("setnx"); + RedisAppendLenUint64(content, set_cmd.size(), "$"); + RedisAppendContent(content, set_cmd); + // key + RedisAppendLenUint64(content, key_.size(), "$"); + RedisAppendContent(content, key_); + // value + RedisAppendLenUint64(content, value_.size(), "$"); + RedisAppendContent(content, value_); return content; } @@ -503,59 +844,58 @@ void SetexCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &sec_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &ttl_sec_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } value_ = argv_[3]; - return; } -void SetexCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Setex(key_, value_, sec_); - if (s.ok()) { +void SetexCmd::Do() { + s_ = db_->storage()->Setex(key_, value_, ttl_sec_ * 1000); + if (s_.ok()) { res_.SetRes(CmdRes::kOk); + AddSlotKey("k", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } } -std::string SetexCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { +void SetexCmd::DoThroughDB() { + Do(); +} + +void SetexCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Setxx(key_, value_, ttl_sec_); + } +} +std::string SetexCmd::ToRedisProtocol() { std::string content; content.reserve(RAW_ARGS_LEN); RedisAppendLen(content, 4, "*"); // to pksetexat cmd std::string pksetexat_cmd("pksetexat"); - RedisAppendLen(content, pksetexat_cmd.size(), "$"); + RedisAppendLenUint64(content, pksetexat_cmd.size(), "$"); RedisAppendContent(content, pksetexat_cmd); // key - RedisAppendLen(content, key_.size(), "$"); + RedisAppendLenUint64(content, key_.size(), "$"); RedisAppendContent(content, key_); // time_stamp char buf[100]; - int32_t time_stamp = time(nullptr) + sec_; - slash::ll2string(buf, 100, time_stamp); + auto time_stamp = time(nullptr) + ttl_sec_; + pstd::ll2string(buf, 100, time_stamp); std::string at(buf); - RedisAppendLen(content, at.size(), "$"); + RedisAppendLenUint64(content, at.size(), "$"); RedisAppendContent(content, at); // value - RedisAppendLen(content, value_.size(), "$"); + RedisAppendLenUint64(content, value_.size(), "$"); RedisAppendContent(content, value_); - 
return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); + return content; } void PsetexCmd::DoInitial() { @@ -564,59 +904,57 @@ void PsetexCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &usec_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &ttl_millsec) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } value_ = argv_[3]; - return; } -void PsetexCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Setex(key_, value_, usec_ / 1000); - if (s.ok()) { +void PsetexCmd::Do() { + s_ = db_->storage()->Setex(key_, value_, ttl_millsec); + if (s_.ok()) { res_.SetRes(CmdRes::kOk); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } } -std::string PsetexCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { +void PsetexCmd::DoThroughDB() { + Do(); +} + +void PsetexCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Setxx(key_, value_, ttl_millsec / 1000); + } +} +std::string PsetexCmd::ToRedisProtocol() { std::string content; content.reserve(RAW_ARGS_LEN); RedisAppendLen(content, 4, "*"); // to pksetexat cmd std::string pksetexat_cmd("pksetexat"); - RedisAppendLen(content, pksetexat_cmd.size(), "$"); + RedisAppendLenUint64(content, pksetexat_cmd.size(), "$"); RedisAppendContent(content, pksetexat_cmd); // key - RedisAppendLen(content, key_.size(), "$"); + RedisAppendLenUint64(content, key_.size(), "$"); RedisAppendContent(content, key_); // time_stamp char buf[100]; - int32_t time_stamp = time(nullptr) + usec_ / 1000; - slash::ll2string(buf, 100, time_stamp); + auto time_stamp = pstd::NowMillis() + ttl_millsec; + pstd::ll2string(buf, 100, time_stamp); std::string at(buf); - RedisAppendLen(content, at.size(), "$"); + RedisAppendLenUint64(content, at.size(), "$"); RedisAppendContent(content, at); // value - RedisAppendLen(content, value_.size(), "$"); + RedisAppendLenUint64(content, value_.size(), "$"); RedisAppendContent(content, value_); - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); + return content; } void DelvxCmd::DoInitial() { @@ -626,13 +964,14 @@ void DelvxCmd::DoInitial() { } key_ = argv_[1]; value_ = argv_[2]; - return; } -void DelvxCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Delvx(key_, value_, &success_); +void DelvxCmd::Do() { + rocksdb::Status s = db_->storage()->Delvx(key_, value_, &success_); if (s.ok() || s.IsNotFound()) { res_.AppendInteger(success_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } @@ -652,15 +991,73 @@ void MsetCmd::DoInitial() { for (size_t index = 1; index != argc; index += 2) { kvs_.push_back({argv_[index], argv_[index + 1]}); } - return; } -void MsetCmd::Do(std::shared_ptr partition) { - blackwidow::Status s = partition->db()->MSet(kvs_); +void MsetCmd::Do() { + s_ = db_->storage()->MSet(kvs_); + if (s_.ok()) { + res_.SetRes(CmdRes::kOk); + std::vector::const_iterator it; + for (it = kvs_.begin(); it != kvs_.end(); it++) { + AddSlotKey("k", it->key, db_); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + 
res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void MsetCmd::DoThroughDB() { + Do(); +} + +void MsetCmd::DoUpdateCache() { + if (s_.ok()) { + for (auto key : kvs_) { + db_->cache()->SetxxWithoutTTL(key.key, key.value); + } + } +} + +void MsetCmd::Split(const HintKeys& hint_keys) { + std::vector kvs; + const std::vector& keys = hint_keys.keys; + const std::vector& hints = hint_keys.hints; + if (keys.size() != hints.size()) { + res_.SetRes(CmdRes::kErrOther, "SplitError hint_keys size not match"); + } + for (size_t i = 0; i < keys.size(); i++) { + if (kvs_[hints[i]].key == keys[i]) { + kvs.push_back(kvs_[hints[i]]); + } else { + res_.SetRes(CmdRes::kErrOther, "SplitError hint key: " + keys[i]); + return; + } + } + storage::Status s = db_->storage()->MSet(kvs); if (s.ok()) { res_.SetRes(CmdRes::kOk); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); + return; + } +} + +void MsetCmd::Merge() {} + +void MsetCmd::DoBinlog() { + PikaCmdArgsType set_argv; + set_argv.resize(3); + //used "set" instead of "SET" to distinguish the binlog of Set + set_argv[0] = "set"; + set_cmd_->SetConn(GetConn()); + set_cmd_->SetResp(resp_.lock()); + for(auto& kv: kvs_) { + set_argv[1] = kv.key; + set_argv[2] = kv.value; + set_cmd_->Initial(set_argv, db_name_); + set_cmd_->DoBinlog(); } } @@ -678,44 +1075,101 @@ void MsetnxCmd::DoInitial() { for (size_t index = 1; index != argc; index += 2) { kvs_.push_back({argv_[index], argv_[index + 1]}); } - return; } -void MsetnxCmd::Do(std::shared_ptr partition) { +void MsetnxCmd::Do() { success_ = 0; - rocksdb::Status s = partition->db()->MSetnx(kvs_, &success_); + rocksdb::Status s = db_->storage()->MSetnx(kvs_, &success_); if (s.ok()) { res_.AppendInteger(success_); + std::vector::const_iterator it; + for (it = kvs_.begin(); it != kvs_.end(); it++) { + AddSlotKey("k", it->key, db_); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void MsetnxCmd::DoBinlog() { + if (!success_) { + //some keys already exist, set operations aborted, no need of binlog + return; + } + PikaCmdArgsType set_argv; + set_argv.resize(3); + //used "set" instead of "SET" to distinguish the binlog of SetCmd + set_argv[0] = "set"; + set_cmd_->SetConn(GetConn()); + set_cmd_->SetResp(resp_.lock()); + for (auto& kv: kvs_) { + set_argv[1] = kv.key; + set_argv[2] = kv.value; + set_cmd_->Initial(set_argv, db_name_); + set_cmd_->DoBinlog(); + } +} + void GetrangeCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameGetrange); return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &start_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &start_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &end_)) { + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &end_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - return; } -void GetrangeCmd::Do(std::shared_ptr partition) { +void GetrangeCmd::Do() { std::string substr; - rocksdb::Status s = partition->db()->Getrange(key_, start_, end_, &substr); - if (s.ok() || s.IsNotFound()) { + s_= db_->storage()->Getrange(key_, start_, end_, &substr); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendStringLenUint64(substr.size()); + res_.AppendContent(substr); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void GetrangeCmd::ReadCache() { + 
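+ // read-through path: a miss answers kCacheMiss, after which the caller is expected to retry via DoThroughDB() + // and let DoUpdateCache() repopulate the cache from the value read out of the DB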
std::string substr; + auto s = db_->cache()->GetRange(key_, start_, end_, &substr); + if (s.ok()) { res_.AppendStringLen(substr.size()); res_.AppendContent(substr); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void GetrangeCmd::DoThroughDB() { + res_.clear(); + std::string substr; + s_ = db_->storage()->GetrangeWithValue(key_, start_, end_, &substr, &value_, &sec_); + if (s_.ok()) { + res_.AppendStringLen(substr.size()); + res_.AppendContent(substr); + } else if (s_.IsNotFound()) { + res_.AppendStringLen(substr.size()); + res_.AppendContent(substr); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void GetrangeCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->WriteKVToCache(key_, value_, sec_); } } @@ -725,23 +1179,34 @@ void SetrangeCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &offset_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &offset_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } value_ = argv_[3]; - return; } -void SetrangeCmd::Do(std::shared_ptr partition) { - int32_t new_len; - rocksdb::Status s = partition->db()->Setrange(key_, offset_, value_, &new_len); - if (s.ok()) { +void SetrangeCmd::Do() { + int32_t new_len = 0; + s_ = db_->storage()->Setrange(key_, offset_, value_, &new_len); + if (s_.ok()) { res_.AppendInteger(new_len); + AddSlotKey("k", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SetrangeCmd::DoThroughDB() { + Do(); +} + +void SetrangeCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->SetRangexx(key_, offset_, value_); } - return; } void StrlenCmd::DoInitial() { @@ -750,18 +1215,44 @@ void StrlenCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void StrlenCmd::Do(std::shared_ptr partition) { +void StrlenCmd::Do() { int32_t len = 0; - rocksdb::Status s = partition->db()->Strlen(key_, &len); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->Strlen(key_, &len); + if (s_.ok() || s_.IsNotFound()) { res_.AppendInteger(len); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void StrlenCmd::ReadCache() { + int32_t len = 0; + auto s= db_->cache()->Strlen(key_, &len); + if (s.ok()) { + res_.AppendInteger(len); + } else { + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void StrlenCmd::DoThroughDB() { + res_.clear(); + s_ = db_->storage()->GetWithTTL(key_, &value_, &ttl_millsec); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(value_.size()); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void StrlenCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->WriteKVToCache(key_, value_, ttl_millsec > 0 ? 
ttl_millsec / 1000 : ttl_millsec); } - return; } void ExistsCmd::DoInitial() { @@ -771,18 +1262,44 @@ } keys_ = argv_; keys_.erase(keys_.begin()); - return; } -void ExistsCmd::Do(std::shared_ptr partition) { - std::map type_status; - int64_t res = partition->db()->Exists(keys_, &type_status); +void ExistsCmd::Do() { + int64_t res = db_->storage()->Exists(keys_); if (res != -1) { res_.AppendInteger(res); } else { res_.SetRes(CmdRes::kErrOther, "exists internal error"); } - return; +} + +void ExistsCmd::Split(const HintKeys& hint_keys) { + int64_t res = db_->storage()->Exists(hint_keys.keys); + if (res != -1) { + split_res_ += res; + } else { + res_.SetRes(CmdRes::kErrOther, "exists internal error"); + } +} + +void ExistsCmd::Merge() { res_.AppendInteger(split_res_); } + +void ExistsCmd::ReadCache() { + if (keys_.size() > 1) { + res_.SetRes(CmdRes::kCacheMiss); + return; + } + bool exist = db_->cache()->Exists(keys_[0]); + if (exist) { + res_.AppendInteger(1); + } else { + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void ExistsCmd::DoThroughDB() { + res_.clear(); + Do(); } void ExpireCmd::DoInitial() { @@ -791,57 +1308,53 @@ return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &sec_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &ttl_sec_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - return; } -void ExpireCmd::Do(std::shared_ptr partition) { - std::map type_status; - int64_t res = partition->db()->Expire(key_, sec_, &type_status); +void ExpireCmd::Do() { + int32_t res = db_->storage()->Expire(key_, ttl_sec_ * 1000); if (res != -1) { res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); } else { res_.SetRes(CmdRes::kErrOther, "expire internal error"); + s_ = rocksdb::Status::Corruption("expire internal error"); } - return; } -std::string ExpireCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { +std::string ExpireCmd::ToRedisProtocol() { std::string content; content.reserve(RAW_ARGS_LEN); RedisAppendLen(content, 3, "*"); // to expireat cmd std::string expireat_cmd("expireat"); - RedisAppendLen(content, expireat_cmd.size(), "$"); + RedisAppendLenUint64(content, expireat_cmd.size(), "$"); RedisAppendContent(content, expireat_cmd); // key - RedisAppendLen(content, key_.size(), "$"); + RedisAppendLenUint64(content, key_.size(), "$"); RedisAppendContent(content, key_); // sec char buf[100]; - int64_t expireat = time(nullptr) + sec_; - slash::ll2string(buf, 100, expireat); + int64_t expireat = time(nullptr) + ttl_sec_; + pstd::ll2string(buf, 100, expireat); std::string at(buf); - RedisAppendLen(content, at.size(), "$"); + RedisAppendLenUint64(content, at.size(), "$"); RedisAppendContent(content, at); + return content; +} - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); +void ExpireCmd::DoThroughDB() { + Do(); +} + +void ExpireCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Expire(key_, ttl_sec_); + } } void PexpireCmd::DoInitial() { @@ -850,57 +1363,53 @@ return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &msec_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &ttl_millsec) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - return; } -void PexpireCmd::Do(std::shared_ptr partition) { - std::map type_status; - int64_t
res = partition->db()->Expire(key_, msec_/1000, &type_status); +void PexpireCmd::Do() { + int64_t res = db_->storage()->Expire(key_, ttl_millsec); if (res != -1) { res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); } else { res_.SetRes(CmdRes::kErrOther, "expire internal error"); + s_ = rocksdb::Status::Corruption("expire internal error"); } - return; } -std::string PexpireCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { +std::string PexpireCmd::ToRedisProtocol() { std::string content; content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, argv_.size(), "*"); + RedisAppendLenUint64(content, argv_.size(), "*"); - // to expireat cmd - std::string expireat_cmd("expireat"); - RedisAppendLen(content, expireat_cmd.size(), "$"); + // to pexpireat cmd + std::string expireat_cmd("pexpireat"); + RedisAppendLenUint64(content, expireat_cmd.size(), "$"); RedisAppendContent(content, expireat_cmd); // key - RedisAppendLen(content, key_.size(), "$"); + RedisAppendLenUint64(content, key_.size(), "$"); RedisAppendContent(content, key_); // sec char buf[100]; - int64_t expireat = time(nullptr) + msec_ / 1000; - slash::ll2string(buf, 100, expireat); + int64_t expireat = pstd::NowMillis() + ttl_millsec; + pstd::ll2string(buf, 100, expireat); std::string at(buf); - RedisAppendLen(content, at.size(), "$"); + RedisAppendLenUint64(content, at.size(), "$"); RedisAppendContent(content, at); + return content; +} - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); +void PexpireCmd::DoThroughDB() { + Do(); +} + +void PexpireCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Expire(key_, ttl_millsec); + } } void ExpireatCmd::DoInitial() { @@ -909,20 +1418,30 @@ void ExpireatCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &time_stamp_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &time_stamp_sec_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - return; } -void ExpireatCmd::Do(std::shared_ptr partition) { - std::map type_status; - int32_t res = partition->db()->Expireat(key_, time_stamp_, &type_status); +void ExpireatCmd::Do() { + int32_t res = db_->storage()->Expireat(key_, time_stamp_sec_ * 1000); if (res != -1) { res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); } else { res_.SetRes(CmdRes::kErrOther, "expireat internal error"); + s_ = rocksdb::Status::Corruption("expireat internal error"); + } +} + +void ExpireatCmd::DoThroughDB() { + Do(); +} + +void ExpireatCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Expireat(key_, time_stamp_sec_); } } @@ -932,57 +1451,31 @@ void PexpireatCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &time_stamp_ms_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &time_stamp_millsec_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - return; -} - -std::string PexpireatCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - std::string content; - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, argv_.size(), "*"); - - // to expireat cmd - std::string expireat_cmd("expireat"); - RedisAppendLen(content, expireat_cmd.size(), "$"); - RedisAppendContent(content, expireat_cmd); - // key - RedisAppendLen(content, key_.size(), "$"); - RedisAppendContent(content, 
key_); - // sec - char buf[100]; - int64_t expireat = time_stamp_ms_ / 1000; - slash::ll2string(buf, 100, expireat); - std::string at(buf); - RedisAppendLen(content, at.size(), "$"); - RedisAppendContent(content, at); - - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); } -void PexpireatCmd::Do(std::shared_ptr partition) { - std::map type_status; - int32_t res = partition->db()->Expireat(key_, time_stamp_ms_/1000, &type_status); +void PexpireatCmd::Do() { + int32_t res = db_->storage()->Expireat(key_, static_cast(time_stamp_millsec_)); if (res != -1) { res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); } else { res_.SetRes(CmdRes::kErrOther, "pexpireat internal error"); + s_ = rocksdb::Status::Corruption("pexpireat internal error"); + } +} + +void PexpireatCmd::DoThroughDB() { + Do(); +} + +void PexpireatCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Expireat(key_, time_stamp_millsec_ / 1000); } - return; } void TtlCmd::DoInitial() { @@ -991,35 +1484,31 @@ void TtlCmd::DoInitial() { return; } key_ = argv_[1]; - return; -} - -void TtlCmd::Do(std::shared_ptr partition) { - std::map type_timestamp; - std::map type_status; - type_timestamp = partition->db()->TTL(key_, &type_status); - for (const auto& item : type_timestamp) { - // mean operation exception errors happen in database - if (item.second == -3) { - res_.SetRes(CmdRes::kErrOther, "ttl internal error"); - return; - } - } - if (type_timestamp[blackwidow::kStrings] != -2) { - res_.AppendInteger(type_timestamp[blackwidow::kStrings]); - } else if (type_timestamp[blackwidow::kHashes] != -2) { - res_.AppendInteger(type_timestamp[blackwidow::kHashes]); - } else if (type_timestamp[blackwidow::kLists] != -2) { - res_.AppendInteger(type_timestamp[blackwidow::kLists]); - } else if (type_timestamp[blackwidow::kZSets] != -2) { - res_.AppendInteger(type_timestamp[blackwidow::kZSets]); - } else if (type_timestamp[blackwidow::kSets] != -2) { - res_.AppendInteger(type_timestamp[blackwidow::kSets]); +} + +void TtlCmd::Do() { + int64_t ttl_sec_ = db_->storage()->TTL(key_); + if (ttl_sec_ == -3) { + res_.SetRes(CmdRes::kErrOther, "ttl internal error"); } else { - // mean this key not exist - res_.AppendInteger(-2); + res_.AppendInteger(ttl_sec_); } - return; +} + +void TtlCmd::ReadCache() { + int64_t timestamp = db_->cache()->TTL(key_); + if (timestamp == -3) { + res_.SetRes(CmdRes::kErrOther, "ttl internal error"); + } else if (timestamp != -2) { + res_.AppendInteger(timestamp); + } else { + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void TtlCmd::DoThroughDB() { + res_.clear(); + Do(); } void PttlCmd::DoInitial() { @@ -1028,55 +1517,25 @@ void PttlCmd::DoInitial() { return; } key_ = argv_[1]; - return; -} - -void PttlCmd::Do(std::shared_ptr partition) { - std::map type_timestamp; - std::map type_status; - type_timestamp = partition->db()->TTL(key_, &type_status); - for (const auto& item : type_timestamp) { - // mean operation exception errors happen in database - if (item.second == -3) { - res_.SetRes(CmdRes::kErrOther, "ttl internal error"); - return; - } - } - if (type_timestamp[blackwidow::kStrings] != -2) { - if (type_timestamp[blackwidow::kStrings] == -1) { - res_.AppendInteger(-1); - } else { - res_.AppendInteger(type_timestamp[blackwidow::kStrings] * 1000); - } - } else if (type_timestamp[blackwidow::kHashes] != -2) { - if (type_timestamp[blackwidow::kHashes] == -1) { - res_.AppendInteger(-1); - } else { - 
res_.AppendInteger(type_timestamp[blackwidow::kHashes] * 1000); - } - } else if (type_timestamp[blackwidow::kLists] != -2) { - if (type_timestamp[blackwidow::kLists] == -1) { - res_.AppendInteger(-1); - } else { - res_.AppendInteger(type_timestamp[blackwidow::kLists] * 1000); - } - } else if (type_timestamp[blackwidow::kSets] != -2) { - if (type_timestamp[blackwidow::kSets] == -1) { - res_.AppendInteger(-1); - } else { - res_.AppendInteger(type_timestamp[blackwidow::kSets] * 1000); - } - } else if (type_timestamp[blackwidow::kZSets] != -2) { - if (type_timestamp[blackwidow::kZSets] == -1) { - res_.AppendInteger(-1); - } else { - res_.AppendInteger(type_timestamp[blackwidow::kZSets] * 1000); - } +} + +void PttlCmd::Do() { + int64_t ttl_millsec = db_->storage()->PTTL(key_); + if (ttl_millsec == -3) { + res_.SetRes(CmdRes::kErrOther, "ttl internal error"); } else { - // mean this key not exist - res_.AppendInteger(-2); + res_.AppendInteger(ttl_millsec); } - return; +} + +void PttlCmd::ReadCache() { + // redis cache don't support pttl cache, so read directly from db + DoThroughDB(); +} + +void PttlCmd::DoThroughDB() { + res_.clear(); + Do(); } void PersistCmd::DoInitial() { @@ -1085,18 +1544,27 @@ void PersistCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void PersistCmd::Do(std::shared_ptr partition) { - std::map type_status; - int32_t res = partition->db()->Persist(key_, &type_status); +void PersistCmd::Do() { + int32_t res = db_->storage()->Persist(key_); if (res != -1) { res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); } else { res_.SetRes(CmdRes::kErrOther, "persist internal error"); + s_ = rocksdb::Status::Corruption("persist internal error"); + } +} + +void PersistCmd::DoThroughDB() { + Do(); +} + +void PersistCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Persist(key_); } - return; } void TypeCmd::DoInitial() { @@ -1105,18 +1573,36 @@ void TypeCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void TypeCmd::Do(std::shared_ptr partition) { - std::string res; - rocksdb::Status s = partition->db()->Type(key_, &res); +void TypeCmd::Do() { + enum storage::DataType type = storage::DataType::kNones; + std::string key_type; + rocksdb::Status s = db_->storage()->GetType(key_, type); if (s.ok()) { - res_.AppendContent("+" + res); + res_.AppendContent("+" + std::string(DataTypeToString(type))); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; +} + +void TypeCmd::ReadCache() { + enum storage::DataType type = storage::DataType::kNones; + std::string key_type; + // TODO Cache GetType function + rocksdb::Status s = db_->storage()->GetType(key_, type); + if (s.ok()) { + res_.AppendContent("+" + std::string(DataTypeToString(type))); + } else { + res_.SetRes(CmdRes::kCacheMiss, s.ToString()); + } +} + +void TypeCmd::DoThroughDB() { + res_.clear(); + Do(); } void ScanCmd::DoInitial() { @@ -1124,24 +1610,40 @@ void ScanCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameScan); return; } - if (!slash::string2l(argv_[1].data(), argv_[1].size(), &cursor_)) { + if (pstd::string2int(argv_[1].data(), argv_[1].size(), &cursor_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - size_t index = 2, argc = argv_.size(); + size_t index = 2; + size_t argc = argv_.size(); while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 
0) || + (strcasecmp(opt.data(), "type") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_) || count_ <= 0) { + } else if (strcasecmp(opt.data(), "type") == 0) { + std::string str_type = argv_[index]; + if (strcasecmp(str_type.data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(str_type.data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(str_type.data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(str_type.data(), "list") == 0) { + type_ = storage::DataType::kLists; + } else if (strcasecmp(str_type.data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else { + res_.SetRes(CmdRes::kSyntaxErr); + } + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) || count_ <= 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -1151,10 +1653,9 @@ void ScanCmd::DoInitial() { } index++; } - return; } -void ScanCmd::Do(std::shared_ptr partition) { +void ScanCmd::Do() { int64_t total_key = 0; int64_t batch_count = 0; int64_t left = count_; @@ -1167,29 +1668,27 @@ void ScanCmd::Do(std::shared_ptr partition) { keys.clear(); batch_count = left < PIKA_SCAN_STEP_LENGTH ? left : PIKA_SCAN_STEP_LENGTH; left = left > PIKA_SCAN_STEP_LENGTH ? left - PIKA_SCAN_STEP_LENGTH : 0; - cursor_ret = partition->db()->Scan(blackwidow::DataType::kAll, cursor_ret, - pattern_, batch_count, &keys); + cursor_ret = db_->storage()->Scan(type_, cursor_ret, pattern_, batch_count, &keys); for (const auto& key : keys) { - RedisAppendLen(raw, key.size(), "$"); + RedisAppendLenUint64(raw, key.size(), "$"); RedisAppendContent(raw, key); } if (raw.size() >= raw_limit) { res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit"); return; } - total_key += keys.size(); - } while (cursor_ret != 0 && left); + total_key += static_cast(keys.size()); + } while (cursor_ret != 0 && (left != 0)); res_.AppendArrayLen(2); char buf[32]; - int len = slash::ll2string(buf, sizeof(buf), cursor_ret); + int len = pstd::ll2string(buf, sizeof(buf), cursor_ret); res_.AppendStringLen(len); res_.AppendContent(buf); res_.AppendArrayLen(total_key); res_.AppendStringRaw(raw); - return; } void ScanxCmd::DoInitial() { @@ -1197,35 +1696,35 @@ void ScanxCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameScanx); return; } - if (!strcasecmp(argv_[1].data(), "string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[1].data(), "hash")) { - type_ = blackwidow::kHashes; - } else if (!strcasecmp(argv_[1].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[1].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[1].data(), "list")) { - type_ = blackwidow::kLists; + if (strcasecmp(argv_[1].data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(argv_[1].data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else if (strcasecmp(argv_[1].data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(argv_[1].data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(argv_[1].data(), "list") == 0) { + type_ = storage::DataType::kLists; } else { res_.SetRes(CmdRes::kInvalidDbType); return; } start_key_ = argv_[2]; - size_t index = 
3, argc = argv_.size(); + size_t index = 3; + size_t argc = argv_.size(); while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_) || count_ <= 0) { + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) || count_ <= 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -1235,28 +1734,26 @@ void ScanxCmd::DoInitial() { } index++; } - return; } -void ScanxCmd::Do(std::shared_ptr partition) { +void ScanxCmd::Do() { std::string next_key; std::vector keys; - rocksdb::Status s = partition->db()->Scanx(type_, start_key_, pattern_, count_, &keys, &next_key); + rocksdb::Status s = db_->storage()->Scanx(type_, start_key_, pattern_, count_, &keys, &next_key); if (s.ok()) { res_.AppendArrayLen(2); - res_.AppendStringLen(next_key.size()); + res_.AppendStringLenUint64(next_key.size()); res_.AppendContent(next_key); - res_.AppendArrayLen(keys.size()); + res_.AppendArrayLenUint64(keys.size()); std::vector::iterator iter; - for (const auto& key : keys){ + for (const auto& key : keys) { res_.AppendString(key); } } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } void PKSetexAtCmd::DoInitial() { @@ -1266,22 +1763,36 @@ void PKSetexAtCmd::DoInitial() { } key_ = argv_[1]; value_ = argv_[3]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &time_stamp_) - || time_stamp_ >= INT32_MAX) { + if ((pstd::string2int(argv_[2].data(), argv_[2].size(), &time_stamp_sec_) == 0) || time_stamp_sec_ >= INT32_MAX) { res_.SetRes(CmdRes::kInvalidInt); return; } - return; } -void PKSetexAtCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->PKSetexAt(key_, value_, time_stamp_); - if (s.ok()) { +void PKSetexAtCmd::Do() { + s_ = db_->storage()->PKSetexAt(key_, value_, static_cast(time_stamp_sec_ * 1000)); + if (s_.ok()) { res_.SetRes(CmdRes::kOk); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKSetexAtCmd::DoThroughDB() { + Do(); +} + +void PKSetexAtCmd::DoUpdateCache() { + if (s_.ok()) { + auto expire = time_stamp_sec_ - static_cast(std::time(nullptr)); + if (expire <= 0) [[unlikely]] { + db_->cache()->Del({key_}); + return; + } + db_->cache()->Setxx(key_, value_, expire); } - return; } void PKScanRangeCmd::DoInitial() { @@ -1289,19 +1800,19 @@ void PKScanRangeCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNamePKScanRange); return; } - if (!strcasecmp(argv_[1].data(), "string_with_value")) { - type_ = blackwidow::kStrings; + if (strcasecmp(argv_[1].data(), "string_with_value") == 0) { + type_ = storage::DataType::kStrings; string_with_value = true; - } else if (!strcasecmp(argv_[1].data(), "string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[1].data(), "hash")) { - type_ = blackwidow::kHashes; - } else if (!strcasecmp(argv_[1].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[1].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[1].data(), "list")) { - type_ = blackwidow::kLists; + } else if 
(strcasecmp(argv_[1].data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(argv_[1].data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else if (strcasecmp(argv_[1].data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(argv_[1].data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(argv_[1].data(), "list") == 0) { + type_ = storage::DataType::kLists; } else { res_.SetRes(CmdRes::kInvalidDbType); return; @@ -1309,19 +1820,24 @@ void PKScanRangeCmd::DoInitial() { key_start_ = argv_[2]; key_end_ = argv_[3]; - size_t index = 4, argc = argv_.size(); + // start key and end key hash tag have to be same in non classic mode + if (!HashtagIsConsistent(key_start_, key_start_)) { + res_.SetRes(CmdRes::kInconsistentHashTag); + return; + } + size_t index = 4; + size_t argc = argv_.size(); while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "limit")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "limit") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &limit_) || limit_ <= 0) { + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &limit_) == 0) || limit_ <= 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -1331,22 +1847,20 @@ void PKScanRangeCmd::DoInitial() { } index++; } - return; } -void PKScanRangeCmd::Do(std::shared_ptr partition) { +void PKScanRangeCmd::Do() { std::string next_key; std::vector keys; - std::vector kvs; - rocksdb::Status s = partition->db()->PKScanRange(type_, key_start_, key_end_, pattern_, limit_, &keys, &kvs, &next_key); + std::vector kvs; + s_ = db_->storage()->PKScanRange(type_, key_start_, key_end_, pattern_, static_cast(limit_), &keys, &kvs, &next_key); - if (s.ok()) { + if (s_.ok()) { res_.AppendArrayLen(2); - res_.AppendStringLen(next_key.size()); + res_.AppendStringLenUint64(next_key.size()); res_.AppendContent(next_key); - - if (type_ == blackwidow::kStrings) { - res_.AppendArrayLen(string_with_value ? 2 * kvs.size() : kvs.size()); + if (type_ == storage::DataType::kStrings) { + res_.AppendArrayLenUint64(string_with_value ? 
2 * kvs.size() : kvs.size()); for (const auto& kv : kvs) { res_.AppendString(kv.key); if (string_with_value) { @@ -1354,15 +1868,16 @@ void PKScanRangeCmd::Do(std::shared_ptr partition) { } } } else { - res_.AppendArrayLen(keys.size()); - for (const auto& key : keys){ + res_.AppendArrayLenUint64(keys.size()); + for (const auto& key : keys) { res_.AppendString(key); } } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; } void PKRScanRangeCmd::DoInitial() { @@ -1370,19 +1885,19 @@ void PKRScanRangeCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNamePKRScanRange); return; } - if (!strcasecmp(argv_[1].data(), "string_with_value")) { - type_ = blackwidow::kStrings; + if (strcasecmp(argv_[1].data(), "string_with_value") == 0) { + type_ = storage::DataType::kStrings; string_with_value = true; - } else if (!strcasecmp(argv_[1].data(), "string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[1].data(), "hash")) { - type_ = blackwidow::kHashes; - } else if (!strcasecmp(argv_[1].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[1].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[1].data(), "list")) { - type_ = blackwidow::kLists; + } else if (strcasecmp(argv_[1].data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(argv_[1].data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else if (strcasecmp(argv_[1].data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(argv_[1].data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(argv_[1].data(), "list") == 0) { + type_ = storage::DataType::kLists; } else { res_.SetRes(CmdRes::kInvalidDbType); return; @@ -1390,19 +1905,24 @@ void PKRScanRangeCmd::DoInitial() { key_start_ = argv_[2]; key_end_ = argv_[3]; - size_t index = 4, argc = argv_.size(); + // start key and end key hash tag have to be same in non classic mode + if (!HashtagIsConsistent(key_start_, key_start_)) { + res_.SetRes(CmdRes::kInconsistentHashTag); + return; + } + size_t index = 4; + size_t argc = argv_.size(); while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "limit")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "limit") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &limit_) || limit_ <= 0) { + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &limit_) == 0) || limit_ <= 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -1412,22 +1932,22 @@ void PKRScanRangeCmd::DoInitial() { } index++; } - return; } -void PKRScanRangeCmd::Do(std::shared_ptr partition) { +void PKRScanRangeCmd::Do() { std::string next_key; std::vector keys; - std::vector kvs; - rocksdb::Status s = partition->db()->PKRScanRange(type_, key_start_, key_end_, pattern_, limit_, &keys, &kvs, &next_key); + std::vector kvs; + s_ = db_->storage()->PKRScanRange(type_, key_start_, key_end_, pattern_, static_cast(limit_), + &keys, &kvs, &next_key); - if (s.ok()) { + if (s_.ok()) { res_.AppendArrayLen(2); - res_.AppendStringLen(next_key.size()); + 
res_.AppendStringLenUint64(next_key.size()); res_.AppendContent(next_key); - if (type_ == blackwidow::kStrings) { - res_.AppendArrayLen(string_with_value ? 2 * kvs.size() : kvs.size()); + if (type_ == storage::DataType::kStrings) { + res_.AppendArrayLenUint64(string_with_value ? 2 * kvs.size() : kvs.size()); for (const auto& kv : kvs) { res_.AppendString(kv.key); if (string_with_value) { @@ -1435,13 +1955,14 @@ void PKRScanRangeCmd::Do(std::shared_ptr partition) { } } } else { - res_.AppendArrayLen(keys.size()); - for (const auto& key : keys){ + res_.AppendArrayLenUint64(keys.size()); + for (const auto& key : keys) { res_.AppendString(key); } } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; } diff --git a/tools/pika_migrate/src/pika_list.cc b/tools/pika_migrate/src/pika_list.cc index cf4442dab4..9cec350baa 100644 --- a/tools/pika_migrate/src/pika_list.cc +++ b/tools/pika_migrate/src/pika_list.cc @@ -4,8 +4,17 @@ // of patent rights can be found in the PATENTS file in the same directory. #include "include/pika_list.h" +#include +#include "include/pika_cache.h" +#include "include/pika_data_distribution.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "include/pika_slot_command.h" +#include "pstd/include/pstd_string.h" +#include "scope_record_lock.h" -#include "slash/include/slash_string.h" +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; void LIndexCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -14,23 +23,48 @@ void LIndexCmd::DoInitial() { } key_ = argv_[1]; std::string index = argv_[2]; - if (!slash::string2l(index.data(), index.size(), &index_)) { + if (pstd::string2int(index.data(), index.size(), &index_) == 0) { res_.SetRes(CmdRes::kInvalidInt); } - return; } -void LIndexCmd::Do(std::shared_ptr partition) { + +void LIndexCmd::Do() { std::string value; - rocksdb::Status s = partition->db()->LIndex(key_, index_, &value); + s_ = db_->storage()->LIndex(key_, index_, &value); + if (s_.ok()) { + res_.AppendString(value); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { + res_.AppendStringLen(-1); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LIndexCmd::ReadCache() { + std::string value; + auto s = db_->cache()->LIndex(key_, index_, &value); if (s.ok()) { res_.AppendString(value); } else if (s.IsNotFound()) { - res_.AppendStringLen(-1); + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void LIndexCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void LIndexCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_LIST, key_, db_); + } +} + void LInsertCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameLInsert); @@ -38,10 +72,10 @@ void LInsertCmd::DoInitial() { } key_ = argv_[1]; std::string dir = argv_[2]; - if (!strcasecmp(dir.data(), "before")) { - dir_ = blackwidow::Before; - } else if (!strcasecmp(dir.data(), "after")) { - dir_ = blackwidow::After; + if (strcasecmp(dir.data(), "before") == 0) { + dir_ = storage::Before; + } else if (strcasecmp(dir.data(), "after") == 0) { + dir_ = storage::After; } else { res_.SetRes(CmdRes::kSyntaxErr); return; @@ -49,13 +83,27 @@ void LInsertCmd::DoInitial() { pivot_ = argv_[3]; value_ = argv_[4]; } -void LInsertCmd::Do(std::shared_ptr partition) { + +void 
LInsertCmd::Do() { int64_t llen = 0; - rocksdb::Status s = partition->db()->LInsert(key_, dir_, pivot_, value_, &llen); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->LInsert(key_, dir_, pivot_, value_, &llen); + if (s_.ok() || s_.IsNotFound()) { res_.AppendInteger(llen); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LInsertCmd::DoThroughDB() { + Do(); +} + +void LInsertCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LInsert(key_, dir_, pivot_, value_); } } @@ -66,16 +114,143 @@ void LLenCmd::DoInitial() { } key_ = argv_[1]; } -void LLenCmd::Do(std::shared_ptr partition) { + +void LLenCmd::Do() { + uint64_t llen = 0; + s_ = db_->storage()->LLen(key_, &llen); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(static_cast(llen)); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LLenCmd::ReadCache() { uint64_t llen = 0; - rocksdb::Status s = partition->db()->LLen(key_, &llen); - if (s.ok() || s.IsNotFound()){ + auto s = db_->cache()->LLen(key_, &llen); + if (s.ok()) { res_.AppendInteger(llen); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void LLenCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void LLenCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_LIST, key_, db_); + } +} + +void BlockingBaseCmd::TryToServeBLrPopWithThisKey(const std::string& key, std::shared_ptr db) { + std::shared_ptr curr_conn = std::dynamic_pointer_cast(GetConn()); + if (!curr_conn) { + // current node is a slave and is applying a binlog of lpush/rpush/rpoplpush, just return + return; + } + auto dispatchThread = dynamic_cast(curr_conn->thread()); + + { + std::shared_lock read_latch(dispatchThread->GetBlockMtx()); + auto& key_to_conns = dispatchThread->GetMapFromKeyToConns(); + net::BlockKey blrPop_key{curr_conn->GetCurrentTable(), key}; + + if (auto it = key_to_conns.find(blrPop_key); it == key_to_conns.end()) { + // no client is waitting for this key + return; + } + } + + auto* args = new UnblockTaskArgs(key, std::move(db), dispatchThread); + bool is_slow_cmd = g_pika_conf->is_slow_cmd("LPOP") || g_pika_conf->is_slow_cmd("RPOP"); + bool is_admin_cmd = false; + g_pika_server->ScheduleClientPool(&ServeAndUnblockConns, args, is_slow_cmd, is_admin_cmd); +} + +void BlockingBaseCmd::ServeAndUnblockConns(void* args) { + auto bg_args = std::unique_ptr(static_cast(args)); + net::DispatchThread* dispatchThread = bg_args->dispatchThread; + std::shared_ptr db = bg_args->db; + std::string key = std::move(bg_args->key); + auto& key_to_conns_ = dispatchThread->GetMapFromKeyToConns(); + net::BlockKey blrPop_key{db->GetDBName(), key}; + + pstd::lock::ScopeRecordLock record_lock(db->LockMgr(), key);//It's a RAII Lock + std::unique_lock map_lock(dispatchThread->GetBlockMtx());// do not change the sequence of these 3 locks, or deadlock will happen + auto it = key_to_conns_.find(blrPop_key); + if (it == key_to_conns_.end()) { + return; + } + CmdRes res; + std::vector pop_binlog_args; + auto& waitting_list = it->second; + std::vector values; + rocksdb::Status s; + // traverse this list from head to tail(in the order of adding sequence) ,means "first blocked, first get served“ + for (auto conn_blocked = 
waitting_list->begin(); conn_blocked != waitting_list->end();) { + if (conn_blocked->GetBlockType() == BlockKeyType::Blpop) { + s = db->storage()->LPop(key, 1, &values); + } else { // BlockKeyType is Brpop + s = db->storage()->RPop(key, 1, &values); + } + if (s.ok()) { + res.AppendArrayLen(2); + res.AppendString(key); + res.AppendString(values[0]); + } else if (s.IsNotFound() || s.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + // this key has no more elements to serve more blocked conn. + break; + } else { + res.SetRes(CmdRes::kErrOther, s.ToString()); + } + auto conn_ptr = conn_blocked->GetConnBlocked(); + // send response to this client + conn_ptr->WriteResp(res.message()); + res.clear(); + conn_ptr->NotifyEpoll(true); + pop_binlog_args.emplace_back(conn_blocked->GetBlockType(), key, db, conn_ptr); + conn_blocked = waitting_list->erase(conn_blocked); // remove this conn from current waiting list + // erase all waiting info of this conn + dispatchThread->CleanWaitNodeOfUnBlockedBlrConn(conn_ptr); + } + dispatchThread->CleanKeysAfterWaitNodeCleaned(); + map_lock.unlock(); + WriteBinlogOfPopAndUpdateCache(pop_binlog_args); +} + +void BlockingBaseCmd::WriteBinlogOfPopAndUpdateCache(std::vector& pop_args) { + // write binlog of l/rpop + for (auto& pop_arg : pop_args) { + std::shared_ptr pop_cmd; + std::string pop_type; + if (pop_arg.block_type == BlockKeyType::Blpop) { + pop_type = kCmdNameLPop; + pop_cmd = std::make_shared(kCmdNameLPop, 2, kCmdFlagsWrite | kCmdFlagsList); + } else if (pop_arg.block_type == BlockKeyType::Brpop) { + pop_type = kCmdNameRPop; + pop_cmd = std::make_shared(kCmdNameRPop, 2, kCmdFlagsWrite | kCmdFlagsList); + } + + PikaCmdArgsType args; + args.push_back(std::move(pop_type)); + args.push_back(pop_arg.key); + pop_cmd->Initial(args, pop_arg.db->GetDBName()); + pop_cmd->SetConn(pop_arg.conn); + auto resp_ptr = std::make_shared("this resp won't be used for current code(consensus-level always be 0)"); + pop_cmd->SetResp(resp_ptr); + pop_cmd->DoUpdateCache(); + pop_cmd->DoBinlog(); + } +} + void LPushCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameLPush); @@ -87,32 +262,192 @@ void LPushCmd::DoInitial() { values_.push_back(argv_[pos++]); } } -void LPushCmd::Do(std::shared_ptr partition) { + +void LPushCmd::Do() { uint64_t llen = 0; - rocksdb::Status s = partition->db()->LPush(key_, values_, &llen); - if (s.ok()) { - res_.AppendInteger(llen); + s_ = db_->storage()->LPush(key_, values_, &llen); + if (s_.ok()) { + res_.AppendInteger(static_cast(llen)); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } + if (auto client_conn = std::dynamic_pointer_cast(GetConn()); client_conn != nullptr) { + if (client_conn->IsInTxn()) { + return; + } + } + TryToServeBLrPopWithThisKey(key_, db_); +} + +void LPushCmd::DoThroughDB() { + Do(); +} + +void LPushCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LPushx(key_, values_); } } +void BlockingBaseCmd::BlockThisClientToWaitLRPush(BlockKeyType block_pop_type, std::vector& keys, + int64_t expire_time) { + std::shared_ptr conn_to_block = std::dynamic_pointer_cast(GetConn()); + + auto dispatchThread = dynamic_cast(conn_to_block->thread()); + std::lock_guard latch(dispatchThread->GetBlockMtx()); + auto& key_to_conns = dispatchThread->GetMapFromKeyToConns(); + auto& conn_to_keys_ = 
dispatchThread->GetMapFromConnToKeys(); + + std::vector blrpop_keys; + for (auto& key : keys) { + net::BlockKey blrpop_key{conn_to_block->GetCurrentTable(), key}; + blrpop_keys.push_back(blrpop_key); + auto it = key_to_conns.find(blrpop_key); + if (it == key_to_conns.end()) { + // no waiting info found, means no other clients are waiting for the list related with this key right now + key_to_conns.emplace(blrpop_key, std::make_unique>()); + it = key_to_conns.find(blrpop_key); + } + auto& wait_list_of_this_key = it->second; + // add current client-connection to the tail of waiting list of this key + wait_list_of_this_key->emplace_back(expire_time, conn_to_block, block_pop_type); + } + + // construct a list of keys and insert into this map as value(while key of the map is conn_fd) + conn_to_keys_.emplace(conn_to_block->fd(), + std::make_unique>(blrpop_keys.begin(), blrpop_keys.end())); +} + +void BlockingBaseCmd::removeDuplicates(std::vector& keys_) { + std::unordered_set seen; + auto it = std::remove_if(keys_.begin(), keys_.end(), [&seen](const auto& key) { return !seen.insert(key).second; }); + keys_.erase(it, keys_.end()); +} + +void BLPopCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBLPop); + return; + } + + // fetching all keys(*argv_.begin is the command itself and *argv_.end() is the timeout value) + keys_.assign(++argv_.begin(), --argv_.end()); + removeDuplicates(keys_); + int64_t timeout = 0; + if (!pstd::string2int(argv_.back().data(), argv_.back().size(), &timeout)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + constexpr int64_t seconds_of_ten_years = 10 * 365 * 24 * 3600; + if (timeout < 0 || timeout > seconds_of_ten_years) { + res_.SetRes(CmdRes::kErrOther, + "timeout can't be a negative value and can't exceed the number of seconds in 10 years"); + return; + } + + if (timeout > 0) { + auto now = std::chrono::system_clock::now(); + expire_time_ = + std::chrono::time_point_cast(now).time_since_epoch().count() + timeout * 1000; + } // else(timeout is 0): expire_time_ default value is 0, means never expire; +} + +void BLPopCmd::Do() { + for (auto& this_key : keys_) { + std::vector values; + rocksdb::Status s = db_->storage()->LPop(this_key, 1, &values); + if (s.ok()) { + res_.AppendArrayLen(2); + res_.AppendString(this_key); + res_.AppendString(values[0]); + // write a binlog of lpop + binlog_args_.block_type = BlockKeyType::Blpop; + binlog_args_.key = this_key; + binlog_args_.db = db_; + binlog_args_.conn = GetConn(); + is_binlog_deferred_ = false; + return; + } else if (s.IsNotFound()) { + continue; + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + } + is_binlog_deferred_ = true; + if (auto client_conn = std::dynamic_pointer_cast(GetConn()); client_conn != nullptr) { + if (client_conn->IsInTxn()) { + res_.AppendArrayLen(-1); + return ; + } + } + BlockThisClientToWaitLRPush(BlockKeyType::Blpop, keys_, expire_time_); +} + +void BLPopCmd::DoBinlog() { + if (is_binlog_deferred_) { + return; + } + std::vector args; + args.push_back(std::move(binlog_args_)); + WriteBinlogOfPopAndUpdateCache(args); +} + void LPopCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameLPop); return; } key_ = argv_[1]; + size_t argc = argv_.size(); + if (argc > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLPop); + } else if (argc == 3) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &count_) == 0) { + 
res_.SetRes(CmdRes::kErrOther, kCmdNameLPop); + return; + } + if (count_ < 0) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + } } -void LPopCmd::Do(std::shared_ptr partition) { - std::string value; - rocksdb::Status s = partition->db()->LPop(key_, &value); - if (s.ok()) { - res_.AppendString(value); - } else if (s.IsNotFound()) { + +void LPopCmd::Do() { + std::vector elements; + s_ = db_->storage()->LPop(key_, count_, &elements); + + if (s_.ok()) { + if (elements.size() > 1) { + res_.AppendArrayLenUint64(elements.size()); + } + for (const auto& element : elements) { + res_.AppendString(element); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { res_.AppendStringLen(-1); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LPopCmd::DoThroughDB() { + Do(); +} + +void LPopCmd::DoUpdateCache() { + if (s_.ok()) { + std::string value; + db_->cache()->LPop(key_, &value); } } @@ -122,15 +457,32 @@ void LPushxCmd::DoInitial() { return; } key_ = argv_[1]; - value_ = argv_[2]; + size_t pos = 2; + while (pos < argv_.size()) { + values_.push_back(argv_[pos++]); + } } -void LPushxCmd::Do(std::shared_ptr partition) { + +void LPushxCmd::Do() { uint64_t llen = 0; - rocksdb::Status s = partition->db()->LPushx(key_, value_, &llen); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(llen); + s_ = db_->storage()->LPushx(key_, values_, &llen); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(static_cast(llen)); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LPushxCmd::DoThroughDB() { + Do(); +} + +void LPushxCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LPushx(key_, values_); } } @@ -141,31 +493,59 @@ void LRangeCmd::DoInitial() { } key_ = argv_[1]; std::string left = argv_[2]; - if (!slash::string2l(left.data(), left.size(), &left_)) { + if (pstd::string2int(left.data(), left.size(), &left_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } std::string right = argv_[3]; - if (!slash::string2l(right.data(), right.size(), &right_)) { + if (pstd::string2int(right.data(), right.size(), &right_) == 0) { res_.SetRes(CmdRes::kInvalidInt); } - return; } -void LRangeCmd::Do(std::shared_ptr partition) { + +void LRangeCmd::Do() { std::vector values; - rocksdb::Status s = partition->db()->LRange(key_, left_, right_, &values); + s_ = db_->storage()->LRange(key_, left_, right_, &values); + if (s_.ok()) { + res_.AppendArrayLenUint64(values.size()); + for (const auto& value : values) { + res_.AppendString(value); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { + res_.AppendArrayLen(0); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LRangeCmd::ReadCache() { + std::vector values; + auto s = db_->cache()->LRange(key_, left_, right_, &values); if (s.ok()) { res_.AppendArrayLen(values.size()); for (const auto& value : values) { res_.AppendString(value); } } else if (s.IsNotFound()) { - res_.AppendArrayLen(0); + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void LRangeCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void LRangeCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_LIST, key_, db_); + } +} + void 
LRemCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameLRem); @@ -173,19 +553,32 @@ void LRemCmd::DoInitial() { } key_ = argv_[1]; std::string count = argv_[2]; - if (!slash::string2l(count.data(), count.size(), &count_)) { + if (pstd::string2int(count.data(), count.size(), &count_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } value_ = argv_[3]; } -void LRemCmd::Do(std::shared_ptr partition) { + +void LRemCmd::Do() { uint64_t res = 0; - rocksdb::Status s = partition->db()->LRem(key_, count_, value_, &res); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(res); + s_ = db_->storage()->LRem(key_, count_, value_, &res); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(static_cast(res)); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LRemCmd::DoThroughDB() { + Do(); +} + +void LRemCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LRem(key_, count_, value_); } } @@ -196,24 +589,38 @@ void LSetCmd::DoInitial() { } key_ = argv_[1]; std::string index = argv_[2]; - if (!slash::string2l(index.data(), index.size(), &index_)) { + if (pstd::string2int(index.data(), index.size(), &index_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } value_ = argv_[3]; } -void LSetCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->LSet(key_, index_, value_); - if (s.ok()) { - res_.SetRes(CmdRes::kOk); - } else if (s.IsNotFound()) { - res_.SetRes(CmdRes::kNotFound); - } else if (s.IsCorruption() && s.ToString() == "Corruption: index out of range") { - //TODO refine return value - res_.SetRes(CmdRes::kOutOfRange); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } + +void LSetCmd::Do() { + s_ = db_->storage()->LSet(key_, index_, value_); + if (s_.ok()) { + res_.SetRes(CmdRes::kOk); + AddSlotKey("l", key_, db_); + } else if (s_.IsNotFound()) { + res_.SetRes(CmdRes::kNotFound); + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: index out of range") { + // TODO(): refine return value + res_.SetRes(CmdRes::kOutOfRange); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LSetCmd::DoThroughDB() { + Do(); +} + +void LSetCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LSet(key_, index_, value_); + } } void LTrimCmd::DoInitial() { @@ -223,41 +630,158 @@ void LTrimCmd::DoInitial() { } key_ = argv_[1]; std::string start = argv_[2]; - if (!slash::string2l(start.data(), start.size(), &start_)) { + if (pstd::string2int(start.data(), start.size(), &start_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } std::string stop = argv_[3]; - if (!slash::string2l(stop.data(), stop.size(), &stop_)) { + if (pstd::string2int(stop.data(), stop.size(), &stop_) == 0) { res_.SetRes(CmdRes::kInvalidInt); } - return; } -void LTrimCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->LTrim(key_, start_, stop_); - if (s.ok() || s.IsNotFound()) { + +void LTrimCmd::Do() { + s_ = db_->storage()->LTrim(key_, start_, stop_); + if (s_.ok() || s_.IsNotFound()) { res_.SetRes(CmdRes::kOk); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LTrimCmd::DoThroughDB() { + Do(); +} + +void LTrimCmd::DoUpdateCache() { + if (s_.ok()) { + 
db_->cache()->LTrim(key_, start_, stop_); + } +} + +void BRPopCmd::Do() { + for (auto& this_key : keys_) { + std::vector values; + s_ = db_->storage()->RPop(this_key, 1, &values); + if (s_.ok()) { + res_.AppendArrayLen(2); + res_.AppendString(this_key); + res_.AppendString(values[0]); + // write an binlog of rpop + binlog_args_.block_type = BlockKeyType::Brpop; + binlog_args_.key = this_key; + binlog_args_.db = db_; + binlog_args_.conn = GetConn(); + is_binlog_deferred_ = false; + return; + } else if (s_.IsNotFound()) { + continue; + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + return; + } } + is_binlog_deferred_ = true; + if (auto client_conn = std::dynamic_pointer_cast(GetConn()); client_conn != nullptr) { + if (client_conn->IsInTxn()) { + res_.AppendArrayLen(-1); + return ; + } + } + BlockThisClientToWaitLRPush(BlockKeyType::Brpop, keys_, expire_time_); } +void BRPopCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBLPop); + return; + } + + // fetching all keys(*argv_.begin is the command itself and *argv_.end() is the timeout value) + keys_.assign(++argv_.begin(), --argv_.end()); + removeDuplicates(keys_); + int64_t timeout = 0; + if (!pstd::string2int(argv_.back().data(), argv_.back().size(), &timeout)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + constexpr int64_t seconds_of_ten_years = 10 * 365 * 24 * 3600; + if (timeout < 0 || timeout > seconds_of_ten_years) { + res_.SetRes(CmdRes::kErrOther, + "timeout can't be a negative value and can't exceed the number of seconds in 10 years"); + return; + } + + if (timeout > 0) { + auto now = std::chrono::system_clock::now(); + expire_time_ = + std::chrono::time_point_cast(now).time_since_epoch().count() + timeout * 1000; + } // else(timeout is 0): expire_time_ default value is 0, means never expire; +} + +void BRPopCmd::DoBinlog() { + if (is_binlog_deferred_) { + return; + } + std::vector args; + args.push_back(std::move(binlog_args_)); + WriteBinlogOfPopAndUpdateCache(args); +} + + + void RPopCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameRPop); return; } key_ = argv_[1]; + if (argv_.size() > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameRPop); + } else if (argv_.size() == 3) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &count_) == 0) { + res_.SetRes(CmdRes::kErrOther, kCmdNameRPop); + return; + } + if (count_ < 0) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + } } -void RPopCmd::Do(std::shared_ptr partition) { - std::string value; - rocksdb::Status s = partition->db()->RPop(key_, &value); - if (s.ok()) { - res_.AppendString(value); - } else if (s.IsNotFound()) { + +void RPopCmd::Do() { + std::vector elements; + s_ = db_->storage()->RPop(key_, count_, &elements); + if (s_.ok()) { + if (elements.size() > 1) { + res_.AppendArrayLenUint64(elements.size()); + } + for (const auto &element: elements) { + res_.AppendString(element); + } + } else if (s_.IsNotFound()) { res_.AppendStringLen(-1); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void RPopCmd::DoThroughDB() { + Do(); +} + +void RPopCmd::DoUpdateCache() { + if (s_.ok()) { + std::string value; + db_->cache()->RPop(key_, &value); } } @@ -268,17 +792,60 @@ void RPopLPushCmd::DoInitial() { } source_ = argv_[1]; receiver_ = argv_[2]; + if 
(!HashtagIsConsistent(source_, receiver_)) { + res_.SetRes(CmdRes::kInconsistentHashTag); + } } -void RPopLPushCmd::Do(std::shared_ptr partition) { + +void RPopLPushCmd::Do() { std::string value; - rocksdb::Status s = partition->db()->RPoplpush(source_, receiver_, &value); - if (s.ok()) { + s_ = db_->storage()->RPoplpush(source_, receiver_, &value); + if (s_.ok()) { + AddSlotKey("k", receiver_, db_); res_.AppendString(value); - } else if (s.IsNotFound()) { + value_poped_from_source_ = value; + is_write_binlog_ = true; + } else if (s_.IsNotFound()) { + // no actual write operation happened, will not write binlog res_.AppendStringLen(-1); + is_write_binlog_ = false; + return; + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + return; + } + TryToServeBLrPopWithThisKey(receiver_, db_); +} + +void RPopLPushCmd::ReadCache() { + res_.SetRes(CmdRes::kErrOther, "the command is not supported in cache mode"); +} + +void RPopLPushCmd::DoBinlog() { + if (!is_write_binlog_) { + return; } + PikaCmdArgsType rpop_args; + rpop_args.push_back("RPOP"); + rpop_args.push_back(source_); + rpop_cmd_->Initial(rpop_args, db_name_); + + PikaCmdArgsType lpush_args; + lpush_args.push_back("LPUSH"); + lpush_args.push_back(receiver_); + lpush_args.push_back(value_poped_from_source_); + lpush_cmd_->Initial(lpush_args, db_name_); + + rpop_cmd_->SetConn(GetConn()); + rpop_cmd_->SetResp(resp_.lock()); + lpush_cmd_->SetConn(GetConn()); + lpush_cmd_->SetResp(resp_.lock()); + + rpop_cmd_->DoBinlog(); + lpush_cmd_->DoBinlog(); } void RPushCmd::DoInitial() { @@ -292,13 +859,33 @@ values_.push_back(argv_[pos++]); } } -void RPushCmd::Do(std::shared_ptr partition) { + +void RPushCmd::Do() { uint64_t llen = 0; - rocksdb::Status s = partition->db()->RPush(key_, values_, &llen); - if (s.ok()) { - res_.AppendInteger(llen); + s_ = db_->storage()->RPush(key_, values_, &llen); + if (s_.ok()) { + res_.AppendInteger(static_cast(llen)); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } + if (auto client_conn = std::dynamic_pointer_cast(GetConn()); client_conn != nullptr) { + if (client_conn->IsInTxn()) { + return; + } + } + TryToServeBLrPopWithThisKey(key_, db_); +} + +void RPushCmd::DoThroughDB() { + Do(); +} + +void RPushCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->RPushx(key_, values_); } } @@ -308,14 +895,31 @@ void RPushxCmd::DoInitial() { return; } key_ = argv_[1]; - value_ = argv_[2]; + size_t pos = 2; + while (pos < argv_.size()) { + values_.push_back(argv_[pos++]); + } } -void RPushxCmd::Do(std::shared_ptr partition) { + +void RPushxCmd::Do() { uint64_t llen = 0; - rocksdb::Status s = partition->db()->RPushx(key_, value_, &llen); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(llen); + s_ = db_->storage()->RPushx(key_, values_, &llen); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(static_cast(llen)); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } } + +void RPushxCmd::DoThroughDB() { + Do(); +} + +void RPushxCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->RPushx(key_, values_); + } +} \ No newline at end of file
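The blocking-list machinery added in pika_list.cc above (BlockThisClientToWaitLRPush, TryToServeBLrPopWithThisKey, ServeAndUnblockConns) boils down to a registry that maps each blocked (db, key) pair to a FIFO of waiting connections: BLPOP/BRPOP on an empty list appends the client to the tail of that key's list, and every successful LPUSH/RPUSH/RPOPLPUSH schedules a pass that pops elements and answers waiters in arrival order ("first blocked, first served"). The sketch below is a minimal, self-contained C++ rendering of that bookkeeping only; BlockKey, Waiter, and BlockedListRegistry are names invented here for illustration, not Pika's net::DispatchThread/BlockKey types, and the real patch additionally takes the per-key record lock, honors per-waiter expire times, writes an LPop/RPop binlog for every served waiter, and cleans the connection-to-keys reverse map.

#include <deque>
#include <functional>
#include <iostream>
#include <map>
#include <mutex>
#include <string>
#include <tuple>

// Toy stand-ins for the patch's (db, key) block key and blocked-connection node.
struct BlockKey {
  std::string db;
  std::string key;
  bool operator<(const BlockKey& o) const {
    return std::tie(db, key) < std::tie(o.db, o.key);
  }
};

struct Waiter {
  int fd;                                          // identifies the blocked client
  std::function<void(const std::string&)> reply;   // delivers the reply to it
};

class BlockedListRegistry {
 public:
  // BLPOP/BRPOP on an empty list: append this client to the tail of the
  // key's waiting list ("first blocked, first served").
  void Block(const BlockKey& bk, Waiter w) {
    std::lock_guard<std::mutex> l(mu_);
    key_to_waiters_[bk].push_back(std::move(w));
  }

  // Called after a successful push: hand out elements to waiters in arrival
  // order until either the waiters or the elements run out.
  void Serve(const BlockKey& bk, std::deque<std::string>& list) {
    std::lock_guard<std::mutex> l(mu_);
    auto it = key_to_waiters_.find(bk);
    if (it == key_to_waiters_.end()) return;  // nobody is blocked on this key
    auto& waiters = it->second;
    while (!waiters.empty() && !list.empty()) {
      std::string value = list.front();
      list.pop_front();                       // the pop done on the waiter's behalf
      waiters.front().reply(value);
      waiters.pop_front();
    }
    if (waiters.empty()) key_to_waiters_.erase(it);
  }

 private:
  std::mutex mu_;                             // plays the role of the block mutex
  std::map<BlockKey, std::deque<Waiter>> key_to_waiters_;
};

int main() {
  BlockedListRegistry reg;
  std::deque<std::string> mylist;             // stands in for the stored list

  reg.Block({"db0", "mylist"}, {1, [](const std::string& v) {
              std::cout << "client 1 gets " << v << "\n"; }});
  reg.Block({"db0", "mylist"}, {2, [](const std::string& v) {
              std::cout << "client 2 gets " << v << "\n"; }});

  // A push of one element wakes only the longest-waiting client.
  mylist.push_back("a");
  reg.Serve({"db0", "mylist"}, mylist);       // prints: client 1 gets a
  return 0;
}

Serving under a single mutex keeps the push path and the block path from racing between the emptiness check and the enqueue, which is the same reason the patch acquires the record lock and the block mutex in a fixed order before walking the waiting list.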
diff --git a/tools/pika_migrate/src/pika_meta.cc b/tools/pika_migrate/src/pika_meta.cc deleted file mode 100644 index 48e11de7e2..0000000000 --- a/tools/pika_migrate/src/pika_meta.cc +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_meta.h" -#include "src/pika_inner_message.pb.h" - -const uint32_t VERSION = 1; - -PikaMeta::PikaMeta() - : local_meta_path_("") { - pthread_rwlock_init(&rwlock_, NULL); -} - -PikaMeta::~PikaMeta() { - pthread_rwlock_destroy(&rwlock_); -} - -void PikaMeta::SetPath(const std::string& path) { - local_meta_path_ = path; -} - -/* - * ******************* Meta File Format ****************** - * | | | | - * 4 Bytes 4 Bytes meta size Bytes - */ -Status PikaMeta::StableSave(const std::vector& table_structs) { - slash::RWLock l(&rwlock_, true); - if (local_meta_path_.empty()) { - LOG(WARNING) << "Local meta file path empty"; - return Status::Corruption("local meta file path empty"); - } - std::string local_meta_file = local_meta_path_ + kPikaMeta; - std::string tmp_file = local_meta_file; - tmp_file.append("_tmp"); - - slash::RWFile* saver = NULL; - slash::CreatePath(local_meta_path_); - Status s = slash::NewRWFile(tmp_file, &saver); - if (!s.ok()) { - delete saver; - LOG(WARNING) << "Open local meta file failed"; - return Status::Corruption("open local meta file failed"); - } - - InnerMessage::PikaMeta meta; - for (const auto& ts : table_structs) { - InnerMessage::TableInfo* table_info = meta.add_table_infos(); - table_info->set_table_name(ts.table_name); - table_info->set_partition_num(ts.partition_num); - for (const auto& id : ts.partition_ids) { - table_info->add_partition_ids(id); - } - } - - std::string meta_str; - if (!meta.SerializeToString(&meta_str)) { - delete saver; - LOG(WARNING) << "Serialize meta string failed"; - return Status::Corruption("serialize meta string failed"); - } - uint32_t meta_str_size = meta_str.size(); - - char *p = saver->GetData(); - memcpy(p, &VERSION, sizeof(uint32_t)); - p += sizeof(uint32_t); - memcpy(p, &meta_str_size, sizeof(uint32_t)); - p += sizeof(uint32_t); - memcpy(p, meta_str.data(), meta_str.size()); - delete saver; - - slash::DeleteFile(local_meta_file); - if (slash::RenameFile(tmp_file, local_meta_file)) { - LOG(WARNING) << "Failed to rename file, error: " << strerror(errno); - return Status::Corruption("faild to rename file"); - } - return Status::OK(); -} - -Status PikaMeta::ParseMeta(std::vector* const table_structs) { - slash::RWLock l(&rwlock_, false); - std::string local_meta_file = local_meta_path_ + kPikaMeta; - if (!slash::FileExists(local_meta_file)) { - LOG(WARNING) << "Local meta file not found, path: " << local_meta_file; - return Status::Corruption("meta file not found"); - } - - slash::RWFile* reader = NULL; - Status s = slash::NewRWFile(local_meta_file, &reader); - if (!s.ok()) { - delete reader; - LOG(WARNING) << "Open local meta file failed"; - return Status::Corruption("open local meta file failed"); - } - - if (reader->GetData() == NULL) { - delete reader; - LOG(WARNING) << "Meta file init error"; - return Status::Corruption("meta file init error"); - } - - uint32_t version = 0; - uint32_t meta_size = 0; - memcpy((char*)(&version), reader->GetData(), sizeof(uint32_t)); - memcpy((char*)(&meta_size), 
reader->GetData() + sizeof(uint32_t), sizeof(uint32_t)); - char* const buf = new char[meta_size]; - memcpy(buf, reader->GetData() + 2 * sizeof(uint32_t), meta_size); - - InnerMessage::PikaMeta meta; - if (!meta.ParseFromArray(buf, meta_size)) { - delete[] buf; - delete reader; - LOG(WARNING) << "Parse meta string failed"; - return Status::Corruption("parse meta string failed"); - } - delete[] buf; - delete reader; - - table_structs->clear(); - for (int idx = 0; idx < meta.table_infos_size(); ++idx) { - InnerMessage::TableInfo ti = meta.table_infos(idx); - std::set partition_ids; - for (int sidx = 0; sidx < ti.partition_ids_size(); ++sidx) { - partition_ids.insert(ti.partition_ids(sidx)); - } - table_structs->emplace_back(ti.table_name(), ti.partition_num(), partition_ids); - } - return Status::OK(); -} diff --git a/tools/pika_migrate/src/pika_migrate_thread.cc b/tools/pika_migrate/src/pika_migrate_thread.cc new file mode 100644 index 0000000000..fd221f0b8e --- /dev/null +++ b/tools/pika_migrate/src/pika_migrate_thread.cc @@ -0,0 +1,979 @@ +#include + +#include + +#include "include/pika_admin.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" +#include "include/pika_conf.h" +#include "include/pika_define.h" +#include "include/pika_migrate_thread.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "include/pika_slot_command.h" +#include "pstd/include/pika_codis_slot.h" +#include "src/redis_streams.h" + +#define min(a, b) (((a) > (b)) ? (b) : (a)) + +const int32_t MAX_MEMBERS_NUM = 512; +const std::string INVALID_STR = "NL"; + +extern std::unique_ptr g_pika_server; +extern std::unique_ptr g_pika_conf; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +// do migrate key to dest pika server +static int doMigrate(net::NetCli *cli, std::string send_str) { + pstd::Status s; + s = cli->Send(&send_str); + if (!s.ok()) { + LOG(WARNING) << "DB Migrate Send error: " << s.ToString(); + return -1; + } + return 1; +} + +// do migrate cli auth +static int doAuth(net::NetCli *cli) { + net::RedisCmdArgsType argv; + std::string wbuf_str; + std::string requirepass = g_pika_conf->requirepass(); + if (requirepass != "") { + argv.emplace_back("auth"); + argv.emplace_back(requirepass); + } else { + argv.emplace_back("ping"); + } + net::SerializeRedisCommand(argv, &wbuf_str); + + pstd::Status s; + s = cli->Send(&wbuf_str); + if (!s.ok()) { + LOG(WARNING) << "DB Migrate auth Send error: " << s.ToString(); + return -1; + } + // Recv + s = cli->Recv(&argv); + if (!s.ok()) { + LOG(WARNING) << "DB Migrate auth Recv error: " << s.ToString(); + return -1; + } + pstd::StringToLower(argv[0]); + if (argv[0] != "ok" && argv[0] != "pong" && argv[0].find("no password") == std::string::npos) { + LOG(WARNING) << "DB Migrate auth error: " << argv[0]; + return -1; + } + return 0; +} + +static int migrateKeyTTl(net::NetCli *cli, const std::string& key, storage::DataType data_type, + const std::shared_ptr& db) { + net::RedisCmdArgsType argv; + std::string send_str; + int64_t type_timestamp = db->storage()->TTL(key); + if (PIKA_TTL_ZERO == type_timestamp || PIKA_TTL_STALE == type_timestamp) { + argv.emplace_back("del"); + argv.emplace_back(key); + net::SerializeRedisCommand(argv, &send_str); + } else if (0 < type_timestamp) { + argv.emplace_back("expire"); + argv.emplace_back(key); + argv.emplace_back(std::to_string(type_timestamp)); + net::SerializeRedisCommand(argv, &send_str); + } else { + // no expire + return 0; + } + + if 
(doMigrate(cli, send_str) < 0) { + return -1; + } + + return 1; +} + +// get set key all values +static int setGetall(const std::string& key, std::vector *members, const std::shared_ptr& db) { + rocksdb::Status s = db->storage()->SMembers(key, members); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(WARNING) << "Set get key: " << key << " value not found "; + return 0; + } else { + LOG(WARNING) << "Set get key: " << key << " value error: " << s.ToString(); + return -1; + } + } + return 1; +} + +static int MigrateKv(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + std::string value; + rocksdb::Status s = db->storage()->Get(key, &value); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(WARNING) << "Get kv key: " << key << " not found "; + return 0; + } else { + LOG(WARNING) << "Get kv key: " << key << " error: " << strerror(errno); + return -1; + } + } + + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("SET"); + argv.emplace_back(key); + argv.emplace_back(value); + net::SerializeRedisCommand(argv, &send_str); + + int send_num = 0; + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + + int r; + if (0 > (r = migrateKeyTTl(cli, key, storage::DataType::kStrings, db))) { + return -1; + } else { + send_num += r; + } + + return send_num; +} + +static int MigrateHash(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + int send_num = 0; + int64_t cursor = 0; + std::vector field_values; + rocksdb::Status s; + + do { + s = db->storage()->HScan(key, cursor, "*", MAX_MEMBERS_NUM, &field_values, &cursor); + if (s.ok() && field_values.size() > 0) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("HMSET"); + argv.emplace_back(key); + for (const auto &field_value : field_values) { + argv.emplace_back(field_value.field); + argv.emplace_back(field_value.value); + } + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + } while (cursor != 0 && s.ok()); + + if (send_num > 0) { + int r; + if ((r = migrateKeyTTl(cli, key, storage::DataType::kHashes, db)) < 0) { + return -1; + } else { + send_num += r; + } + } + + return send_num; +} + +static int MigrateList(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + // del old key, before migrate list; prevent redo when failed + int send_num = 0; + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("DEL"); + argv.emplace_back(key); + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + + std::vector values; + rocksdb::Status s = db->storage()->LRange(key, 0, -1, &values); + if (s.ok()) { + auto iter = values.begin(); + while (iter != values.end()) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("RPUSH"); + argv.emplace_back(key); + + for (int i = 0; iter != values.end() && i < MAX_MEMBERS_NUM; ++iter, ++i) { + argv.emplace_back(*iter); + } + + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + } + + // has send del key command + if (send_num > 1) { + int r; + if (0 > (r = migrateKeyTTl(cli, key, storage::DataType::kLists, db))) { + return -1; + } else { + send_num += r; + } + } + + return send_num; +} + +static int MigrateStreams(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + int send_num = 0; + int64_t cursor = 0; + std::vector 
members; + rocksdb::Status s; + + std::vector id_messages; + storage::StreamScanArgs arg; + storage::StreamUtils::StreamParseIntervalId("-", arg.start_sid, &arg.start_ex, 0); + storage::StreamUtils::StreamParseIntervalId("+", arg.end_sid, &arg.end_ex, UINT64_MAX); + s = db->storage()->XRange(key, arg, id_messages); + if (s.ok()) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("XADD"); + argv.emplace_back(key); + for (auto &fv : id_messages) { + std::vector message; + storage::StreamUtils::DeserializeMessage(fv.value, message); + storage::streamID sid; + sid.DeserializeFrom(fv.field); + argv.emplace_back(sid.ToString()); + for (auto &m : message) { + argv.emplace_back(m); + } + } + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + return send_num; +} + +static int MigrateSet(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + int send_num = 0; + int64_t cursor = 0; + std::vector members; + rocksdb::Status s; + + do { + s = db->storage()->SScan(key, cursor, "*", MAX_MEMBERS_NUM, &members, &cursor); + if (s.ok() && members.size() > 0) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("SADD"); + argv.emplace_back(key); + + for (const auto &member : members) { + argv.emplace_back(member); + } + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + } while (cursor != 0 && s.ok()); + + if (0 < send_num) { + int r; + if (0 > (r = migrateKeyTTl(cli, key, storage::DataType::kSets, db))) { + return -1; + } else { + send_num += r; + } + } + + return send_num; +} + +static int MigrateZset(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + int send_num = 0; + int64_t cursor = 0; + std::vector score_members; + rocksdb::Status s; + + do { + s = db->storage()->ZScan(key, cursor, "*", MAX_MEMBERS_NUM, &score_members, &cursor); + if (s.ok() && score_members.size() > 0) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("ZADD"); + argv.emplace_back(key); + + for (const auto &score_member : score_members) { + argv.emplace_back(std::to_string(score_member.score)); + argv.emplace_back(score_member.member); + } + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + } while (cursor != 0 && s.ok()); + + if (send_num > 0) { + int r; + if ((r = migrateKeyTTl(cli, key, storage::DataType::kZSets, db)) < 0) { + return -1; + } else { + send_num += r; + } + } + + return send_num; +} + +// get list key all values +static int listGetall(const std::string& key, std::vector *values, const std::shared_ptr& db) { + rocksdb::Status s = db->storage()->LRange(key, 0, -1, values); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(WARNING) << "List get key: " << key << " value not found "; + return 0; + } else { + LOG(WARNING) << "List get key: " << key << " value error: " << s.ToString(); + return -1; + } + } + return 1; +} + +PikaParseSendThread::PikaParseSendThread(PikaMigrateThread *migrate_thread, const std::shared_ptr& db) + : dest_ip_("none"), + dest_port_(-1), + timeout_ms_(3000), + mgrtkeys_num_(64), + should_exit_(false), + migrate_thread_(migrate_thread), + db_(db) {} + +PikaParseSendThread::~PikaParseSendThread() { + if (is_running()) { + should_exit_ = true; + StopThread(); + } + + if (cli_) { + delete cli_; + cli_ = nullptr; + } +} + +bool PikaParseSendThread::Init(const 
std::string &ip, int64_t port, int64_t timeout_ms, int64_t mgrtkeys_num) { + dest_ip_ = ip; + dest_port_ = port; + timeout_ms_ = timeout_ms; + mgrtkeys_num_ = static_cast<int32_t>(mgrtkeys_num); + + cli_ = net::NewRedisCli(); + cli_->set_connect_timeout(static_cast<int32_t>(timeout_ms_)); + cli_->set_send_timeout(static_cast<int32_t>(timeout_ms_)); + cli_->set_recv_timeout(static_cast<int32_t>(timeout_ms_)); + LOG(INFO) << "PikaParseSendThread init cli_, dest_ip_: " << dest_ip_ << ", dest_port_: " << dest_port_; + pstd::Status result = cli_->Connect(dest_ip_, static_cast<int32_t>(dest_port_), g_pika_server->host()); + if (!result.ok()) { + LOG(ERROR) << "PikaParseSendThread::Init failed. Connect server(" << dest_ip_ << ":" << dest_port_ << ") " + << result.ToString(); + return false; + } + + // do auth + if (doAuth(cli_) < 0) { + LOG(WARNING) << "PikaParseSendThread::Init do auth failed !!"; + cli_->Close(); + return false; + } + + return true; +} + +void PikaParseSendThread::ExitThread(void) { should_exit_ = true; } + +int PikaParseSendThread::MigrateOneKey(net::NetCli *cli, const std::string& key, const char key_type, bool async) { + int send_num; + switch (key_type) { + case 'k': + if (0 > (send_num = MigrateKv(cli_, key, db_))) { + return -1; + } + break; + case 'h': + if (0 > (send_num = MigrateHash(cli_, key, db_))) { + return -1; + } + break; + case 'l': + if (0 > (send_num = MigrateList(cli_, key, db_))) { + return -1; + } + break; + case 's': + if (0 > (send_num = MigrateSet(cli_, key, db_))) { + return -1; + } + break; + case 'z': + if (0 > (send_num = MigrateZset(cli_, key, db_))) { + return -1; + } + break; + case 'm': + if (0 > (send_num = MigrateStreams(cli_, key, db_))) { + return -1; + } + break; + default: + return -1; + } + return send_num; +} + +void PikaParseSendThread::DelKeysAndWriteBinlog(std::deque<std::pair<const char, std::string>> &send_keys, + const std::shared_ptr<DB>& db) { + for (const auto& send_key : send_keys) { + DeleteKey(send_key.second, send_key.first, db_); + WriteDelKeyToBinlog(send_key.second, db_); + } +} + +// write the del key to binlog for slaves +void WriteDelKeyToBinlog(const std::string& key, const std::shared_ptr<DB>& db) { + std::shared_ptr<Cmd> cmd_ptr = g_pika_cmd_table_manager->GetCmd("del"); + std::unique_ptr<PikaCmdArgsType> args = std::make_unique<PikaCmdArgsType>(); + args->emplace_back("DEL"); + args->emplace_back(key); + cmd_ptr->Initial(*args, db->GetDBName()); + + std::shared_ptr<SyncMasterDB> sync_db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db->GetDBName())); + pstd::Status s = sync_db->ConsensusProposeLog(cmd_ptr); + if (!s.ok()) { + LOG(ERROR) << "write delete key to binlog failed, key: " << key; + } +} + +bool PikaParseSendThread::CheckMigrateRecv(int64_t need_receive_num) { + net::RedisCmdArgsType argv; + for (int64_t i = 0; i < need_receive_num; ++i) { + pstd::Status s; + s = cli_->Recv(&argv); + if (!s.ok()) { + LOG(ERROR) << "PikaParseSendThread::CheckMigrateRecv Recv error: " << s.ToString(); + return false; + } + + // set return ok + // zadd return number + // hset return 0 or 1 + // hmset return ok + // sadd return number + // rpush return length + std::string reply = argv[0]; + int64_t ret; + if (1 == argv.size() && + (kInnerReplOk == pstd::StringToLower(reply) || pstd::string2int(reply.data(), reply.size(), &ret))) { + continue; + } else { + LOG(ERROR) << "PikaParseSendThread::CheckMigrateRecv reply error: " << reply; + return false; + } + } + return true; +} + +void *PikaParseSendThread::ThreadMain() { + while (!should_exit_) { + std::deque<std::pair<const char, std::string>> send_keys; + { + std::unique_lock lq(migrate_thread_->mgrtkeys_queue_mutex_); + while (!should_exit_ && 0
>= migrate_thread_->mgrtkeys_queue_.size()) { + migrate_thread_->mgrtkeys_cond_.wait(lq); + } + + if (should_exit_) { + LOG(INFO) << "PikaParseSendThread::ThreadMain :" << pthread_self() << " exit !!!"; + return nullptr; + } + + migrate_thread_->IncWorkingThreadNum(); + for (int32_t i = 0; i < mgrtkeys_num_; ++i) { + if (migrate_thread_->mgrtkeys_queue_.empty()) { + break; + } + send_keys.emplace_back(migrate_thread_->mgrtkeys_queue_.front()); + migrate_thread_->mgrtkeys_queue_.pop_front(); + } + } + + int64_t send_num = 0; + int64_t need_receive_num = 0; + int32_t migrate_keys_num = 0; + for (const auto& send_key : send_keys) { + if (0 > (send_num = MigrateOneKey(cli_, send_key.second, send_key.first, false))) { + LOG(WARNING) << "PikaParseSendThread::ThreadMain MigrateOneKey: " << send_key.second << " failed !!!"; + migrate_thread_->OnTaskFailed(); + migrate_thread_->DecWorkingThreadNum(); + return nullptr; + } else { + need_receive_num += send_num; + ++migrate_keys_num; + } + } + + // check response + if (!CheckMigrateRecv(need_receive_num)) { + LOG(INFO) << "PikaMigrateThread::ThreadMain CheckMigrateRecv failed !!!"; + migrate_thread_->OnTaskFailed(); + migrate_thread_->DecWorkingThreadNum(); + return nullptr; + } else { + DelKeysAndWriteBinlog(send_keys, db_); + } + + migrate_thread_->AddResponseNum(migrate_keys_num); + migrate_thread_->DecWorkingThreadNum(); + } + + return nullptr; +} + +PikaMigrateThread::PikaMigrateThread() + : net::Thread(), + dest_ip_("none"), + dest_port_(-1), + timeout_ms_(3000), + keys_num_(-1), + slot_id_(-1), + is_migrating_(false), + should_exit_(false), + is_task_success_(true), + send_num_(0), + response_num_(0), + moved_num_(0), + + workers_num_(8), + working_thread_num_(0) + {} + +PikaMigrateThread::~PikaMigrateThread() { + LOG(INFO) << "PikaMigrateThread::~PikaMigrateThread"; + + if (is_running()) { + should_exit_ = true; + NotifyRequestMigrate(); + workers_cond_.notify_all(); + StopThread(); + } +} + +bool PikaMigrateThread::ReqMigrateBatch(const std::string &ip, int64_t port, int64_t time_out, int64_t slot_id, + int64_t keys_num, const std::shared_ptr<DB>& db) { + if (migrator_mutex_.try_lock()) { + if (is_migrating_) { + if (dest_ip_ != ip || dest_port_ != port || slot_id != slot_id_) { + LOG(INFO) << "PikaMigrateThread::ReqMigrateBatch current: " << dest_ip_ << ":" << dest_port_ << " slot[" << slot_id_ + << "] request: " << ip << ":" << port << " db[" << db << "]"; + migrator_mutex_.unlock(); + return false; + } + db_ = db; + timeout_ms_ = time_out; + keys_num_ = keys_num; + NotifyRequestMigrate(); + migrator_mutex_.unlock(); + return true; + } else { + dest_ip_ = ip; + dest_port_ = port; + timeout_ms_ = time_out; + keys_num_ = keys_num; + slot_id_ = slot_id; + should_exit_ = false; + db_ = db; + + ResetThread(); + int ret = StartThread(); + if (0 != ret) { + LOG(ERROR) << "PikaMigrateThread::ReqMigrateBatch StartThread failed. 
" + << " ret=" << ret; + is_migrating_ = false; + StopThread(); + } else { + LOG(INFO) << "PikaMigrateThread::ReqMigrateBatch DB" << db; + is_migrating_ = true; + NotifyRequestMigrate(); + } + migrator_mutex_.unlock(); + return true; + } + } + return false; +} + +int PikaMigrateThread::ReqMigrateOne(const std::string &key, const std::shared_ptr &db) { + std::unique_lock lm(migrator_mutex_); + + int slot_id = GetSlotID(g_pika_conf->default_slot_num(), key); + storage::DataType type; + char key_type; + rocksdb::Status s = db->storage()->GetType(key, type); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(INFO) << "PikaMigrateThread::ReqMigrateOne key: " << key << " not found"; + return 0; + } else { + LOG(WARNING) << "PikaMigrateThread::ReqMigrateOne key: " << key << " error: " << strerror(errno); + return -1; + } + } + key_type = storage::DataTypeToTag(type); + if (type == storage::DataType::kNones) { + LOG(WARNING) << "PikaMigrateThread::ReqMigrateOne key: " << key << " type: " << static_cast(type) + << " is illegal"; + return 0; + } + + if (slot_id != slot_id_) { + LOG(WARNING) << "PikaMigrateThread::ReqMigrateOne Slot : " << slot_id << " is not the migrating slot:" << slot_id_; + return -1; + } + + // if the migrate thread exit, start it + if (!is_migrating_) { + ResetThread(); + int ret = StartThread(); + if (0 != ret) { + LOG(ERROR) << "PikaMigrateThread::ReqMigrateOne StartThread failed. " + << " ret=" << ret; + is_migrating_ = false; + StopThread(); + } else { + LOG(INFO) << "PikaMigrateThread::ReqMigrateOne StartThread"; + is_migrating_ = true; + usleep(100); + } + } + // check the key is migrating + std::pair kpair = std::make_pair(key_type, key); + if (IsMigrating(kpair)) { + LOG(INFO) << "PikaMigrateThread::ReqMigrateOne key: " << key << " is migrating ! 
"; + return 1; + } else { + std::unique_lock lo(mgrtone_queue_mutex_); + mgrtone_queue_.emplace_back(kpair); + NotifyRequestMigrate(); + } + + return 1; +} + +void PikaMigrateThread::GetMigrateStatus(std::string *ip, int64_t* port, int64_t *slot, bool *migrating, int64_t *moved, + int64_t *remained) { + std::unique_lock lm(migrator_mutex_); + // todo for sure + if (!is_migrating_) { + *remained = -1; + return; + } + + *ip = dest_ip_; + *port = dest_port_; + *migrating = is_migrating_; + *moved = moved_num_; + *slot = slot_id_; + std::unique_lock lq(mgrtkeys_queue_mutex_); + int64_t migrating_keys_num = static_cast(mgrtkeys_queue_.size()); + std::string slotKey = GetSlotKey(static_cast(slot_id_)); + int32_t slot_size = 0; + rocksdb::Status s = db_->storage()->SCard(slotKey, &slot_size); + if (s.ok()) { + *remained = slot_size + migrating_keys_num; + } else { + *remained = migrating_keys_num; + } +} + +void PikaMigrateThread::CancelMigrate(void) { + LOG(INFO) << "PikaMigrateThread::CancelMigrate"; + + if (is_running()) { + should_exit_ = true; + NotifyRequestMigrate(); + workers_cond_.notify_one(); + StopThread(); + } +} + +void PikaMigrateThread::IncWorkingThreadNum(void) { ++working_thread_num_; } + +void PikaMigrateThread::DecWorkingThreadNum(void) { + std::unique_lock lw(workers_mutex_); + --working_thread_num_; + workers_cond_.notify_one(); +} + +void PikaMigrateThread::OnTaskFailed() { + LOG(ERROR) << "PikaMigrateThread::OnTaskFailed !!!"; + is_task_success_ = false; +} + +void PikaMigrateThread::AddResponseNum(int32_t response_num) { response_num_ += response_num; } + +void PikaMigrateThread::ResetThread(void) { + if (0 != thread_id()) { + JoinThread(); + } +} + +void PikaMigrateThread::DestroyThread(bool is_self_exit) { + std::unique_lock lm(migrator_mutex_); + LOG(INFO) << "PikaMigrateThread::DestroyThread"; + + // Destroy work threads + DestroyParseSendThreads(); + + if (is_self_exit) { + set_is_running(false); + } + + { + std::unique_lock lq(mgrtkeys_queue_mutex_); + std::unique_lock lm(mgrtkeys_map_mutex_); + std::deque>().swap(mgrtkeys_queue_); + std::map, std::string>().swap(mgrtkeys_map_); + } + + cursor_ = 0; + is_migrating_ = false; + is_task_success_ = true; + moved_num_ = 0; +} + +void PikaMigrateThread::NotifyRequestMigrate(void) { + std::unique_lock lr(request_migrate_mutex_); + request_migrate_ = true; + request_migrate_cond_.notify_one(); +} + +bool PikaMigrateThread::IsMigrating(std::pair &kpair) { + std::unique_lock lo(mgrtone_queue_mutex_); + std::unique_lock lm(mgrtkeys_map_mutex_); + + for (const auto& iter : mgrtone_queue_) { + if (iter.first == kpair.first && iter.second == kpair.second) { + return true; + } + } + + auto iter = mgrtkeys_map_.find(kpair); + if (iter != mgrtkeys_map_.end()) { + return true; + } + + return false; +} + +void PikaMigrateThread::ReadSlotKeys(const std::string &slotKey, int64_t need_read_num, int64_t &real_read_num, + int32_t *finish) { + real_read_num = 0; + std::string key; + char key_type; + int32_t is_member = 0; + std::vector members; + + rocksdb::Status s = db_->storage()->SScan(slotKey, cursor_, "*", need_read_num, &members, &cursor_); + if (s.ok() && 0 < members.size()) { + for (const auto &member : members) { + db_->storage()->SIsmember(slotKey, member, &is_member); + if (is_member) { + key = member; + key_type = key.at(0); + key.erase(key.begin()); + std::pair kpair = std::make_pair(key_type, key); + if (mgrtkeys_map_.find(kpair) == mgrtkeys_map_.end()) { + mgrtkeys_queue_.emplace_back(kpair); + mgrtkeys_map_[kpair] = 
INVALID_STR; + ++real_read_num; + } + } else { + LOG(INFO) << "PikaMigrateThread::ReadSlotKeys key " << member << " not found in " << slotKey; + } + } + } + + *finish = (0 == cursor_) ? 1 : 0; +} + +bool PikaMigrateThread::CreateParseSendThreads(int32_t dispatch_num) { + workers_num_ = static_cast<int32_t>(g_pika_conf->slotmigrate_thread_num()); + for (int32_t i = 0; i < workers_num_; ++i) { + auto worker = new PikaParseSendThread(this, db_); + if (!worker->Init(dest_ip_, dest_port_, timeout_ms_, dispatch_num)) { + delete worker; + DestroyParseSendThreads(); + return false; + } else { + int ret = worker->StartThread(); + if (0 != ret) { + LOG(INFO) << "PikaMigrateThread::CreateParseSendThreads start work thread failed ret=" << ret; + delete worker; + DestroyParseSendThreads(); + return false; + } else { + workers_.emplace_back(worker); + } + } + } + return true; +} + +void PikaMigrateThread::DestroyParseSendThreads(void) { + if (!workers_.empty()) { + for (auto worker : workers_) { + worker->ExitThread(); + } + + { + std::unique_lock lm(mgrtkeys_queue_mutex_); + mgrtkeys_cond_.notify_all(); + } + + for (auto worker : workers_) { + delete worker; + } + workers_.clear(); + } +} + +void *PikaMigrateThread::ThreadMain() { + LOG(INFO) << "PikaMigrateThread::ThreadMain Start"; + + // Create parse_send_threads + auto dispatch_num = static_cast<int32_t>(g_pika_conf->thread_migrate_keys_num()); + if (!CreateParseSendThreads(dispatch_num)) { + LOG(INFO) << "PikaMigrateThread::ThreadMain CreateParseSendThreads failed !!!"; + DestroyThread(true); + return nullptr; + } + + std::string slotKey = GetSlotKey(static_cast<int32_t>(slot_id_)); + int32_t slot_size = 0; + db_->storage()->SCard(slotKey, &slot_size); + + while (!should_exit_) { + // Wait for a migrate task + { + std::unique_lock lm(request_migrate_mutex_); + while (!request_migrate_) { + request_migrate_cond_.wait(lm); + } + request_migrate_ = false; + + if (should_exit_) { + LOG(INFO) << "PikaMigrateThread::ThreadMain :" << pthread_self() << " exit1 !!!"; + DestroyThread(false); + return nullptr; + } + } + + // read keys from the slot and push them to mgrtkeys_queue_ + int64_t round_remained_keys = keys_num_; + int64_t real_read_num = 0; + int32_t is_finish = 0; + send_num_ = 0; + response_num_ = 0; + do { + std::unique_lock lq(mgrtkeys_queue_mutex_); + std::unique_lock lo(mgrtone_queue_mutex_); + std::unique_lock lm(mgrtkeys_map_mutex_); + + // first check whether a single key needs to be migrated + if (!mgrtone_queue_.empty()) { + while (!mgrtone_queue_.empty()) { + mgrtkeys_queue_.push_front(mgrtone_queue_.front()); + mgrtkeys_map_[mgrtone_queue_.front()] = INVALID_STR; + mgrtone_queue_.pop_front(); + ++send_num_; + } + } else { + int64_t need_read_num = (0 < round_remained_keys - dispatch_num) ?
dispatch_num : round_remained_keys; + ReadSlotKeys(slotKey, need_read_num, real_read_num, &is_finish); + round_remained_keys -= need_read_num; + send_num_ += static_cast<int32_t>(real_read_num); + } + mgrtkeys_cond_.notify_all(); + + } while (0 < round_remained_keys && !is_finish); + + LOG(INFO) << "PikaMigrateThread::ThreadMain wait for PikaParseSendThread to finish"; + // wait for all PikaParseSendThread workers to finish + { + std::unique_lock lw(workers_mutex_); + while (!should_exit_ && is_task_success_ && send_num_ != response_num_) { + if (workers_cond_.wait_for(lw, std::chrono::seconds(60)) == std::cv_status::timeout) { + break; + } + } + } + LOG(INFO) << "PikaMigrateThread::ThreadMain send_num:" << send_num_ << " response_num:" << response_num_; + + if (should_exit_) { + LOG(INFO) << "PikaMigrateThread::ThreadMain :" << pthread_self() << " exit2 !!!"; + DestroyThread(false); + return nullptr; + } + + // check whether this round's migrate task succeeded + if (!is_task_success_) { + LOG(ERROR) << "PikaMigrateThread::ThreadMain one round migrate task failed !!!"; + DestroyThread(true); + return nullptr; + } else { + moved_num_ += response_num_; + + std::unique_lock lm(mgrtkeys_map_mutex_); + std::map<std::pair<const char, std::string>, std::string>().swap(mgrtkeys_map_); + } + + // check whether the slot migration has finished + int32_t slot_remained_keys = 0; + db_->storage()->SCard(slotKey, &slot_remained_keys); + if (0 == slot_remained_keys) { + LOG(INFO) << "PikaMigrateThread::ThreadMain slot_size:" << slot_size << " moved_num:" << moved_num_; + if (slot_size != moved_num_) { + LOG(ERROR) << "PikaMigrateThread::ThreadMain moved_num != slot_size !!!"; + } + DestroyThread(true); + return nullptr; + } + } + + return nullptr; +} + +/* EOF */ diff --git a/tools/pika_migrate/src/pika_monitor_thread.cc b/tools/pika_migrate/src/pika_monitor_thread.cc deleted file mode 100644 index 746aa09080..0000000000 --- a/tools/pika_migrate/src/pika_monitor_thread.cc +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant
- -#include "include/pika_monitor_thread.h" - -#include - -PikaMonitorThread::PikaMonitorThread() - : pink::Thread(), - monitor_cond_(&monitor_mutex_protector_) { - set_thread_name("MonitorThread"); - has_monitor_clients_.store(false); -} - -PikaMonitorThread::~PikaMonitorThread() { - set_should_stop(); - if (is_running()) { - monitor_cond_.SignalAll(); - StopThread(); - } - for (std::list::iterator iter = monitor_clients_.begin(); - iter != monitor_clients_.end(); - ++iter) { - close(iter->fd); - } - LOG(INFO) << "PikaMonitorThread " << pthread_self() << " exit!!!"; -} - -void PikaMonitorThread::AddMonitorClient(std::shared_ptr client_ptr) { - StartThread(); - slash::MutexLock lm(&monitor_mutex_protector_); - monitor_clients_.push_back(ClientInfo{client_ptr->fd(), client_ptr->ip_port(), 0, client_ptr}); - has_monitor_clients_.store(true); -} - -void PikaMonitorThread::RemoveMonitorClient(const std::string& ip_port) { - std::list::iterator iter = monitor_clients_.begin(); - for (; iter != monitor_clients_.end(); ++iter) { - if (ip_port == "all") { - close(iter->fd); - continue; - } - if (iter->ip_port == ip_port) { - close(iter->fd); - break; - } - } - if (ip_port == "all") { - monitor_clients_.clear(); - } else if (iter != monitor_clients_.end()) { - monitor_clients_.erase(iter); - } - has_monitor_clients_.store(!monitor_clients_.empty()); -} - -void PikaMonitorThread::AddMonitorMessage(const std::string &monitor_message) { - slash::MutexLock lm(&monitor_mutex_protector_); - if (monitor_messages_.empty() && cron_tasks_.empty()) { - monitor_messages_.push_back(monitor_message); - monitor_cond_.Signal(); - } else { - monitor_messages_.push_back(monitor_message); - } -} - -int32_t PikaMonitorThread::ThreadClientList(std::vector* clients_ptr) { - if (clients_ptr != NULL) { - for (std::list::iterator iter = monitor_clients_.begin(); - iter != monitor_clients_.end(); - iter++) { - clients_ptr->push_back(*iter); - } - } - return monitor_clients_.size(); -} - -void PikaMonitorThread::AddCronTask(MonitorCronTask task) { - slash::MutexLock lm(&monitor_mutex_protector_); - if (monitor_messages_.empty() && cron_tasks_.empty()) { - cron_tasks_.push(task); - monitor_cond_.Signal(); - } else { - cron_tasks_.push(task); - } -} - -bool PikaMonitorThread::FindClient(const std::string &ip_port) { - slash::MutexLock lm(&monitor_mutex_protector_); - for (std::list::iterator iter = monitor_clients_.begin(); - iter != monitor_clients_.end(); - ++iter) { - if (iter->ip_port == ip_port) { - return true; - } - } - return false; -} - -bool PikaMonitorThread::ThreadClientKill(const std::string& ip_port) { - if (is_running()) { - if (ip_port == "all") { - AddCronTask({TASK_KILLALL, "all"}); - } else if (FindClient(ip_port)) { - AddCronTask({TASK_KILL, ip_port}); - } else { - return false; - } - } - return true; -} - -bool PikaMonitorThread::HasMonitorClients() { - return has_monitor_clients_.load(); -} - -pink::WriteStatus PikaMonitorThread::SendMessage(int32_t fd, std::string& message) { - size_t retry = 0; - ssize_t nwritten = 0, message_len_sended = 0, message_len_left = message.size(); - while (message_len_left > 0) { - nwritten = write(fd, message.data() + message_len_sended, message_len_left); - if (nwritten == -1 && errno == EAGAIN) { - // If the write buffer is full, but the client no longer consumes, it will - // get stuck in the loop and cause the entire Pika to block becase of monitor_mutex_protector_. 
- // So we put a limit on the number of retries - if (++retry >= 10) { - return pink::kWriteError; - } else { - // Sleep one second wait for client consume message - sleep(1); - continue; - } - } else if (nwritten == -1) { - return pink::kWriteError; - } - if (retry > 0) retry = 0; - message_len_sended += nwritten; - message_len_left -= nwritten; - } - return pink::kWriteAll; -} - -void* PikaMonitorThread::ThreadMain() { - std::deque messages_deque; - std::string messages_transfer; - MonitorCronTask task; - pink::WriteStatus write_status; - while (!should_stop()) { - { - slash::MutexLock lm(&monitor_mutex_protector_); - while (monitor_messages_.empty() && cron_tasks_.empty() && !should_stop()) { - monitor_cond_.Wait(); - } - } - if (should_stop()) { - break; - } - { - slash::MutexLock lm(&monitor_mutex_protector_); - while (!cron_tasks_.empty()) { - task = cron_tasks_.front(); - cron_tasks_.pop(); - RemoveMonitorClient(task.ip_port); - if (task.task == TASK_KILLALL) { - std::queue empty_queue; - cron_tasks_.swap(empty_queue); - } - } - } - - messages_deque.clear(); - { - slash::MutexLock lm(&monitor_mutex_protector_); - messages_deque.swap(monitor_messages_); - if (monitor_clients_.empty() || messages_deque.empty()) { - continue; - } - } - messages_transfer = "+"; - for (std::deque::iterator iter = messages_deque.begin(); - iter != messages_deque.end(); - ++iter) { - messages_transfer.append(iter->data(), iter->size()); - messages_transfer.append("\n"); - } - if (messages_transfer == "+") { - continue; - } - messages_transfer.replace(messages_transfer.size()-1, 1, "\r\n", 0, 2); - monitor_mutex_protector_.Lock(); - for (std::list::iterator iter = monitor_clients_.begin(); - iter != monitor_clients_.end(); - ++iter) { - write_status = SendMessage(iter->fd, messages_transfer); - if (write_status == pink::kWriteError) { - cron_tasks_.push({TASK_KILL, iter->ip_port}); - } - } - monitor_mutex_protector_.Unlock(); - } - return NULL; -} diff --git a/tools/pika_migrate/src/pika_monotonic_time.cc b/tools/pika_migrate/src/pika_monotonic_time.cc new file mode 100644 index 0000000000..1c3f6e820d --- /dev/null +++ b/tools/pika_migrate/src/pika_monotonic_time.cc @@ -0,0 +1,63 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
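+ +// getMonotonicUs() returns a monotonic timestamp in microseconds; monotime is assumed here to be +// an unsigned 64-bit typedef declared in include/pika_monotonic_time.h (header not shown in this diff). +// Each preprocessor block below selects one implementation per platform/architecture at compile time. +// Minimal usage sketch, under that assumption: +// monotime start = getMonotonicUs(); +// /* ... do work ... */ +// monotime elapsed_us = getMonotonicUs() - start;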
+ +#if defined(__APPLE__) // Mac +#include <mach/mach_time.h> + +#include "include/pika_monotonic_time.h" + +monotime getMonotonicUs() { + static mach_timebase_info_data_t timebase; + if (timebase.denom == 0) { + mach_timebase_info(&timebase); + } + uint64_t nanos = mach_absolute_time() * timebase.numer / timebase.denom; + return nanos / 1000; +} + +#elif defined(__FreeBSD__) // FreeBSD +#include <time.h> + +#include "include/pika_monotonic_time.h" + +monotime getMonotonicUs() { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (ts.tv_sec * 1000000) + (ts.tv_nsec / 1000); +} + +#elif defined(__linux__) // Linux + +#ifdef __x86_64__ // x86_64 + +#include <time.h> + +#include "include/pika_monotonic_time.h" + +monotime getMonotonicUs() { + timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return static_cast<uint64_t>(ts.tv_sec) * 1000000 + static_cast<uint64_t>(ts.tv_nsec) / 1000; +} + +#elif __arm__ || __aarch64__ // ARM + +#include <sys/time.h> + +#include "include/pika_monotonic_time.h" + +uint64_t getMonotonicUs() { + timeval tv; + gettimeofday(&tv, nullptr); + return static_cast<uint64_t>(tv.tv_sec) * 1000000 + static_cast<uint64_t>(tv.tv_usec); +} + +#else +#error "Unsupported architecture for Linux" +#endif // __x86_64__, __arm__ + +#else +#error "Unsupported platform" +#endif // __APPLE__, __linux__ \ No newline at end of file diff --git a/tools/pika_migrate/src/pika_partition.cc b/tools/pika_migrate/src/pika_partition.cc deleted file mode 100644 index 5d4c014135..0000000000 --- a/tools/pika_migrate/src/pika_partition.cc +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_partition.h" - -#include - -#include "include/pika_conf.h" -#include "include/pika_server.h" -#include "include/pika_rm.h" - -#include "slash/include/mutex_impl.h" - -extern PikaConf* g_pika_conf; -extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; - -std::string PartitionPath(const std::string& table_path, - uint32_t partition_id) { - char buf[100]; - snprintf(buf, sizeof(buf), "%u/", partition_id); - return table_path + buf; -} - -std::string PartitionName(const std::string& table_name, - uint32_t partition_id) { - char buf[256]; - snprintf(buf, sizeof(buf), "(%s:%u)", table_name.data(), partition_id); - return std::string(buf); -} - -std::string BgsaveSubPath(const std::string& table_name, - uint32_t partition_id) { - char buf[256]; - std::string partition_id_str = std::to_string(partition_id); - snprintf(buf, sizeof(buf), "%s/%s", table_name.data(), partition_id_str.data()); - return std::string(buf); -} - -std::string DbSyncPath(const std::string& sync_path, - const std::string& table_name, - const uint32_t partition_id, - bool classic_mode) { - char buf[256]; - std::string partition_id_str = std::to_string(partition_id); - if (classic_mode) { - snprintf(buf, sizeof(buf), "%s/", table_name.data()); - } else { - snprintf(buf, sizeof(buf), "%s/%s/", table_name.data(), partition_id_str.data()); - } - return sync_path + buf; -} - -Partition::Partition(const std::string& table_name, - uint32_t partition_id, - const std::string& table_db_path, - const std::string& table_log_path) : - table_name_(table_name), - partition_id_(partition_id), - binlog_io_error_(false), - bgsave_engine_(NULL), - purging_(false) { - - db_path_ = g_pika_conf->classic_mode() ?
- table_db_path : PartitionPath(table_db_path, partition_id_); - log_path_ = g_pika_conf->classic_mode() ? - table_log_path : PartitionPath(table_log_path, partition_id_); - bgsave_sub_path_ = g_pika_conf->classic_mode() ? - table_name : BgsaveSubPath(table_name_, partition_id_); - dbsync_path_ = DbSyncPath(g_pika_conf->db_sync_path(), table_name_, - partition_id_, g_pika_conf->classic_mode()); - partition_name_ = g_pika_conf->classic_mode() ? - table_name : PartitionName(table_name_, partition_id_); - - pthread_rwlockattr_t attr; - pthread_rwlockattr_init(&attr); - pthread_rwlockattr_setkind_np(&attr, - PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); - - pthread_rwlock_init(&db_rwlock_, &attr); - - db_ = std::shared_ptr(new blackwidow::BlackWidow()); - rocksdb::Status s = db_->Open(g_pika_server->bw_options(), db_path_); - - lock_mgr_ = new slash::lock::LockMgr(1000, 0, std::make_shared()); - - opened_ = s.ok() ? true : false; - assert(db_); - assert(s.ok()); - LOG(INFO) << partition_name_ << " DB Success"; - - logger_ = std::shared_ptr( - new Binlog(log_path_, g_pika_conf->binlog_file_size())); -} - -Partition::~Partition() { - Close(); - delete bgsave_engine_; - pthread_rwlock_destroy(&db_rwlock_); - delete lock_mgr_; -} - -void Partition::Leave() { - Close(); - MoveToTrash(); -} - -void Partition::Close() { - if (!opened_) { - return; - } - slash::RWLock rwl(&db_rwlock_, true); - db_.reset(); - logger_.reset(); - opened_ = false; -} - -// Before call this function, should -// close db and log first -void Partition::MoveToTrash() { - if (opened_) { - return; - } - - std::string dbpath = db_path_; - if (dbpath[dbpath.length() - 1] == '/') { - dbpath.erase(dbpath.length() - 1); - } - dbpath.append("_deleting/"); - if (slash::RenameFile(db_path_, dbpath.c_str())) { - LOG(WARNING) << "Failed to move db to trash, error: " << strerror(errno); - return; - } - g_pika_server->PurgeDir(dbpath); - - std::string logpath = log_path_; - if (logpath[logpath.length() - 1] == '/') { - logpath.erase(logpath.length() - 1); - } - logpath.append("_deleting/"); - if (slash::RenameFile(log_path_, logpath.c_str())) { - LOG(WARNING) << "Failed to move log to trash, error: " << strerror(errno); - return; - } - g_pika_server->PurgeDir(logpath); - - LOG(WARNING) << "Partition: " << partition_name_ << " move to trash success"; -} - -std::string Partition::GetTableName() const { - return table_name_; -} - -uint32_t Partition::GetPartitionId() const { - return partition_id_; -} - -std::string Partition::GetPartitionName() const { - return partition_name_; -} - -std::shared_ptr Partition::logger() const { - return logger_; -} - -std::shared_ptr Partition::db() const { - return db_; -} - -Status Partition::WriteBinlog(const std::string& binlog) { - if (!opened_) { - LOG(WARNING) << partition_name_ << " not opened, failed to exec command"; - return Status::Corruption("Partition Not Opened"); - } - slash::Status s; - if (!binlog.empty()) { - s = logger_->Put(binlog); - } - - if (!s.ok()) { - LOG(WARNING) << partition_name_ << " Writing binlog failed, maybe no space left on device"; - SetBinlogIoError(true); - return Status::Corruption("Writing binlog failed, maybe no space left on device"); - } - return Status::OK(); -} - -void Partition::Compact(const blackwidow::DataType& type) { - if (!opened_) return; - db_->Compact(type); -} - -void Partition::DbRWLockWriter() { - pthread_rwlock_wrlock(&db_rwlock_); -} - -void Partition::DbRWLockReader() { - pthread_rwlock_rdlock(&db_rwlock_); -} - -void Partition::DbRWUnLock() { - 
pthread_rwlock_unlock(&db_rwlock_); -} - -slash::lock::LockMgr* Partition::LockMgr() { - return lock_mgr_; -} - -void Partition::SetBinlogIoError(bool error) { - binlog_io_error_ = error; -} - -bool Partition::IsBinlogIoError() { - return binlog_io_error_; -} - -bool Partition::GetBinlogOffset(BinlogOffset* const boffset) { - if (opened_) { - logger_->GetProducerStatus(&boffset->filenum, &boffset->offset); - return true; - } - return false; -} - -bool Partition::SetBinlogOffset(const BinlogOffset& boffset) { - if (opened_) { - logger_->SetProducerStatus(boffset.filenum, boffset.offset); - return true; - } - return false; -} - -void Partition::PrepareRsync() { - slash::DeleteDirIfExist(dbsync_path_); - slash::CreatePath(dbsync_path_ + "strings"); - slash::CreatePath(dbsync_path_ + "hashes"); - slash::CreatePath(dbsync_path_ + "lists"); - slash::CreatePath(dbsync_path_ + "sets"); - slash::CreatePath(dbsync_path_ + "zsets"); -} - -// Try to update master offset -// This may happend when dbsync from master finished -// Here we do: -// 1, Check dbsync finished, got the new binlog offset -// 2, Replace the old db -// 3, Update master offset, and the PikaAuxiliaryThread cron will connect and do slaveof task with master -bool Partition::TryUpdateMasterOffset() { - std::string info_path = dbsync_path_ + kBgsaveInfoFile; - if (!slash::FileExists(info_path)) { - return false; - } - - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_name_, partition_id_)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << partition_name_ << " not exist"; - return false; - } - - // Got new binlog offset - std::ifstream is(info_path); - if (!is) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Failed to open info file after db sync"; - slave_partition->SetReplState(ReplState::kError); - return false; - } - std::string line, master_ip; - int lineno = 0; - int64_t filenum = 0, offset = 0, tmp = 0, master_port = 0; - while (std::getline(is, line)) { - lineno++; - if (lineno == 2) { - master_ip = line; - } else if (lineno > 2 && lineno < 6) { - if (!slash::string2l(line.data(), line.size(), &tmp) || tmp < 0) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Format of info file after db sync error, line : " << line; - is.close(); - slave_partition->SetReplState(ReplState::kError); - return false; - } - if (lineno == 3) { master_port = tmp; } - else if (lineno == 4) { filenum = tmp; } - else { offset = tmp; } - - } else if (lineno > 5) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Format of info file after db sync error, line : " << line; - is.close(); - slave_partition->SetReplState(ReplState::kError); - return false; - } - } - is.close(); - - LOG(INFO) << "Partition: " << partition_name_ << " Information from dbsync info" - << ", master_ip: " << master_ip - << ", master_port: " << master_port - << ", filenum: " << filenum - << ", offset: " << offset; - - // Sanity check - if (master_ip != slave_partition->MasterIp() - || master_port != slave_partition->MasterPort()) { - LOG(WARNING) << "Partition: " << partition_name_ - << " Error master node ip port: " << master_ip << ":" << master_port; - slave_partition->SetReplState(ReplState::kError); - return false; - } - - // Retransmit Data to target redis - g_pika_server->RetransmitData(dbsync_path_); - - slash::DeleteFile(info_path); - if (!ChangeDb(dbsync_path_)) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Failed to change db"; - 
slave_partition->SetReplState(ReplState::kError); - return false; - } - - // Update master offset - logger_->SetProducerStatus(filenum, offset); - slave_partition->SetReplState(ReplState::kTryConnect); - return true; -} - -/* - * Change a new db locate in new_path - * return true when change success - * db remain the old one if return false - */ -bool Partition::ChangeDb(const std::string& new_path) { - - std::string tmp_path(db_path_); - if (tmp_path.back() == '/') { - tmp_path.resize(tmp_path.size() - 1); - } - tmp_path += "_bak"; - slash::DeleteDirIfExist(tmp_path); - - RWLock l(&db_rwlock_, true); - LOG(INFO) << "Partition: "<< partition_name_ - << ", Prepare change db from: " << tmp_path; - db_.reset(); - - if (0 != slash::RenameFile(db_path_.c_str(), tmp_path)) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Failed to rename db path when change db, error: " << strerror(errno); - return false; - } - - if (0 != slash::RenameFile(new_path.c_str(), db_path_.c_str())) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Failed to rename new db path when change db, error: " << strerror(errno); - return false; - } - - db_.reset(new blackwidow::BlackWidow()); - rocksdb::Status s = db_->Open(g_pika_server->bw_options(), db_path_); - assert(db_); - assert(s.ok()); - slash::DeleteDirIfExist(tmp_path); - LOG(INFO) << "Partition: " << partition_name_ << ", Change db success"; - return true; -} - -bool Partition::IsBgSaving() { - slash::MutexLock ml(&bgsave_protector_); - return bgsave_info_.bgsaving; -} - -void Partition::BgSavePartition() { - slash::MutexLock l(&bgsave_protector_); - if (bgsave_info_.bgsaving) { - return; - } - bgsave_info_.bgsaving = true; - BgTaskArg* bg_task_arg = new BgTaskArg(); - bg_task_arg->partition = shared_from_this(); - g_pika_server->BGSaveTaskSchedule(&DoBgSave, static_cast(bg_task_arg)); -} - -BgSaveInfo Partition::bgsave_info() { - slash::MutexLock l(&bgsave_protector_); - return bgsave_info_; -} - -void Partition::DoBgSave(void* arg) { - BgTaskArg* bg_task_arg = static_cast(arg); - - // Do BgSave - bool success = bg_task_arg->partition->RunBgsaveEngine(); - - // Some output - BgSaveInfo info = bg_task_arg->partition->bgsave_info(); - std::ofstream out; - out.open(info.path + "/" + kBgsaveInfoFile, std::ios::in | std::ios::trunc); - if (out.is_open()) { - out << (time(NULL) - info.start_time) << "s\n" - << g_pika_server->host() << "\n" - << g_pika_server->port() << "\n" - << info.filenum << "\n" - << info.offset << "\n"; - out.close(); - } - if (!success) { - std::string fail_path = info.path + "_FAILED"; - slash::RenameFile(info.path.c_str(), fail_path.c_str()); - } - bg_task_arg->partition->FinishBgsave(); - - delete bg_task_arg; -} - -bool Partition::RunBgsaveEngine() { - // Prepare for Bgsaving - if (!InitBgsaveEnv() || !InitBgsaveEngine()) { - ClearBgsave(); - return false; - } - LOG(INFO) << partition_name_ << " after prepare bgsave"; - - BgSaveInfo info = bgsave_info(); - LOG(INFO) << partition_name_ << " bgsave_info: path=" << info.path - << ", filenum=" << info.filenum - << ", offset=" << info.offset; - - // Backup to tmp dir - rocksdb::Status s = bgsave_engine_->CreateNewBackup(info.path); - LOG(INFO) << partition_name_ << " create new backup finished."; - - if (!s.ok()) { - LOG(WARNING) << partition_name_ << " create new backup failed :" << s.ToString(); - return false; - } - return true; -} - -// Prepare engine, need bgsave_protector protect -bool Partition::InitBgsaveEnv() { - slash::MutexLock l(&bgsave_protector_); - // Prepare 
for bgsave dir - bgsave_info_.start_time = time(NULL); - char s_time[32]; - int len = strftime(s_time, sizeof(s_time), "%Y%m%d%H%M%S", localtime(&bgsave_info_.start_time)); - bgsave_info_.s_start_time.assign(s_time, len); - std::string time_sub_path = g_pika_conf->bgsave_prefix() + std::string(s_time, 8); - bgsave_info_.path = g_pika_conf->bgsave_path() + time_sub_path + "/" + bgsave_sub_path_; - if (!slash::DeleteDirIfExist(bgsave_info_.path)) { - LOG(WARNING) << partition_name_ << " remove exist bgsave dir failed"; - return false; - } - slash::CreatePath(bgsave_info_.path, 0755); - // Prepare for failed dir - if (!slash::DeleteDirIfExist(bgsave_info_.path + "_FAILED")) { - LOG(WARNING) << partition_name_ << " remove exist fail bgsave dir failed :"; - return false; - } - return true; -} - -// Prepare bgsave env, need bgsave_protector protect -bool Partition::InitBgsaveEngine() { - delete bgsave_engine_; - rocksdb::Status s = blackwidow::BackupEngine::Open(db().get(), &bgsave_engine_); - if (!s.ok()) { - LOG(WARNING) << partition_name_ << " open backup engine failed " << s.ToString(); - return false; - } - - { - RWLock l(&db_rwlock_, true); - { - slash::MutexLock l(&bgsave_protector_); - logger_->GetProducerStatus(&bgsave_info_.filenum, &bgsave_info_.offset); - } - s = bgsave_engine_->SetBackupContent(); - if (!s.ok()) { - LOG(WARNING) << partition_name_ << " set backup content failed " << s.ToString(); - return false; - } - } - return true; -} - -void Partition::ClearBgsave() { - slash::MutexLock l(&bgsave_protector_); - bgsave_info_.Clear(); -} - -void Partition::FinishBgsave() { - slash::MutexLock l(&bgsave_protector_); - bgsave_info_.bgsaving = false; -} - -bool Partition::FlushDB() { - slash::RWLock rwl(&db_rwlock_, true); - slash::MutexLock ml(&bgsave_protector_); - if (bgsave_info_.bgsaving) { - return false; - } - - LOG(INFO) << partition_name_ << " Delete old db..."; - db_.reset(); - - std::string dbpath = db_path_; - if (dbpath[dbpath.length() - 1] == '/') { - dbpath.erase(dbpath.length() - 1); - } - dbpath.append("_deleting/"); - slash::RenameFile(db_path_, dbpath.c_str()); - - db_ = std::shared_ptr(new blackwidow::BlackWidow()); - rocksdb::Status s = db_->Open(g_pika_server->bw_options(), db_path_); - assert(db_); - assert(s.ok()); - LOG(INFO) << partition_name_ << " Open new db success"; - g_pika_server->PurgeDir(dbpath); - return true; -} - -bool Partition::FlushSubDB(const std::string& db_name) { - slash::RWLock rwl(&db_rwlock_, true); - slash::MutexLock ml(&bgsave_protector_); - if (bgsave_info_.bgsaving) { - return false; - } - - LOG(INFO) << partition_name_ << " Delete old " + db_name + " db..."; - db_.reset(); - - std::string dbpath = db_path_; - if (dbpath[dbpath.length() - 1] != '/') { - dbpath.append("/"); - } - - std::string sub_dbpath = dbpath + db_name; - std::string del_dbpath = dbpath + db_name + "_deleting"; - slash::RenameFile(sub_dbpath, del_dbpath); - - db_ = std::shared_ptr(new blackwidow::BlackWidow()); - rocksdb::Status s = db_->Open(g_pika_server->bw_options(), db_path_); - assert(db_); - assert(s.ok()); - LOG(INFO) << partition_name_ << " open new " + db_name + " db success"; - g_pika_server->PurgeDir(del_dbpath); - return true; -} - -bool Partition::PurgeLogs(uint32_t to, bool manual) { - // Only one thread can go through - bool expect = false; - if (!purging_.compare_exchange_strong(expect, true)) { - LOG(WARNING) << "purge process already exist"; - return false; - } - PurgeArg *arg = new PurgeArg(); - arg->to = to; - arg->manual = manual; - 
arg->partition = shared_from_this(); - g_pika_server->PurgelogsTaskSchedule(&DoPurgeLogs, static_cast(arg)); - return true; -} - -void Partition::ClearPurge() { - purging_ = false; -} - -void Partition::DoPurgeLogs(void* arg) { - PurgeArg* purge = static_cast(arg); - purge->partition->PurgeFiles(purge->to, purge->manual); - purge->partition->ClearPurge(); - delete (PurgeArg*)arg; -} - -bool Partition::PurgeFiles(uint32_t to, bool manual) { - std::map binlogs; - if (!GetBinlogFiles(binlogs)) { - LOG(WARNING) << partition_name_ << " Could not get binlog files!"; - return false; - } - - int delete_num = 0; - struct stat file_stat; - int remain_expire_num = binlogs.size() - g_pika_conf->expire_logs_nums(); - std::map::iterator it; - for (it = binlogs.begin(); it != binlogs.end(); ++it) { - if ((manual && it->first <= to) // Manual purgelogsto - || (remain_expire_num > 0) // Expire num trigger - || (binlogs.size() - delete_num > 10 // At lease remain 10 files - && stat(((log_path_ + it->second)).c_str(), &file_stat) == 0 - && file_stat.st_mtime < time(NULL) - g_pika_conf->expire_logs_days() * 24 * 3600)) { // Expire time trigger - // We check this every time to avoid lock when we do file deletion - if (!g_pika_rm->BinlogCloudPurgeFromSMP(table_name_, partition_id_, it->first)) { - LOG(WARNING) << partition_name_ << " Could not purge "<< (it->first) << ", since it is already be used"; - return false; - } - - // Do delete - slash::Status s = slash::DeleteFile(log_path_ + it->second); - if (s.ok()) { - ++delete_num; - --remain_expire_num; - } else { - LOG(WARNING) << partition_name_ << " Purge log file : " << (it->second) << " failed! error:" << s.ToString(); - } - } else { - // Break when face the first one not satisfied - // Since the binlogs is order by the file index - break; - } - } - if (delete_num) { - LOG(INFO) << partition_name_ << " Success purge "<< delete_num; - } - return true; -} - -bool Partition::GetBinlogFiles(std::map& binlogs) { - std::vector children; - int ret = slash::GetChildren(log_path_, children); - if (ret != 0) { - LOG(WARNING) << partition_name_ << " Get all files in log path failed! 
error:" << ret; - return false; - } - - int64_t index = 0; - std::string sindex; - std::vector::iterator it; - for (it = children.begin(); it != children.end(); ++it) { - if ((*it).compare(0, kBinlogPrefixLen, kBinlogPrefix) != 0) { - continue; - } - sindex = (*it).substr(kBinlogPrefixLen); - if (slash::string2l(sindex.c_str(), sindex.size(), &index) == 1) { - binlogs.insert(std::pair(static_cast(index), *it)); - } - } - return true; -} - -void Partition::InitKeyScan() { - key_scan_info_.start_time = time(NULL); - char s_time[32]; - int len = strftime(s_time, sizeof(s_time), "%Y-%m-%d %H:%M:%S", localtime(&key_scan_info_.start_time)); - key_scan_info_.s_start_time.assign(s_time, len); - key_scan_info_.duration = -1; // duration -1 mean the task in processing -} - -KeyScanInfo Partition::GetKeyScanInfo() { - slash::MutexLock l(&key_info_protector_); - return key_scan_info_; -} - -Status Partition::GetKeyNum(std::vector* key_info) { - slash::MutexLock l(&key_info_protector_); - if (key_scan_info_.key_scaning_) { - *key_info = key_scan_info_.key_infos; - return Status::OK(); - } - InitKeyScan(); - key_scan_info_.key_scaning_ = true; - key_scan_info_.duration = -2; // duration -2 mean the task in waiting status, - // has not been scheduled for exec - rocksdb::Status s = db_->GetKeyNum(key_info); - if (!s.ok()) { - return Status::Corruption(s.ToString()); - } - key_scan_info_.key_infos = *key_info; - key_scan_info_.duration = time(NULL) - key_scan_info_.start_time; - key_scan_info_.key_scaning_ = false; - return Status::OK(); -} diff --git a/tools/pika_migrate/src/pika_pubsub.cc b/tools/pika_migrate/src/pika_pubsub.cc index c3a0127d3f..935015ae7c 100644 --- a/tools/pika_migrate/src/pika_pubsub.cc +++ b/tools/pika_migrate/src/pika_pubsub.cc @@ -7,26 +7,28 @@ #include "include/pika_server.h" -extern PikaServer *g_pika_server; +extern PikaServer* g_pika_server; - -static std::string ConstructPubSubResp( - const std::string& cmd, - const std::vector>& result) { +static std::string ConstructPubSubResp(const std::string& cmd, const std::vector>& result) { std::stringstream resp; - if (result.size() == 0) { - resp << "*3\r\n" << "$" << cmd.length() << "\r\n" << cmd << "\r\n" << - "$" << -1 << "\r\n" << ":" << 0 << "\r\n"; - } - for (auto it = result.begin(); it != result.end(); it++) { - resp << "*3\r\n" << "$" << cmd.length() << "\r\n" << cmd << "\r\n" << - "$" << it->first.length() << "\r\n" << it->first << "\r\n" << - ":" << it->second << "\r\n"; + if (result.empty()) { + resp << "*3\r\n" + << "$" << cmd.length() << "\r\n" + << cmd << "\r\n" + << "$" << -1 << "\r\n" + << ":" << 0 << "\r\n"; + } + for (const auto & it : result) { + resp << "*3\r\n" + << "$" << cmd.length() << "\r\n" + << cmd << "\r\n" + << "$" << it.first.length() << "\r\n" + << it.first << "\r\n" + << ":" << it.second << "\r\n"; } return resp.str(); } - void PublishCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNamePublish); @@ -36,10 +38,9 @@ void PublishCmd::DoInitial() { msg_ = argv_[2]; } -void PublishCmd::Do(std::shared_ptr partition) { +void PublishCmd::Do() { int receivers = g_pika_server->Publish(channel_, msg_); res_.AppendInteger(receivers); - return; } void SubscribeCmd::DoInitial() { @@ -47,28 +48,33 @@ void SubscribeCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameSubscribe); return; } + for (size_t i = 1; i < argv_.size(); i++) { + channels_.push_back(argv_[i]); + } } -void SubscribeCmd::Do(std::shared_ptr partition) { - std::shared_ptr conn = GetConn(); +void 
SubscribeCmd::Do() { + std::shared_ptr conn = GetConn(); if (!conn) { res_.SetRes(CmdRes::kErrOther, kCmdNameSubscribe); - LOG(WARNING) << name_ << " weak ptr is empty"; + LOG(WARNING) << name_ << " weak ptr is empty"; return; } std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); - if (!cli_conn->IsPubSub()) { cli_conn->server_thread()->MoveConnOut(conn->fd()); - } - std::vector channels; - for (size_t i = 1; i < argv_.size(); i++) { - channels.push_back(argv_[i]); + cli_conn->SetIsPubSub(true); + cli_conn->SetHandleType(net::HandleType::kSynchronous); + cli_conn->SetWriteCompleteCallback([cli_conn]() { + if (!cli_conn->IsPubSub()) { + return; + } + cli_conn->set_is_writable(true); + g_pika_server->EnablePublish(cli_conn->fd()); + }); } std::vector> result; - cli_conn->SetIsPubSub(true); - cli_conn->SetHandleType(pink::HandleType::kSynchronous); - g_pika_server->Subscribe(conn, channels, name_ == kCmdNamePSubscribe, &result); + g_pika_server->Subscribe(conn, channels_, name_ == kCmdNamePSubscribe, &result); return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result)); } @@ -77,31 +83,36 @@ void UnSubscribeCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameUnSubscribe); return; } -} - -void UnSubscribeCmd::Do(std::shared_ptr partition) { - std::vector channels; for (size_t i = 1; i < argv_.size(); i++) { - channels.push_back(argv_[i]); + channels_.push_back(argv_[i]); } +} - std::shared_ptr conn = GetConn(); +void UnSubscribeCmd::Do() { + std::shared_ptr conn = GetConn(); if (!conn) { res_.SetRes(CmdRes::kErrOther, kCmdNameUnSubscribe); - LOG(WARNING) << name_ << " weak ptr is empty"; + LOG(WARNING) << name_ << " weak ptr is empty"; return; } std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); std::vector> result; - int subscribed = g_pika_server->UnSubscribe(conn, channels, name_ == kCmdNamePUnSubscribe, &result); + int subscribed = g_pika_server->UnSubscribe(conn, channels_, name_ == kCmdNamePUnSubscribe, &result); if (subscribed == 0 && cli_conn->IsPubSub()) { /* * if the number of client subscribed is zero, * the client will exit the Pub/Sub state */ - cli_conn->server_thread()->HandleNewConn(conn->fd(), conn->ip_port()); cli_conn->SetIsPubSub(false); + cli_conn->SetWriteCompleteCallback([cli_conn, conn]() { + if (cli_conn->IsPubSub()) { + return; + } + cli_conn->set_is_writable(false); + cli_conn->SetHandleType(net::HandleType::kAsynchronous); + cli_conn->server_thread()->MoveConnIn(conn, net::NotifyType::kNotiWait); + }); } return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result)); } @@ -111,28 +122,33 @@ void PSubscribeCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNamePSubscribe); return; } + for (size_t i = 1; i < argv_.size(); i++) { + channels_.push_back(argv_[i]); + } } -void PSubscribeCmd::Do(std::shared_ptr partition) { - std::shared_ptr conn = GetConn(); +void PSubscribeCmd::Do() { + std::shared_ptr conn = GetConn(); if (!conn) { res_.SetRes(CmdRes::kErrOther, kCmdNamePSubscribe); - LOG(WARNING) << name_ << " weak ptr is empty"; + LOG(WARNING) << name_ << " weak ptr is empty"; return; } std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); - if (!cli_conn->IsPubSub()) { cli_conn->server_thread()->MoveConnOut(conn->fd()); - } - std::vector channels; - for (size_t i = 1; i < argv_.size(); i++) { - channels.push_back(argv_[i]); + cli_conn->SetIsPubSub(true); + cli_conn->SetHandleType(net::HandleType::kSynchronous); + cli_conn->SetWriteCompleteCallback([cli_conn]() { + if (!cli_conn->IsPubSub()) { + return; + } + 
cli_conn->set_is_writable(true); + g_pika_server->EnablePublish(cli_conn->fd()); + }); } std::vector> result; - cli_conn->SetIsPubSub(true); - cli_conn->SetHandleType(pink::HandleType::kSynchronous); - g_pika_server->Subscribe(conn, channels, name_ == kCmdNamePSubscribe, &result); + g_pika_server->Subscribe(conn, channels_, name_ == kCmdNamePSubscribe, &result); return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result)); } @@ -141,31 +157,37 @@ void PUnSubscribeCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNamePUnSubscribe); return; } -} - -void PUnSubscribeCmd::Do(std::shared_ptr partition) { - std::vector channels; for (size_t i = 1; i < argv_.size(); i++) { - channels.push_back(argv_[i]); + channels_.push_back(argv_[i]); } - std::shared_ptr conn = GetConn(); +} + +void PUnSubscribeCmd::Do() { + std::shared_ptr conn = GetConn(); if (!conn) { res_.SetRes(CmdRes::kErrOther, kCmdNamePUnSubscribe); - LOG(WARNING) << name_ << " weak ptr is empty"; + LOG(WARNING) << name_ << " weak ptr is empty"; return; } std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); std::vector> result; - int subscribed = g_pika_server->UnSubscribe(conn, channels, name_ == kCmdNamePUnSubscribe, &result); + int subscribed = g_pika_server->UnSubscribe(conn, channels_, name_ == kCmdNamePUnSubscribe, &result); if (subscribed == 0 && cli_conn->IsPubSub()) { /* * if the number of client subscribed is zero, * the client will exit the Pub/Sub state */ - cli_conn->server_thread()->HandleNewConn(conn->fd(), conn->ip_port()); cli_conn->SetIsPubSub(false); + cli_conn->SetWriteCompleteCallback([cli_conn, conn]() { + if (cli_conn->IsPubSub()) { + return; + } + cli_conn->set_is_writable(false); + cli_conn->SetHandleType(net::HandleType::kAsynchronous); + cli_conn->server_thread()->MoveConnIn(conn, net::NotifyType::kNotiWait); + }); } return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result)); } @@ -176,47 +198,45 @@ void PubSubCmd::DoInitial() { return; } subcommand_ = argv_[1]; - if (strcasecmp(subcommand_.data(), "channels") - && strcasecmp(subcommand_.data(), "numsub") - && strcasecmp(subcommand_.data(), "numpat")) { + if (strcasecmp(subcommand_.data(), "channels") != 0 && strcasecmp(subcommand_.data(), "numsub") != 0 && + strcasecmp(subcommand_.data(), "numpat") != 0) { res_.SetRes(CmdRes::kErrOther, "Unknown PUBSUB subcommand or wrong number of arguments for '" + subcommand_ + "'"); } for (size_t i = 2; i < argv_.size(); i++) { - arguments_.push_back(argv_[i]); + arguments_.push_back(argv_[i]); } } -void PubSubCmd::Do(std::shared_ptr partition) { - if (!strcasecmp(subcommand_.data(), "channels")) { - std::string pattern = ""; - std::vector result; +void PubSubCmd::Do() { + if (strcasecmp(subcommand_.data(), "channels") == 0) { + std::string pattern; + std::vector result; if (arguments_.size() == 1) { pattern = arguments_[0]; } else if (arguments_.size() > 1) { - res_.SetRes(CmdRes::kErrOther, "Unknown PUBSUB subcommand or wrong number of arguments for '" + subcommand_ + "'"); + res_.SetRes(CmdRes::kErrOther, + "Unknown PUBSUB subcommand or wrong number of arguments for '" + subcommand_ + "'"); return; } g_pika_server->PubSubChannels(pattern, &result); - res_.AppendArrayLen(result.size()); - for (auto it = result.begin(); it != result.end(); ++it) { - res_.AppendStringLen((*it).length()); - res_.AppendContent(*it); + res_.AppendArrayLenUint64(result.size()); + for (auto &it : result) { + res_.AppendStringLenUint64(it.length()); + res_.AppendContent(it); } - } else if 
(!strcasecmp(subcommand_.data(), "numsub")) { + } else if (strcasecmp(subcommand_.data(), "numsub") == 0) { std::vector> result; g_pika_server->PubSubNumSub(arguments_, &result); - res_.AppendArrayLen(result.size() * 2); - for (auto it = result.begin(); it != result.end(); ++it) { - res_.AppendStringLen(it->first.length()); - res_.AppendContent(it->first); - res_.AppendInteger(it->second); + res_.AppendArrayLenUint64(result.size() * 2); + for (auto &it : result) { + res_.AppendStringLenUint64(it.first.length()); + res_.AppendContent(it.first); + res_.AppendInteger(it.second); } return; - } else if (!strcasecmp(subcommand_.data(), "numpat")) { + } else if (strcasecmp(subcommand_.data(), "numpat") == 0) { int subscribed = g_pika_server->PubSubNumPat(); res_.AppendInteger(subscribed); } - return; } - diff --git a/tools/pika_migrate/src/pika_repl_bgworker.cc b/tools/pika_migrate/src/pika_repl_bgworker.cc index f68db2d288..dc6724dcc8 100644 --- a/tools/pika_migrate/src/pika_repl_bgworker.cc +++ b/tools/pika_migrate/src/pika_repl_bgworker.cc @@ -3,120 +3,130 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "include/pika_repl_bgworker.h" - #include -#include "pink/include/redis_cli.h" - +#include "include/pika_repl_bgworker.h" +#include "include/pika_cmd_table_manager.h" #include "include/pika_rm.h" -#include "include/pika_conf.h" #include "include/pika_server.h" -#include "include/pika_cmd_table_manager.h" +#include "pstd/include/pstd_defer.h" +#include "src/pstd/include/scope_record_lock.h" +#include "include/pika_conf.h" -extern PikaConf* g_pika_conf; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; -PikaReplBgWorker::PikaReplBgWorker(int queue_size) - : bg_thread_(queue_size) { +PikaReplBgWorker::PikaReplBgWorker(int queue_size) : bg_thread_(queue_size) { bg_thread_.set_thread_name("ReplBgWorker"); - pink::RedisParserSettings settings; + net::RedisParserSettings settings; settings.DealMessage = &(PikaReplBgWorker::HandleWriteBinlog); redis_parser_.RedisParserInit(REDIS_PARSER_REQUEST, settings); redis_parser_.data = this; - table_name_ = g_pika_conf->default_table(); - partition_id_ = 0; - + db_name_ = g_pika_conf->default_db(); } -PikaReplBgWorker::~PikaReplBgWorker() { -} +int PikaReplBgWorker::StartThread() { return bg_thread_.StartThread(); } -int PikaReplBgWorker::StartThread() { - return bg_thread_.StartThread(); -} +int PikaReplBgWorker::StopThread() { return bg_thread_.StopThread(); } -int PikaReplBgWorker::StopThread() { - return bg_thread_.StopThread(); -} +void PikaReplBgWorker::Schedule(net::TaskFunc func, void* arg) { bg_thread_.Schedule(func, arg); } -void PikaReplBgWorker::Schedule(pink::TaskFunc func, void* arg) { - bg_thread_.Schedule(func, arg); +void PikaReplBgWorker::Schedule(net::TaskFunc func, void* arg, std::function& call_back) { + bg_thread_.Schedule(func, arg, call_back); } -void PikaReplBgWorker::QueueClear() { - bg_thread_.QueueClear(); +void PikaReplBgWorker::ParseBinlogOffset(const InnerMessage::BinlogOffset& pb_offset, LogOffset* offset) { + offset->b_offset.filenum = pb_offset.filenum(); + offset->b_offset.offset = pb_offset.offset(); + offset->l_offset.term = pb_offset.term(); + offset->l_offset.index = pb_offset.index(); } void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) 
{ - ReplClientWriteBinlogTaskArg* task_arg = static_cast(arg); + auto task_arg = static_cast<ReplClientWriteBinlogTaskArg*>(arg); const std::shared_ptr<InnerMessage::InnerResponse> res = task_arg->res; - std::shared_ptr conn = task_arg->conn; - std::vector* index = static_cast* >(task_arg->res_private_data); + std::shared_ptr<net::NetConn> conn = task_arg->conn; + auto index = static_cast<std::vector<int>*>(task_arg->res_private_data); PikaReplBgWorker* worker = task_arg->worker; worker->ip_port_ = conn->ip_port(); - std::string table_name; - uint32_t partition_id = 0; - BinlogOffset ack_start, ack_end; + DEFER { + delete index; + delete task_arg; + }; + + std::string db_name; + + LogOffset pb_begin; + LogOffset pb_end; + bool only_keepalive = false; + + // find the first non-keepalive BinlogSync for (size_t i = 0; i < index->size(); ++i) { const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync((*index)[i]); if (i == 0) { - table_name = binlog_res.partition().table_name(); - partition_id = binlog_res.partition().partition_id(); + db_name = binlog_res.slot().db_name(); } if (!binlog_res.binlog().empty()) { - ack_start.filenum = binlog_res.binlog_offset().filenum(); - ack_start.offset = binlog_res.binlog_offset().offset(); + ParseBinlogOffset(binlog_res.binlog_offset(), &pb_begin); break; } } - worker->table_name_ = table_name; - worker->partition_id_ = partition_id; - std::shared_ptr partition = g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition) { - LOG(WARNING) << "Partition " << table_name << "_" << partition_id << " Not Found"; - delete index; - delete task_arg; + // find the last non-keepalive BinlogSync + for (int i = static_cast<int>(index->size() - 1); i >= 0; i--) { + const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync((*index)[i]); + if (!binlog_res.binlog().empty()) { + ParseBinlogOffset(binlog_res.binlog_offset(), &pb_end); + break; + } + } + + if (pb_begin == LogOffset()) { + only_keepalive = true; + } + + LogOffset ack_start; + if (only_keepalive) { + ack_start = LogOffset(); + } else { + ack_start = pb_begin; + } + + // because DispatchBinlogRes() has already ordered them.
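+ // For illustration (inferred from the logic above): a batch [keepalive, binlog(5,100), binlog(5,200), keepalive] + // yields pb_begin = (5,100) and pb_end = (5,200), so the ack sent back spans that range; a batch of + // only keepalives leaves ack_start and ack_end at LogOffset(), and the ack degenerates to a plain ping reply.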
+ worker->db_name_ = db_name; + + std::shared_ptr<SyncMasterDB> db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!db) { + LOG(WARNING) << "DB " << db_name << " Not Found"; return; } - std::shared_ptr<SyncSlavePartition> slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_name, partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition " << table_name << "_" << partition_id << " Not Found"; - delete index; - delete task_arg; + std::shared_ptr<SyncSlaveDB> slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "Slave DB " << db_name << " Not Found"; return; } - for (size_t i = 0; i < index->size(); ++i) { - const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync((*index)[i]); - // if pika are not current a slave or partition not in + for (int i : *index) { + const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync(i); + // if pika is not currently a slave, or the DB is not in // BinlogSync state, we drop the remaining write binlog tasks - if ((g_pika_conf->classic_mode() && !(g_pika_server->role() & PIKA_ROLE_SLAVE)) - || ((slave_partition->State() != ReplState::kConnected) - && (slave_partition->State() != ReplState::kWaitDBSync))) { - delete index; - delete task_arg; + if (((g_pika_server->role() & PIKA_ROLE_SLAVE) == 0) || + ((slave_db->State() != ReplState::kConnected) && (slave_db->State() != ReplState::kWaitDBSync))) { return; } - if (!g_pika_rm->CheckSlavePartitionSessionId( - binlog_res.partition().table_name(), - binlog_res.partition().partition_id(), - binlog_res.session_id())) { - LOG(WARNING) << "Check Session failed " - << binlog_res.partition().table_name() - << "_" << binlog_res.partition().partition_id(); - slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; + if (slave_db->MasterSessionId() != binlog_res.session_id()) { + LOG(WARNING) << "Check SessionId Mismatch: " << slave_db->MasterIp() << ":" + << slave_db->MasterPort() << ", " << slave_db->SyncDBInfo().ToString() + << " expected_session: " << binlog_res.session_id() + << ", actual_session: " << slave_db->MasterSessionId(); + LOG(WARNING) << "Check Session failed " << binlog_res.slot().db_name(); + slave_db->SetReplState(ReplState::kTryConnect); return; } @@ -126,136 +136,155 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { } if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog_res.binlog(), &worker->binlog_item_)) { LOG(WARNING) << "Binlog item decode failed"; - slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; + slave_db->SetReplState(ReplState::kTryConnect); return; } const char* redis_parser_start = binlog_res.binlog().data() + BINLOG_ENCODE_LEN; int redis_parser_len = static_cast<int>(binlog_res.binlog().size()) - BINLOG_ENCODE_LEN; int processed_len = 0; - pink::RedisParserStatus ret = worker->redis_parser_.ProcessInputBuffer( - redis_parser_start, redis_parser_len, &processed_len); - if (ret != pink::kRedisParserDone) { + net::RedisParserStatus ret = + worker->redis_parser_.ProcessInputBuffer(redis_parser_start, redis_parser_len, &processed_len); + if (ret != net::kRedisParserDone) { LOG(WARNING) << "Redis parser failed"; - slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; + slave_db->SetReplState(ReplState::kTryConnect); return; } } - delete index; - delete task_arg; - - // Reply Ack to master immediately - std::shared_ptr<Binlog> logger = partition->logger(); -
logger->GetProducerStatus(&ack_end.filenum, &ack_end.offset); - // keepalive case - if (ack_start == BinlogOffset()) { - // set ack_end as 0 - ack_end = ack_start; + + LogOffset ack_end; + if (only_keepalive) { + ack_end = LogOffset(); + } else { + LogOffset producer_status; + // Reply Ack to master immediately + std::shared_ptr<Binlog> logger = db->Logger(); + logger->GetProducerStatus(&producer_status.b_offset.filenum, &producer_status.b_offset.offset, + &producer_status.l_offset.term, &producer_status.l_offset.index); + ack_end = producer_status; + ack_end.l_offset.term = pb_end.l_offset.term; } - g_pika_rm->SendPartitionBinlogSyncAckRequest(table_name, partition_id, ack_start, ack_end); -} -int PikaReplBgWorker::HandleWriteBinlog(pink::RedisParser* parser, const pink::RedisCmdArgsType& argv) { - PikaReplBgWorker* worker = static_cast<PikaReplBgWorker*>(parser->data); - const BinlogItem& binlog_item = worker->binlog_item_; - g_pika_server->UpdateQueryNumAndExecCountTable(argv[0]); + g_pika_rm->SendBinlogSyncAckRequest(db_name, ack_start, ack_end); +} +int PikaReplBgWorker::HandleWriteBinlog(net::RedisParser* parser, const net::RedisCmdArgsType& argv) { + std::string opt = argv[0]; + auto worker = static_cast<PikaReplBgWorker*>(parser->data); // Monitor related std::string monitor_message; if (g_pika_server->HasMonitorClients()) { - std::string table_name = g_pika_conf->classic_mode() - ? worker->table_name_.substr(2) : worker->table_name_; - std::string monitor_message = std::to_string(1.0 * slash::NowMicros() / 1000000) - + " [" + table_name + " " + worker->ip_port_ + "]"; + std::string db_name = worker->db_name_.substr(2); + std::string monitor_message = + std::to_string(static_cast<double>(pstd::NowMicros()) / 1000000) + " [" + db_name + " " + worker->ip_port_ + "]"; for (const auto& item : argv) { - monitor_message += " " + slash::ToRead(item); + monitor_message += " " + pstd::ToRead(item); } g_pika_server->AddMonitorMessage(monitor_message); } - std::string opt = argv[0]; - std::shared_ptr<Cmd> c_ptr = g_pika_cmd_table_manager->GetCmd(slash::StringToLower(opt)); + std::shared_ptr<Cmd> c_ptr = g_pika_cmd_table_manager->GetCmd(pstd::StringToLower(opt)); if (!c_ptr) { - LOG(WARNING) << "Command " << opt << " not in the command table"; + LOG(WARNING) << "Command " << opt << " not in the command table"; return -1; } // Initial - c_ptr->Initial(argv, worker->table_name_); + c_ptr->Initial(argv, worker->db_name_); if (!c_ptr->res().ok()) { LOG(WARNING) << "Fail to initial command from binlog: " << opt; return -1; } - std::shared_ptr<Partition> partition = g_pika_server->GetTablePartitionById(worker->table_name_, worker->partition_id_); - std::shared_ptr<Binlog> logger = partition->logger(); - - logger->Lock(); - logger->Put(c_ptr->ToBinlog(binlog_item.exec_time(), - std::to_string(binlog_item.server_id()), - binlog_item.logic_id(), - binlog_item.filenum(), - binlog_item.offset())); - uint32_t filenum; - uint64_t offset; - logger->GetProducerStatus(&filenum, &offset); - logger->Unlock(); - - PikaCmdArgsType *v = new PikaCmdArgsType(argv); - BinlogItem *b = new BinlogItem(binlog_item); - std::string dispatch_key = argv.size() >= 2 ?
argv[1] : argv[0]; - g_pika_rm->ScheduleWriteDBTask(dispatch_key, v, b, worker->table_name_, worker->partition_id_); + g_pika_server->UpdateQueryNumAndExecCountDB(worker->db_name_, opt, c_ptr->is_write()); + + std::shared_ptr<SyncMasterDB> db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(worker->db_name_)); + if (!db) { + LOG(WARNING) << worker->db_name_ << " Not found."; + return -1; + } + + db->ConsensusProcessLeaderLog(c_ptr, worker->binlog_item_); return 0; } void PikaReplBgWorker::HandleBGWorkerWriteDB(void* arg) { - ReplClientWriteDBTaskArg* task_arg = static_cast<ReplClientWriteDBTaskArg*>(arg); - PikaCmdArgsType* argv = task_arg->argv; - BinlogItem binlog_item = *(task_arg->binlog_item); - std::string table_name = task_arg->table_name; - uint32_t partition_id = task_arg->partition_id; - std::string opt = (*argv)[0]; - slash::StringToLower(opt); - - // Get command - std::shared_ptr<Cmd> c_ptr = g_pika_cmd_table_manager->GetCmd(slash::StringToLower(opt)); - if (!c_ptr) { - LOG(WARNING) << "Error operation from binlog: " << opt; - delete task_arg; - return; - } + std::unique_ptr<ReplClientWriteDBTaskArg> task_arg(static_cast<ReplClientWriteDBTaskArg*>(arg)); + const std::shared_ptr<Cmd> c_ptr = task_arg->cmd_ptr; + WriteDBInSyncWay(c_ptr); } - // Initial - c_ptr->Initial(*argv, table_name); - if (!c_ptr->res().ok()) { - LOG(WARNING) << "Fail to initial command from binlog: " << opt; - delete task_arg; - return; - } +void PikaReplBgWorker::WriteDBInSyncWay(const std::shared_ptr<Cmd>& c_ptr) { + const PikaCmdArgsType& argv = c_ptr->argv(); uint64_t start_us = 0; if (g_pika_conf->slowlog_slower_than() >= 0) { - start_us = slash::NowMicros(); + start_us = pstd::NowMicros(); + } + // Add read lock for non-suspend commands + pstd::lock::MultiRecordLock record_lock(c_ptr->GetDB()->LockMgr()); + record_lock.Lock(c_ptr->current_key()); + if (!c_ptr->IsSuspend()) { + c_ptr->GetDB()->DBLockShared(); + } + if (c_ptr->IsNeedCacheDo() + && PIKA_CACHE_NONE != g_pika_conf->cache_mode() + && c_ptr->GetDB()->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) { + if (c_ptr->is_write()) { + ParseAndSendPikaCommand(c_ptr); + c_ptr->DoThroughDB(); + if (c_ptr->IsNeedUpdateCache()) { + c_ptr->DoUpdateCache(); + } + } else { + LOG(WARNING) << "It is impossible to reach here"; + } + } else { + ParseAndSendPikaCommand(c_ptr); + c_ptr->Do(); + } + if (!c_ptr->IsSuspend()) { + c_ptr->GetDB()->DBUnlockShared(); } - std::shared_ptr<Partition> partition = g_pika_server->GetTablePartitionById(table_name, partition_id); - if (strcmp(table_name.data(), "db0") || partition_id != 0) { - LOG(FATAL) << "table_name: " << table_name << ", partition_id: " - << std::to_string(partition_id) << ", but only single DB data is support transfer"; - return; + if (c_ptr->res().ok() + && c_ptr->is_write() + && c_ptr->name() != kCmdNameFlushdb + && c_ptr->name() != kCmdNameFlushall + && c_ptr->name() != kCmdNameExec) { + auto table_keys = c_ptr->current_key(); + for (auto& key : table_keys) { + key = c_ptr->db_name().append(key); + } + auto dispatcher = dynamic_cast<net::DispatchThread*>(g_pika_server->pika_dispatch_thread()->server_thread()); + auto involved_conns = dispatcher->GetInvolvedTxn(table_keys); + for (auto& conn : involved_conns) { + auto c = std::dynamic_pointer_cast<PikaClientConn>(conn); + c->SetTxnWatchFailState(true); + } + } + + record_lock.Unlock(c_ptr->current_key()); + if (g_pika_conf->slowlog_slower_than() >= 0) { + auto start_time = static_cast<int32_t>(start_us / 1000000); + auto duration = static_cast<int64_t>(pstd::NowMicros() - start_us); + if (duration > g_pika_conf->slowlog_slower_than()) { + g_pika_server->SlowlogPushEntry(argv, start_time, duration); + if (g_pika_conf->slowlog_write_errorlog()) {
+ LOG(INFO) << "command: " << argv[0] << ", start_time(s): " << start_time << ", duration(us): " << duration; + } + } } +} - /* convert Pika custom command to Redis standard command */ - if (!strcasecmp((*argv)[0].data(), "pksetexat")) { - if (argv->size() != 4) { - LOG(WARNING) << "find invaild command, command size: " << argv->size(); +void PikaReplBgWorker::ParseAndSendPikaCommand(const std::shared_ptr<Cmd>& c_ptr) { + const PikaCmdArgsType& argv = c_ptr->argv(); + if (!strcasecmp(argv[0].data(), "pksetexat")) { + if (argv.size() != 4) { + LOG(WARNING) << "found invalid command, command size: " << argv.size(); return; } else { - std::string key = (*argv)[1]; - int timestamp = std::atoi((*argv)[2].data()); - std::string value = (*argv)[3]; + std::string key = argv[1]; + int timestamp = std::atoi(argv[2].data()); + std::string value = argv[3]; int seconds = timestamp - time(NULL); PikaCmdArgsType tmp_argv; @@ -265,37 +294,13 @@ void PikaReplBgWorker::HandleBGWorkerWriteDB(void* arg) { tmp_argv.push_back(value); std::string command; - pink::SerializeRedisCommand(tmp_argv, &command); + net::SerializeRedisCommand(tmp_argv, &command); g_pika_server->SendRedisCommand(command, key); } } else { - std::string key = argv->size() > 1 ? (*argv)[1] : ""; + std::string key = argv.size() >= 2 ? argv[1] : argv[0]; std::string command; - pink::SerializeRedisCommand(*argv, &command); + net::SerializeRedisCommand(argv, &command); g_pika_server->SendRedisCommand(command, key); } - - // Add read lock for no suspend command - if (!c_ptr->is_suspend()) { - partition->DbRWLockReader(); - } - - c_ptr->Do(partition); - - if (!c_ptr->is_suspend()) { - partition->DbRWUnLock(); - } - - if (g_pika_conf->slowlog_slower_than() >= 0) { - int32_t start_time = start_us / 1000000; - int64_t duration = slash::NowMicros() - start_us; - if (duration > g_pika_conf->slowlog_slower_than()) { - g_pika_server->SlowlogPushEntry(*argv, start_time, duration); - if (g_pika_conf->slowlog_write_errorlog()) { - LOG(ERROR) << "command: " << opt << ", start_time(s): " << start_time << ", duration(us): " << duration; - } - } - } - delete task_arg; -} - +} \ No newline at end of file diff --git a/tools/pika_migrate/src/pika_repl_client.cc b/tools/pika_migrate/src/pika_repl_client.cc index 5c78dfab7d..117b5adb8c 100644 --- a/tools/pika_migrate/src/pika_repl_client.cc +++ b/tools/pika_migrate/src/pika_repl_client.cc @@ -5,119 +5,174 @@ #include "include/pika_repl_client.h" -#include -#include #include +#include +#include + +#include -#include "pink/include/pink_cli.h" -#include "pink/include/redis_cli.h" -#include "slash/include/slash_coding.h" -#include "slash/include/env.h" -#include "slash/include/slash_string.h" +#include "net/include/net_cli.h" +#include "net/include/redis_cli.h" +#include "pstd/include/env.h" +#include "pstd/include/pstd_coding.h" +#include "pstd/include/pstd_string.h" #include "include/pika_rm.h" #include "include/pika_server.h" +using pstd::Status; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr<PikaReplicaManager> g_pika_rm; -PikaReplClient::PikaReplClient(int cron_interval, int keepalive_timeout) : next_avail_(0) { - client_thread_ = new PikaReplClientThread(cron_interval, keepalive_timeout); +PikaReplClient::PikaReplClient(int cron_interval, int keepalive_timeout) { + for (int i = 0; i < MAX_DB_NUM; i++) { + async_write_db_task_counts_[i].store(0, std::memory_order::memory_order_seq_cst); + } + client_thread_ = std::make_unique<PikaReplClientThread>(cron_interval, keepalive_timeout);
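ParseAndSendPikaCommand above rewrites the Pika-specific PKSETEXAT (absolute expiry timestamp) into a standard SETEX (relative TTL) before forwarding it to the target Redis. A minimal standalone sketch of that conversion; the helper name RewritePkSetexAt is illustrative, and the already-expired case is handled by returning false, which the real code does not do:

#include <cstdlib>
#include <ctime>
#include <string>
#include <vector>

// Rewrites {"PKSETEXAT", key, abs_unix_ts, value} into {"SETEX", key, ttl, value}.
// Returns false when the command is malformed or the key has already expired
// (Redis rejects a non-positive SETEX ttl).
bool RewritePkSetexAt(const std::vector<std::string>& argv, std::vector<std::string>* out) {
  if (argv.size() != 4) return false;
  long long abs_ts = std::atoll(argv[2].c_str());
  long long ttl = abs_ts - static_cast<long long>(time(nullptr));
  if (ttl <= 0) return false;
  *out = {"SETEX", argv[1], std::to_string(ttl), argv[3]};
  return true;
}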
client_thread_->set_thread_name("PikaReplClient"); - for (int i = 0; i < 2 * g_pika_conf->sync_thread_num(); ++i) { - bg_workers_.push_back(new PikaReplBgWorker(PIKA_SYNC_BUFFER_SIZE)); + for (int i = 0; i < g_pika_conf->sync_binlog_thread_num(); i++) { + auto new_binlog_worker = std::make_unique(PIKA_SYNC_BUFFER_SIZE); + std::string binlog_worker_name = "ReplBinlogWorker" + std::to_string(i); + new_binlog_worker->SetThreadName(binlog_worker_name); + write_binlog_workers_.emplace_back(std::move(new_binlog_worker)); + } + for (int i = 0; i < g_pika_conf->sync_thread_num(); ++i) { + auto new_db_worker = std::make_unique(PIKA_SYNC_BUFFER_SIZE); + std::string db_worker_name = "ReplWriteDBWorker" + std::to_string(i); + new_db_worker->SetThreadName(db_worker_name); + write_db_workers_.emplace_back(std::move(new_db_worker)); } } PikaReplClient::~PikaReplClient() { client_thread_->StopThread(); - delete client_thread_; - for (size_t i = 0; i < bg_workers_.size(); ++i) { - delete bg_workers_[i]; - } LOG(INFO) << "PikaReplClient exit!!!"; } int PikaReplClient::Start() { int res = client_thread_->StartThread(); - if (res != pink::kSuccess) { - LOG(FATAL) << "Start ReplClient ClientThread Error: " << res << (res == pink::kCreateThreadError ? ": create thread error " : ": other error"); + if (res != net::kSuccess) { + LOG(FATAL) << "Start ReplClient ClientThread Error: " << res + << (res == net::kCreateThreadError ? ": create thread error " : ": other error"); } - for (size_t i = 0; i < bg_workers_.size(); ++i) { - res = bg_workers_[i]->StartThread(); - if (res != pink::kSuccess) { - LOG(FATAL) << "Start Pika Repl Worker Thread Error: " << res - << (res == pink::kCreateThreadError ? ": create thread error " : ": other error"); + for (auto & binlog_worker : write_binlog_workers_) { + res = binlog_worker->StartThread(); + if (res != net::kSuccess) { + LOG(FATAL) << "Start Pika Repl Write Binlog Worker Thread Error: " << res + << (res == net::kCreateThreadError ? ": create thread error " : ": other error"); } } + for (auto & db_worker : write_db_workers_) { + res = db_worker->StartThread(); + if (res != net::kSuccess) { + LOG(FATAL) << "Start Pika Repl Write DB Worker Thread Error: " << res + << (res == net::kCreateThreadError ? 
": create thread error " : ": other error"); + } + } return res; } int PikaReplClient::Stop() { client_thread_->StopThread(); - for (size_t i = 0; i < bg_workers_.size(); ++i) { - bg_workers_[i]->StopThread(); + for (auto & binlog_worker : write_binlog_workers_) { + binlog_worker->StopThread(); + } + + // write DB task is async task, we must wait all writeDB task done and then to exit + // or some data will be loss + bool all_write_db_task_done = true; + do { + for (auto &db_worker: write_db_workers_) { + if (db_worker->TaskQueueSize() != 0) { + all_write_db_task_done = false; + std::this_thread::sleep_for(std::chrono::microseconds(300)); + break; + } else { + all_write_db_task_done = true; + } + } + //if there are unfinished async write db task, just continue to wait + } while (!all_write_db_task_done); + + for (auto &db_worker: write_db_workers_) { + db_worker->StopThread(); } return 0; } -void PikaReplClient::Schedule(pink::TaskFunc func, void* arg) { - bg_workers_[next_avail_]->Schedule(func, arg); +void PikaReplClient::Schedule(net::TaskFunc func, void* arg) { + write_binlog_workers_[next_avail_]->Schedule(func, arg); UpdateNextAvail(); } -void PikaReplClient::ScheduleWriteBinlogTask(std::string table_partition, - const std::shared_ptr res, - std::shared_ptr conn, void* res_private_data) { - size_t index = GetHashIndex(table_partition, true); - ReplClientWriteBinlogTaskArg* task_arg = - new ReplClientWriteBinlogTaskArg(res, conn, res_private_data, bg_workers_[index]); - bg_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteBinlog, static_cast(task_arg)); +void PikaReplClient::ScheduleByDBName(net::TaskFunc func, void* arg, const std::string& db_name) { + size_t index = GetBinlogWorkerIndexByDBName(db_name); + write_binlog_workers_[index]->Schedule(func, arg); +}; + +void PikaReplClient::ScheduleWriteBinlogTask(const std::string& db_name, + const std::shared_ptr& res, + const std::shared_ptr& conn, void* res_private_data) { + size_t index = GetBinlogWorkerIndexByDBName(db_name); + auto task_arg = new ReplClientWriteBinlogTaskArg(res, conn, res_private_data, write_binlog_workers_[index].get()); + write_binlog_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteBinlog, static_cast(task_arg)); } -void PikaReplClient::ScheduleWriteDBTask(const std::string& dispatch_key, - PikaCmdArgsType* argv, BinlogItem* binlog_item, - const std::string& table_name, uint32_t partition_id) { - size_t index = GetHashIndex(dispatch_key, false); - ReplClientWriteDBTaskArg* task_arg = - new ReplClientWriteDBTaskArg(argv, binlog_item, table_name, partition_id); - bg_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteDB, static_cast(task_arg)); +void PikaReplClient::ScheduleWriteDBTask(const std::shared_ptr& cmd_ptr, const std::string& db_name) { + const PikaCmdArgsType& argv = cmd_ptr->argv(); + std::string dispatch_key = argv.size() >= 2 ? argv[1] : argv[0]; + size_t index = GetHashIndexByKey(dispatch_key); + auto task_arg = new ReplClientWriteDBTaskArg(cmd_ptr); + + IncrAsyncWriteDBTaskCount(db_name, 1); + std::function task_finish_call_back = [this, db_name]() { this->DecrAsyncWriteDBTaskCount(db_name, 1); }; + + write_db_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteDB, static_cast(task_arg), + task_finish_call_back); } -size_t PikaReplClient::GetHashIndex(std::string key, bool upper_half) { - size_t hash_base = bg_workers_.size() / 2; - return (str_hash(key) % hash_base) + (upper_half ? 
0 : hash_base); +size_t PikaReplClient::GetBinlogWorkerIndexByDBName(const std::string &db_name) { + char db_num_c = db_name.back(); + int32_t db_num = db_num_c - '0'; + //Valid range of db_num is [0, MAX_DB_NUM) + if (db_num < 0 || db_num >= MAX_DB_NUM) { + LOG(ERROR) + << "Corruption in consuming binlog: the last char of the db_name(extracted from binlog) is not a valid db num, the extracted db_num is " + << db_num_c << " while write_binlog_workers.size() is " << write_binlog_workers_.size(); + if (db_num < 0) { assert(false && "db_num invalid, check if the db_name in the request is valid, also check the ERROR Log of Pika."); } + } + return db_num % write_binlog_workers_.size(); } -Status PikaReplClient::Write(const std::string& ip, const int port, const std::string& msg) { - return client_thread_->Write(ip, port, msg); +size_t PikaReplClient::GetHashIndexByKey(const std::string& key) { + size_t hash_base = write_db_workers_.size(); + return (str_hash(key) % hash_base); } -Status PikaReplClient::Close(const std::string& ip, const int port) { - return client_thread_->Close(ip, port); +Status PikaReplClient::Write(const std::string& ip, const int port, const std::string& msg) { + return client_thread_->Write(ip, port, msg); } +Status PikaReplClient::Close(const std::string& ip, const int port) { return client_thread_->Close(ip, port); } Status PikaReplClient::SendMetaSync() { std::string local_ip; - pink::PinkCli* cli = pink::NewRedisCli(); + std::unique_ptr cli (net::NewRedisCli()); cli->set_connect_timeout(1500); if ((cli->Connect(g_pika_server->master_ip(), g_pika_server->master_port(), "")).ok()) { struct sockaddr_in laddr; socklen_t llen = sizeof(laddr); - getsockname(cli->fd(), (struct sockaddr*) &laddr, &llen); + getsockname(cli->fd(), reinterpret_cast(&laddr), &llen); std::string tmp_local_ip(inet_ntoa(laddr.sin_addr)); local_ip = tmp_local_ip; cli->Close(); - delete cli; } else { - LOG(WARNING) << "Failed to connect master, Master (" - << g_pika_server->master_ip() << ":" << g_pika_server->master_port() << "), try reconnect"; + LOG(WARNING) << "Failed to connect master, Master (" << g_pika_server->master_ip() << ":" + << g_pika_server->master_port() << "), try reconnect"; // Sleep three seconds to avoid frequent try Meta Sync // when the connection fails sleep(3); g_pika_server->ResetMetaSyncStatus(); - delete cli; return Status::Corruption("Connect master error"); } @@ -137,31 +192,30 @@ Status PikaReplClient::SendMetaSync() { std::string master_ip = g_pika_server->master_ip(); int master_port = g_pika_server->master_port(); if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Meta Sync Request Failed, to Master (" - << master_ip << ":" << master_port << ")"; + LOG(WARNING) << "Serialize Meta Sync Request Failed, to Master (" << master_ip << ":" << master_port << ")"; return Status::Corruption("Serialize Failed"); } - LOG(INFO) << "Try Send Meta Sync Request to Master (" - << master_ip << ":" << master_port << ")"; + LOG(INFO) << "Try Send Meta Sync Request to Master (" << master_ip << ":" << master_port << ")"; return client_thread_->Write(master_ip, master_port + kPortShiftReplServer, to_send); } -Status PikaReplClient::SendPartitionDBSync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& boffset, - const std::string& local_ip) { +Status PikaReplClient::SendDBSync(const std::string& ip, uint32_t port, const std::string& db_name, + const BinlogOffset& boffset, const std::string& local_ip) { 
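GetBinlogWorkerIndexByDBName above routes by the trailing digit of the db name ("db0", "db1", ...), so every binlog for a given DB lands on the same worker and per-DB apply order is preserved. A minimal sketch under the same single-digit-suffix assumption; MAX_DB_NUM and the worker-count relationship are taken from the surrounding code, while the function name here is illustrative:

#include <cstddef>
#include <string>

// Maps "db0".."db<N>" to a worker slot; all traffic for one DB hashes to the
// same slot, which is what keeps binlog application ordered per DB.
size_t WorkerIndexForDB(const std::string& db_name, size_t worker_count) {
  int db_num = db_name.empty() ? 0 : db_name.back() - '0';
  if (db_num < 0 || db_num > 9) db_num = 0;  // defensive fallback for odd names
  return static_cast<size_t>(db_num) % worker_count;
}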
InnerMessage::InnerRequest request; request.set_type(InnerMessage::kDBSync); InnerMessage::InnerRequest::DBSync* db_sync = request.mutable_db_sync(); InnerMessage::Node* node = db_sync->mutable_node(); node->set_ip(local_ip); node->set_port(g_pika_server->port()); - InnerMessage::Partition* partition = db_sync->mutable_partition(); - partition->set_table_name(table_name); - partition->set_partition_id(partition_id); + InnerMessage::Slot* db = db_sync->mutable_slot(); + db->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db->set_slot_id(0); InnerMessage::BinlogOffset* binlog_offset = db_sync->mutable_binlog_offset(); binlog_offset->set_filenum(boffset.filenum); @@ -169,29 +223,28 @@ Status PikaReplClient::SendPartitionDBSync(const std::string& ip, std::string to_send; if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Partition DBSync Request Failed, to Master (" - << ip << ":" << port << ")"; + LOG(WARNING) << "Serialize DB DBSync Request Failed, to Master (" << ip << ":" << port << ")"; return Status::Corruption("Serialize Failed"); } - return client_thread_->Write(ip, port + kPortShiftReplServer, to_send); + return client_thread_->Write(ip, static_cast(port) + kPortShiftReplServer, to_send); } - -Status PikaReplClient::SendPartitionTrySync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& boffset, - const std::string& local_ip) { +Status PikaReplClient::SendTrySync(const std::string& ip, uint32_t port, const std::string& db_name, + const BinlogOffset& boffset, const std::string& local_ip) { InnerMessage::InnerRequest request; request.set_type(InnerMessage::kTrySync); InnerMessage::InnerRequest::TrySync* try_sync = request.mutable_try_sync(); InnerMessage::Node* node = try_sync->mutable_node(); node->set_ip(local_ip); node->set_port(g_pika_server->port()); - InnerMessage::Partition* partition = try_sync->mutable_partition(); - partition->set_table_name(table_name); - partition->set_partition_id(partition_id); + InnerMessage::Slot* db = try_sync->mutable_slot(); + db->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db->set_slot_id(0); InnerMessage::BinlogOffset* binlog_offset = try_sync->mutable_binlog_offset(); binlog_offset->set_filenum(boffset.filenum); @@ -199,73 +252,81 @@ Status PikaReplClient::SendPartitionTrySync(const std::string& ip, std::string to_send; if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Partition TrySync Request Failed, to Master (" - << ip << ":" << port << ")"; + LOG(WARNING) << "Serialize DB TrySync Request Failed, to Master (" << ip << ":" << port << ")"; return Status::Corruption("Serialize Failed"); } - return client_thread_->Write(ip, port + kPortShiftReplServer, to_send); + return client_thread_->Write(ip, static_cast(port + kPortShiftReplServer), to_send); } -Status PikaReplClient::SendPartitionBinlogSync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& ack_start, - const BinlogOffset& ack_end, - const std::string& local_ip, - bool is_first_send) { +Status PikaReplClient::SendBinlogSync(const std::string& ip, uint32_t port, const std::string& db_name, + const LogOffset& ack_start, 
const LogOffset& ack_end, + const std::string& local_ip, bool is_first_send) { InnerMessage::InnerRequest request; request.set_type(InnerMessage::kBinlogSync); InnerMessage::InnerRequest::BinlogSync* binlog_sync = request.mutable_binlog_sync(); InnerMessage::Node* node = binlog_sync->mutable_node(); node->set_ip(local_ip); node->set_port(g_pika_server->port()); - binlog_sync->set_table_name(table_name); - binlog_sync->set_partition_id(partition_id); + binlog_sync->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + binlog_sync->set_slot_id(0); binlog_sync->set_first_send(is_first_send); InnerMessage::BinlogOffset* ack_range_start = binlog_sync->mutable_ack_range_start(); - ack_range_start->set_filenum(ack_start.filenum); - ack_range_start->set_offset(ack_start.offset); + ack_range_start->set_filenum(ack_start.b_offset.filenum); + ack_range_start->set_offset(ack_start.b_offset.offset); + ack_range_start->set_term(ack_start.l_offset.term); + ack_range_start->set_index(ack_start.l_offset.index); InnerMessage::BinlogOffset* ack_range_end = binlog_sync->mutable_ack_range_end(); - ack_range_end->set_filenum(ack_end.filenum); - ack_range_end->set_offset(ack_end.offset); - - int32_t session_id = g_pika_rm->GetSlavePartitionSessionId(table_name, partition_id); + ack_range_end->set_filenum(ack_end.b_offset.filenum); + ack_range_end->set_offset(ack_end.b_offset.offset); + ack_range_end->set_term(ack_end.l_offset.term); + ack_range_end->set_index(ack_end.l_offset.index); + + std::shared_ptr slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << db_name << " not exist"; + return Status::NotFound("SyncSlaveDB NotFound"); + } + int32_t session_id = slave_db->MasterSessionId(); binlog_sync->set_session_id(session_id); std::string to_send; if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Partition BinlogSync Request Failed, to Master (" - << ip << ":" << port << ")"; + LOG(WARNING) << "Serialize DB BinlogSync Request Failed, to Master (" << ip << ":" << port << ")"; return Status::Corruption("Serialize Failed"); } - return client_thread_->Write(ip, port + kPortShiftReplServer, to_send); + return client_thread_->Write(ip, static_cast(port + kPortShiftReplServer), to_send); } -Status PikaReplClient::SendRemoveSlaveNode(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, +Status PikaReplClient::SendRemoveSlaveNode(const std::string& ip, uint32_t port, const std::string& db_name, const std::string& local_ip) { InnerMessage::InnerRequest request; request.set_type(InnerMessage::kRemoveSlaveNode); - InnerMessage::InnerRequest::RemoveSlaveNode* remove_slave_node = - request.add_remove_slave_node(); + InnerMessage::InnerRequest::RemoveSlaveNode* remove_slave_node = request.add_remove_slave_node(); InnerMessage::Node* node = remove_slave_node->mutable_node(); node->set_ip(local_ip); node->set_port(g_pika_server->port()); - InnerMessage::Partition* partition = remove_slave_node->mutable_partition(); - partition->set_table_name(table_name); - partition->set_partition_id(partition_id); + InnerMessage::Slot* db = remove_slave_node->mutable_slot(); + db->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + 
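The ack fields above serialize two coordinate systems per offset: the physical binlog position (filenum/offset) and the logical consensus position (term/index) carried by the new protobuf fields. A sketch of that pairing, with illustrative names rather than the real LogOffset type from the Pika headers:

#include <cstdint>

struct BinlogPos { uint32_t filenum = 0; uint64_t offset = 0; };  // physical file position
struct LogicPos  { uint32_t term = 0;    uint64_t index = 0;  };  // consensus coordinate
struct FullOffset {
  BinlogPos b;
  LogicPos  l;
  // A default-constructed FullOffset doubles as the "keepalive" sentinel,
  // matching the ack_start == LogOffset() checks in the handlers above.
  bool operator==(const FullOffset& o) const {
    return b.filenum == o.b.filenum && b.offset == o.b.offset &&
           l.term == o.l.term && l.index == o.l.index;
  }
};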
db->set_slot_id(0); std::string to_send; if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Remove Slave Node Failed, to Master (" - << ip << ":" << port << "), " << table_name << "_" << partition_id; + LOG(WARNING) << "Serialize Remove Slave Node Failed, to Master (" << ip << ":" << port << "), " << db_name; return Status::Corruption("Serialize Failed"); } - return client_thread_->Write(ip, port + kPortShiftReplServer, to_send); + return client_thread_->Write(ip, static_cast(port + kPortShiftReplServer), to_send); } diff --git a/tools/pika_migrate/src/pika_repl_client_conn.cc b/tools/pika_migrate/src/pika_repl_client_conn.cc index dce825afbf..8fb30d9306 100644 --- a/tools/pika_migrate/src/pika_repl_client_conn.cc +++ b/tools/pika_migrate/src/pika_repl_client_conn.cc @@ -5,36 +5,32 @@ #include "include/pika_repl_client_conn.h" +#include +#include #include -#include "include/pika_server.h" -#include "include/pika_rm.h" -#include "slash/include/slash_string.h" - #include "include/pika_rm.h" #include "include/pika_server.h" +#include "pstd/include/pstd_string.h" +#include "pika_inner_message.pb.h" + +using pstd::Status; -extern PikaConf* g_pika_conf; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; - -PikaReplClientConn::PikaReplClientConn(int fd, - const std::string& ip_port, - pink::Thread* thread, - void* worker_specific_data, - pink::PinkEpoll* epoll) - : pink::PbConn(fd, ip_port, thread, epoll) { -} +extern std::unique_ptr g_pika_rm; + +PikaReplClientConn::PikaReplClientConn(int fd, const std::string& ip_port, net::Thread* thread, + void* worker_specific_data, net::NetMultiplexer* mpx) + : net::PbConn(fd, ip_port, thread, mpx) {} -bool PikaReplClientConn::IsTableStructConsistent( - const std::vector& current_tables, - const std::vector& expect_tables) { - if (current_tables.size() != expect_tables.size()) { +bool PikaReplClientConn::IsDBStructConsistent(const std::vector& current_dbs, + const std::vector& expect_dbs) { + if (current_dbs.size() != expect_dbs.size()) { return false; } - for (const auto& table_struct : current_tables) { - if (find(expect_tables.begin(), expect_tables.end(), - table_struct) == expect_tables.end()) { + for (const auto& db_struct : current_dbs) { + if (find(expect_dbs.begin(), expect_dbs.end(), db_struct) == expect_dbs.end()) { + LOG(WARNING) << "DB struct mismatch"; return false; } } @@ -42,36 +38,48 @@ bool PikaReplClientConn::IsTableStructConsistent( } int PikaReplClientConn::DealMessage() { - std::shared_ptr response = std::make_shared(); - response->ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_); + std::shared_ptr response = std::make_shared(); + ::google::protobuf::io::ArrayInputStream input(rbuf_ + cur_pos_ - header_len_, static_cast(header_len_)); + ::google::protobuf::io::CodedInputStream decoder(&input); + decoder.SetTotalBytesLimit(g_pika_conf->max_conn_rbuf_size()); + bool success = response->ParseFromCodedStream(&decoder) && decoder.ConsumedEntireMessage(); + if (!success) { + LOG(WARNING) << "ParseFromArray FAILED! 
" + << " msg_len: " << header_len_; + g_pika_server->SyncError(); + return -1; + } switch (response->type()) { - case InnerMessage::kMetaSync: - { - ReplClientTaskArg* task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); + case InnerMessage::kMetaSync: { + auto task_arg = + new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleMetaSyncResponse, static_cast(task_arg)); break; } - case InnerMessage::kDBSync: - { - ReplClientTaskArg* task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); + case InnerMessage::kDBSync: { + auto task_arg = + new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleDBSyncResponse, static_cast(task_arg)); break; } - case InnerMessage::kTrySync: - { - ReplClientTaskArg* task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); - g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleTrySyncResponse, static_cast(task_arg)); + case InnerMessage::kTrySync: { + const std::string& db_name = response->try_sync().slot().db_name(); + //TrySync resp must contain db_name + assert(!db_name.empty()); + auto task_arg = + new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplClientBGTaskByDBName(&PikaReplClientConn::HandleTrySyncResponse, static_cast(task_arg), db_name); break; } - case InnerMessage::kBinlogSync: - { + case InnerMessage::kBinlogSync: { DispatchBinlogRes(response); break; } - case InnerMessage::kRemoveSlaveNode: - { - ReplClientTaskArg* task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); - g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleRemoveSlaveNodeResponse, static_cast(task_arg)); + case InnerMessage::kRemoveSlaveNode: { + auto task_arg = + new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleRemoveSlaveNodeResponse, + static_cast(task_arg)); break; } default: @@ -81,181 +89,194 @@ int PikaReplClientConn::DealMessage() { } void PikaReplClientConn::HandleMetaSyncResponse(void* arg) { - ReplClientTaskArg* task_arg = static_cast(arg); - std::shared_ptr conn = task_arg->conn; + std::unique_ptr task_arg(static_cast(arg)); + std::shared_ptr conn = task_arg->conn; std::shared_ptr response = task_arg->res; + if (response->code() == InnerMessage::kOther) { + std::string reply = response->has_reply() ? response->reply() : ""; + // keep sending MetaSync + LOG(WARNING) << "Meta Sync Failed: " << reply << " will keep sending MetaSync msg"; + return; + } + if (response->code() != InnerMessage::kOk) { std::string reply = response->has_reply() ? response->reply() : ""; LOG(WARNING) << "Meta Sync Failed: " << reply; g_pika_server->SyncError(); conn->NotifyClose(); - delete task_arg; return; } const InnerMessage::InnerResponse_MetaSync meta_sync = response->meta_sync(); - if (g_pika_conf->classic_mode() != meta_sync.classic_mode()) { - LOG(WARNING) << "Self in " << (g_pika_conf->classic_mode() ? "classic" : "sharding") - << " mode, but master in " << (meta_sync.classic_mode() ? 
"classic" : "sharding") - << " mode, failed to establish master-slave relationship"; + + std::vector master_db_structs; + for (int idx = 0; idx < meta_sync.dbs_info_size(); ++idx) { + const InnerMessage::InnerResponse_MetaSync_DBInfo& db_info = meta_sync.dbs_info(idx); + master_db_structs.push_back({db_info.db_name(), db_info.db_instance_num()}); + } + + std::vector self_db_structs = g_pika_conf->db_structs(); + if (!PikaReplClientConn::IsDBStructConsistent(self_db_structs, master_db_structs)) { + LOG(WARNING) << "Self db structs(number of databases: " << self_db_structs.size() + << ") inconsistent with master(number of databases: " << master_db_structs.size() + << "), failed to establish master-slave relationship"; g_pika_server->SyncError(); conn->NotifyClose(); - delete task_arg; return; } - std::vector master_table_structs; - for (int idx = 0; idx < meta_sync.tables_info_size(); ++idx) { - InnerMessage::InnerResponse_MetaSync_TableInfo table_info = meta_sync.tables_info(idx); - master_table_structs.push_back({table_info.table_name(), - static_cast(table_info.partition_num()), {0}}); + // The relicationid obtained from the server is null + if (meta_sync.replication_id() == "") { + LOG(WARNING) << "Meta Sync Failed: the relicationid obtained from the server is null, keep sending MetaSync msg"; + return; } - std::vector self_table_structs = g_pika_conf->table_structs(); - if (!PikaReplClientConn::IsTableStructConsistent(self_table_structs, master_table_structs)) { - LOG(WARNING) << "Self table structs(number of databases: " << self_table_structs.size() - << ") inconsistent with master(number of databases: " << master_table_structs.size() - << "), failed to establish master-slave relationship"; + // The Replicationids of both the primary and secondary Replicationid are not empty and are not equal + if (g_pika_conf->replication_id() != meta_sync.replication_id() && g_pika_conf->replication_id() != "") { + LOG(WARNING) << "Meta Sync Failed: replicationid on both sides of the connection are inconsistent"; g_pika_server->SyncError(); conn->NotifyClose(); - delete task_arg; return; } + // First synchronization between the master and slave + if (g_pika_conf->replication_id() != meta_sync.replication_id()) { + LOG(INFO) << "New node is added to the cluster and requires full replication, remote replication id: " << meta_sync.replication_id() + << ", local replication id: " << g_pika_conf->replication_id(); + g_pika_server->force_full_sync_ = true; + g_pika_conf->SetReplicationID(meta_sync.replication_id()); + g_pika_conf->ConfigRewriteReplicationID(); + } + g_pika_conf->SetWriteBinlog("yes"); - g_pika_server->PreparePartitionTrySync(); + g_pika_server->PrepareDBTrySync(); g_pika_server->FinishMetaSync(); LOG(INFO) << "Finish to handle meta sync response"; - delete task_arg; } void PikaReplClientConn::HandleDBSyncResponse(void* arg) { - ReplClientTaskArg* task_arg = static_cast(arg); - std::shared_ptr conn = task_arg->conn; + std::unique_ptr task_arg(static_cast(arg)); + std::shared_ptr conn = task_arg->conn; std::shared_ptr response = task_arg->res; const InnerMessage::InnerResponse_DBSync db_sync_response = response->db_sync(); int32_t session_id = db_sync_response.session_id(); - const InnerMessage::Partition partition_response = db_sync_response.partition(); - std::string table_name = partition_response.table_name(); - uint32_t partition_id = partition_response.partition_id(); - - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_name, 
partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id << " Not Found"; - delete task_arg; + const InnerMessage::Slot& db_response = db_sync_response.slot(); + const std::string& db_name = db_response.db_name(); + + std::shared_ptr<SyncSlaveDB> slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << db_name << " Not Found"; return; } if (response->code() != InnerMessage::kOk) { - slave_partition->SetReplState(ReplState::kError); + slave_db->SetReplState(ReplState::kError); std::string reply = response->has_reply() ? response->reply() : ""; LOG(WARNING) << "DBSync Failed: " << reply; - delete task_arg; return; } - g_pika_rm->UpdateSyncSlavePartitionSessionId( - PartitionInfo(table_name, partition_id), session_id); + slave_db->SetMasterSessionId(session_id); - std::string partition_name = slave_partition->SyncPartitionInfo().ToString(); - slave_partition->SetReplState(ReplState::kWaitDBSync); - LOG(INFO) << "Partition: " << partition_name << " Need Wait To Sync"; - delete task_arg; + slave_db->StopRsync(); + slave_db->SetReplState(ReplState::kWaitDBSync); + LOG(INFO) << "DB: " << db_name << " Need Wait To Sync"; + + // now that full sync is starting, add an unfinished full sync count + g_pika_conf->AddInternalUsedUnfinishedFullSync(slave_db->DBName()); } void PikaReplClientConn::HandleTrySyncResponse(void* arg) { - ReplClientTaskArg* task_arg = static_cast<ReplClientTaskArg*>(arg); - std::shared_ptr<pink::PbConn> conn = task_arg->conn; + std::unique_ptr<ReplClientTaskArg> task_arg(static_cast<ReplClientTaskArg*>(arg)); + std::shared_ptr<net::PbConn> conn = task_arg->conn; std::shared_ptr<InnerMessage::InnerResponse> response = task_arg->res; if (response->code() != InnerMessage::kOk) { std::string reply = response->has_reply() ? response->reply() : ""; LOG(WARNING) << "TrySync Failed: " << reply; - delete task_arg; return; } - const InnerMessage::InnerResponse_TrySync& try_sync_response = response->try_sync(); - const InnerMessage::Partition& partition_response = try_sync_response.partition(); - std::string table_name = partition_response.table_name(); - uint32_t partition_id = partition_response.partition_id(); - std::shared_ptr<Partition> partition = g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition) { - LOG(WARNING) << "Partition: " << table_name << ":" << partition_id << " Not Found"; - delete task_arg; + const InnerMessage::Slot& db_response = try_sync_response.slot(); + std::string db_name = db_response.db_name(); + std::shared_ptr<SyncMasterDB> db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!db) { + LOG(WARNING) << "DB: " << db_name << " Not Found"; return; } - std::shared_ptr<SyncSlavePartition> slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_name, partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id << " Not Found"; - delete task_arg; + std::shared_ptr<SyncSlaveDB> slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "DB: " << db_name << " Not Found"; return; } - std::string partition_name = partition->GetPartitionName(); + LogicOffset logic_last_offset; if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kOk) { BinlogOffset boffset; int32_t session_id = try_sync_response.session_id(); - partition->logger()->GetProducerStatus(&boffset.filenum, &boffset.offset); - g_pika_rm->UpdateSyncSlavePartitionSessionId(PartitionInfo(table_name, partition_id), session_id); - g_pika_rm->SendPartitionBinlogSyncAckRequest(table_name, partition_id,
boffset, boffset, true); - slave_partition->SetReplState(ReplState::kConnected); - LOG(INFO) << "Partition: " << partition_name << " TrySync Ok"; + db->Logger()->GetProducerStatus(&boffset.filenum, &boffset.offset); + slave_db->SetMasterSessionId(session_id); + LogOffset offset(boffset, logic_last_offset); + g_pika_rm->SendBinlogSyncAckRequest(db_name, offset, offset, true); + slave_db->SetReplState(ReplState::kConnected); + // after connected, update receive time first to avoid connection timeout + slave_db->SetLastRecvTime(pstd::NowMicros()); + + LOG(INFO) << "DB: " << db_name << " TrySync Ok"; } else if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kSyncPointBePurged) { - slave_partition->SetReplState(ReplState::kTryDBSync); - LOG(INFO) << "Partition: " << partition_name << " Need To Try DBSync"; + slave_db->SetReplState(ReplState::kTryDBSync); + LOG(INFO) << "DB: " << db_name << " Need To Try DBSync"; } else if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kSyncPointLarger) { - slave_partition->SetReplState(ReplState::kError); - LOG(WARNING) << "Partition: " << partition_name << " TrySync Error, Because the invalid filenum and offset"; + slave_db->SetReplState(ReplState::kError); + LOG(WARNING) << "DB: " << db_name << " TrySync Error, Because the invalid filenum and offset"; } else if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kError) { - slave_partition->SetReplState(ReplState::kError); - LOG(WARNING) << "Partition: " << partition_name << " TrySync Error"; + slave_db->SetReplState(ReplState::kError); + LOG(WARNING) << "DB: " << db_name << " TrySync Error"; } - delete task_arg; } -void PikaReplClientConn::DispatchBinlogRes(const std::shared_ptr res) { - // partition to a bunch of binlog chips - std::unordered_map*, hash_partition_info> par_binlog; +void PikaReplClientConn::DispatchBinlogRes(const std::shared_ptr& res) { + // db to a bunch of binlog chips + std::unordered_map*, hash_db_info> par_binlog; for (int i = 0; i < res->binlog_sync_size(); ++i) { const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync(i); - // hash key: table + partition_id - PartitionInfo p_info(binlog_res.partition().table_name(), - binlog_res.partition().partition_id()); + // hash key: db + DBInfo p_info(binlog_res.slot().db_name()); if (par_binlog.find(p_info) == par_binlog.end()) { par_binlog[p_info] = new std::vector(); } par_binlog[p_info]->push_back(i); } + std::shared_ptr slave_db; for (auto& binlog_nums : par_binlog) { - RmNode node(binlog_nums.first.table_name_, binlog_nums.first.partition_id_); - g_pika_rm->SetSlaveLastRecvTime(node, slash::NowMicros()); - g_pika_rm->ScheduleWriteBinlogTask( - binlog_nums.first.table_name_ + std::to_string(binlog_nums.first.partition_id_), - res, - std::dynamic_pointer_cast(shared_from_this()), - reinterpret_cast(binlog_nums.second)); + RmNode node(binlog_nums.first.db_name_); + slave_db = g_pika_rm->GetSyncSlaveDBByName( + DBInfo(binlog_nums.first.db_name_)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << binlog_nums.first.db_name_ << " not exist"; + break; + } + slave_db->SetLastRecvTime(pstd::NowMicros()); + g_pika_rm->ScheduleWriteBinlogTask(binlog_nums.first.db_name_, res, + std::dynamic_pointer_cast(shared_from_this()), + reinterpret_cast(binlog_nums.second)); } } void PikaReplClientConn::HandleRemoveSlaveNodeResponse(void* arg) { - ReplClientTaskArg* task_arg = static_cast(arg); - std::shared_ptr conn = task_arg->conn; + std::unique_ptr 
task_arg(static_cast(arg)); + std::shared_ptr conn = task_arg->conn; std::shared_ptr response = task_arg->res; if (response->code() != InnerMessage::kOk) { std::string reply = response->has_reply() ? response->reply() : ""; LOG(WARNING) << "Remove slave node Failed: " << reply; - delete task_arg; return; } - delete task_arg; } - diff --git a/tools/pika_migrate/src/pika_repl_client_thread.cc b/tools/pika_migrate/src/pika_repl_client_thread.cc index cfb8de7500..2a7c666d81 100644 --- a/tools/pika_migrate/src/pika_repl_client_thread.cc +++ b/tools/pika_migrate/src/pika_repl_client_thread.cc @@ -5,44 +5,47 @@ #include "include/pika_repl_client_thread.h" +#include "include/pika_rm.h" #include "include/pika_server.h" -#include "slash/include/slash_string.h" +#include "pstd/include/pstd_string.h" extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; -PikaReplClientThread::PikaReplClientThread(int cron_interval, int keepalive_timeout) : - ClientThread(&conn_factory_, cron_interval, keepalive_timeout, &handle_, NULL) { -} +PikaReplClientThread::PikaReplClientThread(int cron_interval, int keepalive_timeout) + : ClientThread(&conn_factory_, cron_interval, keepalive_timeout, &handle_, nullptr) {} void PikaReplClientThread::ReplClientHandle::FdClosedHandle(int fd, const std::string& ip_port) const { LOG(INFO) << "ReplClient Close conn, fd=" << fd << ", ip_port=" << ip_port; std::string ip; int port = 0; - if (!slash::ParseIpPortString(ip_port, ip, port)) { + if (!pstd::ParseIpPortString(ip_port, ip, port)) { LOG(WARNING) << "Parse ip_port error " << ip_port; return; } - if (ip == g_pika_server->master_ip() - && port == g_pika_server->master_port() + kPortShiftReplServer - && PIKA_REPL_ERROR != g_pika_server->repl_state()) { // if state machine in error state, no retry + if (ip == g_pika_server->master_ip() && port == g_pika_server->master_port() + kPortShiftReplServer && + PIKA_REPL_ERROR != g_pika_server->repl_state()) { // if state machine in error state, no retry LOG(WARNING) << "Master conn disconnect : " << ip_port << " try reconnect"; g_pika_server->ResetMetaSyncStatus(); } + g_pika_server->UpdateMetaSyncTimestamp(); }; void PikaReplClientThread::ReplClientHandle::FdTimeoutHandle(int fd, const std::string& ip_port) const { LOG(INFO) << "ReplClient Timeout conn, fd=" << fd << ", ip_port=" << ip_port; std::string ip; int port = 0; - if (!slash::ParseIpPortString(ip_port, ip, port)) { + if (!pstd::ParseIpPortString(ip_port, ip, port)) { LOG(WARNING) << "Parse ip_port error " << ip_port; return; } - if (ip == g_pika_server->master_ip() - && port == g_pika_server->master_port() + kPortShiftReplServer - && PIKA_REPL_ERROR != g_pika_server->repl_state()) { // if state machine in error state, no retry + if (ip == g_pika_server->master_ip() && port == g_pika_server->master_port() + kPortShiftReplServer && + PIKA_REPL_ERROR != g_pika_server->repl_state() && + PikaReplicaManager::CheckSlaveDBState(ip, port)) { + // if state machine equal to kDBNoConnect(execute cmd 'dbslaveof db no one'), no retry LOG(WARNING) << "Master conn timeout : " << ip_port << " try reconnect"; g_pika_server->ResetMetaSyncStatus(); } + g_pika_server->UpdateMetaSyncTimestamp(); }; diff --git a/tools/pika_migrate/src/pika_repl_server.cc b/tools/pika_migrate/src/pika_repl_server.cc index 6587780561..b92d239b18 100644 --- a/tools/pika_migrate/src/pika_repl_server.cc +++ b/tools/pika_migrate/src/pika_repl_server.cc @@ -7,40 +7,39 @@ #include -#include "include/pika_rm.h" #include "include/pika_conf.h" +#include 
"include/pika_rm.h" #include "include/pika_server.h" -extern PikaConf* g_pika_conf; +using pstd::Status; + extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; -PikaReplServer::PikaReplServer(const std::set& ips, - int port, - int cron_interval) { - server_tp_ = new pink::ThreadPool(PIKA_REPL_SERVER_TP_SIZE, 100000); - pika_repl_server_thread_ = new PikaReplServerThread(ips, port, cron_interval); +PikaReplServer::PikaReplServer(const std::set& ips, int port, int cron_interval) { + server_tp_ = std::make_unique(PIKA_REPL_SERVER_TP_SIZE, 100000, "PikaReplServer"); + pika_repl_server_thread_ = std::make_unique(ips, port, cron_interval); pika_repl_server_thread_->set_thread_name("PikaReplServer"); - pthread_rwlock_init(&client_conn_rwlock_, NULL); } PikaReplServer::~PikaReplServer() { - delete pika_repl_server_thread_; - delete server_tp_; - pthread_rwlock_destroy(&client_conn_rwlock_); LOG(INFO) << "PikaReplServer exit!!!"; } int PikaReplServer::Start() { + pika_repl_server_thread_->set_thread_name("PikaReplServer"); int res = pika_repl_server_thread_->StartThread(); - if (res != pink::kSuccess) { + if (res != net::kSuccess) { LOG(FATAL) << "Start Pika Repl Server Thread Error: " << res - << (res == pink::kBindError ? ": bind port " + std::to_string(pika_repl_server_thread_->ListenPort()) + " conflict" : ": create thread error ") - << ", Listen on this port to handle the request sent by the Slave"; + << (res == net::kBindError + ? ": bind port " + std::to_string(pika_repl_server_thread_->ListenPort()) + " conflict" + : ": create thread error ") + << ", Listen on this port to handle the request sent by the Slave"; } res = server_tp_->start_thread_pool(); - if (res != pink::kSuccess) { - LOG(FATAL) << "Start ThreadPool Error: " << res << (res == pink::kCreateThreadError ? ": create thread error " : ": other error"); + if (res != net::kSuccess) { + LOG(FATAL) << "Start ThreadPool Error: " << res + << (res == net::kCreateThreadError ? 
": create thread error " : ": other error"); } return res; } @@ -48,46 +47,75 @@ int PikaReplServer::Start() { int PikaReplServer::Stop() { server_tp_->stop_thread_pool(); pika_repl_server_thread_->StopThread(); + pika_repl_server_thread_->Cleanup(); return 0; } -slash::Status PikaReplServer::SendSlaveBinlogChips(const std::string& ip, - int port, - const std::vector& tasks) { +pstd::Status PikaReplServer::SendSlaveBinlogChips(const std::string& ip, int port, + const std::vector& tasks) { InnerMessage::InnerResponse response; - response.set_code(InnerMessage::kOk); - response.set_type(InnerMessage::Type::kBinlogSync); - for (const auto task :tasks) { - InnerMessage::InnerResponse::BinlogSync* binlog_sync = response.add_binlog_sync(); - binlog_sync->set_session_id(task.rm_node_.SessionId()); - InnerMessage::Partition* partition = binlog_sync->mutable_partition(); - partition->set_table_name(task.rm_node_.TableName()); - partition->set_partition_id(task.rm_node_.PartitionId()); - InnerMessage::BinlogOffset* boffset = binlog_sync->mutable_binlog_offset(); - boffset->set_filenum(task.binlog_chip_.offset_.filenum); - boffset->set_offset(task.binlog_chip_.offset_.offset); - binlog_sync->set_binlog(task.binlog_chip_.binlog_); - } + BuildBinlogSyncResp(tasks, &response); std::string binlog_chip_pb; if (!response.SerializeToString(&binlog_chip_pb)) { return Status::Corruption("Serialized Failed"); } + + if (binlog_chip_pb.size() > static_cast(g_pika_conf->max_conn_rbuf_size())) { + for (const auto& task : tasks) { + InnerMessage::InnerResponse response; + std::vector tmp_tasks; + tmp_tasks.push_back(task); + BuildBinlogSyncResp(tmp_tasks, &response); + if (!response.SerializeToString(&binlog_chip_pb)) { + return Status::Corruption("Serialized Failed"); + } + pstd::Status s = Write(ip, port, binlog_chip_pb); + if (!s.ok()) { + return s; + } + } + return pstd::Status::OK(); + } return Write(ip, port, binlog_chip_pb); } -slash::Status PikaReplServer::Write(const std::string& ip, - const int port, - const std::string& msg) { - slash::RWLock l(&client_conn_rwlock_, false); - const std::string ip_port = slash::IpPortString(ip, port); +void PikaReplServer::BuildBinlogOffset(const LogOffset& offset, InnerMessage::BinlogOffset* boffset) { + boffset->set_filenum(offset.b_offset.filenum); + boffset->set_offset(offset.b_offset.offset); + boffset->set_term(offset.l_offset.term); + boffset->set_index(offset.l_offset.index); +} + +void PikaReplServer::BuildBinlogSyncResp(const std::vector& tasks, InnerMessage::InnerResponse* response) { + response->set_code(InnerMessage::kOk); + response->set_type(InnerMessage::Type::kBinlogSync); + for (const auto& task : tasks) { + InnerMessage::InnerResponse::BinlogSync* binlog_sync = response->add_binlog_sync(); + binlog_sync->set_session_id(task.rm_node_.SessionId()); + InnerMessage::Slot* db = binlog_sync->mutable_slot(); + db->set_db_name(task.rm_node_.DBName()); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db->set_slot_id(0); + InnerMessage::BinlogOffset* boffset = binlog_sync->mutable_binlog_offset(); + BuildBinlogOffset(task.binlog_chip_.offset_, boffset); + binlog_sync->set_binlog(task.binlog_chip_.binlog_); + } +} + +pstd::Status PikaReplServer::Write(const std::string& ip, const int port, const std::string& msg) { + std::shared_lock l(client_conn_rwlock_); + const std::string ip_port = pstd::IpPortString(ip, port); if 
(client_conn_map_.find(ip_port) == client_conn_map_.end()) { return Status::NotFound("The " + ip_port + " fd cannot be found"); } int fd = client_conn_map_[ip_port]; - std::shared_ptr conn = - std::dynamic_pointer_cast(pika_repl_server_thread_->get_conn(fd)); - if (conn == nullptr) { + std::shared_ptr conn = std::dynamic_pointer_cast(pika_repl_server_thread_->get_conn(fd)); + if (!conn) { return Status::NotFound("The" + ip_port + " conn cannot be found"); } @@ -99,18 +127,16 @@ slash::Status PikaReplServer::Write(const std::string& ip, return Status::OK(); } -void PikaReplServer::Schedule(pink::TaskFunc func, void* arg){ - server_tp_->Schedule(func, arg); -} +void PikaReplServer::Schedule(net::TaskFunc func, void* arg) { server_tp_->Schedule(func, arg); } void PikaReplServer::UpdateClientConnMap(const std::string& ip_port, int fd) { - slash::RWLock l(&client_conn_rwlock_, true); + std::lock_guard l(client_conn_rwlock_); client_conn_map_[ip_port] = fd; } void PikaReplServer::RemoveClientConn(int fd) { - slash::RWLock l(&client_conn_rwlock_, true); - std::map::const_iterator iter = client_conn_map_.begin(); + std::lock_guard l(client_conn_rwlock_); + auto iter = client_conn_map_.begin(); while (iter != client_conn_map_.end()) { if (iter->second == fd) { iter = client_conn_map_.erase(iter); @@ -120,7 +146,4 @@ void PikaReplServer::RemoveClientConn(int fd) { } } -void PikaReplServer::KillAllConns() { - return pika_repl_server_thread_->KillAllConns(); -} - +void PikaReplServer::KillAllConns() { return pika_repl_server_thread_->KillAllConns(); } diff --git a/tools/pika_migrate/src/pika_repl_server_conn.cc b/tools/pika_migrate/src/pika_repl_server_conn.cc index 85b3273741..41cec0e02f 100644 --- a/tools/pika_migrate/src/pika_repl_server_conn.cc +++ b/tools/pika_migrate/src/pika_repl_server_conn.cc @@ -10,439 +10,455 @@ #include "include/pika_rm.h" #include "include/pika_server.h" +using pstd::Status; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; -PikaReplServerConn::PikaReplServerConn(int fd, - std::string ip_port, - pink::Thread* thread, - void* worker_specific_data, pink::PinkEpoll* epoll) - : PbConn(fd, ip_port, thread, epoll) { -} +PikaReplServerConn::PikaReplServerConn(int fd, const std::string& ip_port, net::Thread* thread, void* worker_specific_data, + net::NetMultiplexer* mpx) + : PbConn(fd, ip_port, thread, mpx) {} -PikaReplServerConn::~PikaReplServerConn() { -} +PikaReplServerConn::~PikaReplServerConn() = default; void PikaReplServerConn::HandleMetaSyncRequest(void* arg) { - ReplServerTaskArg* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; - std::shared_ptr conn = task_arg->conn; + std::shared_ptr conn = task_arg->conn; InnerMessage::InnerRequest::MetaSync meta_sync_request = req->meta_sync(); - InnerMessage::Node node = meta_sync_request.node(); + const InnerMessage::Node& node = meta_sync_request.node(); std::string masterauth = meta_sync_request.has_auth() ? 
meta_sync_request.auth() : ""; InnerMessage::InnerResponse response; response.set_type(InnerMessage::kMetaSync); - if (!g_pika_conf->requirepass().empty() - && g_pika_conf->requirepass() != masterauth) { + if (!g_pika_conf->requirepass().empty() && g_pika_conf->requirepass() != masterauth) { response.set_code(InnerMessage::kError); response.set_reply("Auth with master error, Invalid masterauth"); } else { - std::vector table_structs = g_pika_conf->table_structs(); - bool success = g_pika_server->TryAddSlave(node.ip(), node.port(), conn->fd(), table_structs); - const std::string ip_port = slash::IpPortString(node.ip(), node.port()); + LOG(INFO) << "Receive MetaSync, Slave ip: " << node.ip() << ", Slave port:" << node.port(); + std::vector db_structs = g_pika_conf->db_structs(); + bool success = g_pika_server->TryAddSlave(node.ip(), node.port(), conn->fd(), db_structs); + const std::string ip_port = pstd::IpPortString(node.ip(), node.port()); g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); if (!success) { - response.set_code(InnerMessage::kError); + response.set_code(InnerMessage::kOther); response.set_reply("Slave AlreadyExist"); } else { g_pika_server->BecomeMaster(); response.set_code(InnerMessage::kOk); InnerMessage::InnerResponse_MetaSync* meta_sync = response.mutable_meta_sync(); + if (g_pika_conf->replication_id() == "") { + std::string replication_id = pstd::getRandomHexChars(configReplicationIDSize); + g_pika_conf->SetReplicationID(replication_id); + g_pika_conf->ConfigRewriteReplicationID(); + } meta_sync->set_classic_mode(g_pika_conf->classic_mode()); - for (const auto& table_struct : table_structs) { - InnerMessage::InnerResponse_MetaSync_TableInfo* table_info = meta_sync->add_tables_info(); - table_info->set_table_name(table_struct.table_name); - table_info->set_partition_num(table_struct.partition_num); + meta_sync->set_run_id(g_pika_conf->run_id()); + meta_sync->set_replication_id(g_pika_conf->replication_id()); + for (const auto& db_struct : db_structs) { + InnerMessage::InnerResponse_MetaSync_DBInfo* db_info = meta_sync->add_dbs_info(); + db_info->set_db_name(db_struct.db_name); + /* + * Since the slot field is written in protobuffer, + * slot_num is set to the default value 1 for compatibility + * with older versions, but slot_num is not used + */ + db_info->set_slot_num(1); + db_info->set_db_instance_num(db_struct.db_instance_num); } } } std::string reply_str; - if (!response.SerializeToString(&reply_str) - || conn->WriteResp(reply_str)) { + if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { LOG(WARNING) << "Process MetaSync request serialization failed"; conn->NotifyClose(); - delete task_arg; return; } conn->NotifyWrite(); - delete task_arg; } void PikaReplServerConn::HandleTrySyncRequest(void* arg) { - ReplServerTaskArg* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; - std::shared_ptr conn = task_arg->conn; + std::shared_ptr conn = task_arg->conn; InnerMessage::InnerRequest::TrySync try_sync_request = req->try_sync(); - InnerMessage::Partition partition_request = try_sync_request.partition(); - InnerMessage::BinlogOffset slave_boffset = try_sync_request.binlog_offset(); - InnerMessage::Node node = try_sync_request.node(); + const InnerMessage::Slot& db_request = try_sync_request.slot(); + const InnerMessage::BinlogOffset& slave_boffset = try_sync_request.binlog_offset(); + const InnerMessage::Node& node = try_sync_request.node(); + std::string db_name = 
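The removed delete task_arg calls above are the point of this refactor: each handler now adopts the raw pointer into a std::unique_ptr at entry, so every exit path frees it exactly once. The pattern in miniature, with a hypothetical TaskArg in place of ReplServerTaskArg:

#include <iostream>
#include <memory>

struct TaskArg {
  int id;
};

// Thread-pool callbacks receive void*; adopting the pointer into a
// unique_ptr up front frees it on every return path below exactly once.
void HandleTask(void* arg) {
  std::unique_ptr<TaskArg> task(static_cast<TaskArg*>(arg));
  if (task->id < 0) {
    return;  // early return: no leak, no manual delete
  }
  std::cout << "handled task " << task->id << "\n";
}  // normal exit: freed here as well

int main() {
  HandleTask(new TaskArg{42});
  return 0;
}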
db_request.db_name(); InnerMessage::InnerResponse response; InnerMessage::InnerResponse::TrySync* try_sync_response = response.mutable_try_sync(); - InnerMessage::Partition* partition_response = try_sync_response->mutable_partition(); - InnerMessage::BinlogOffset* master_partition_boffset = try_sync_response->mutable_binlog_offset(); - - std::string table_name = partition_request.table_name(); - uint32_t partition_id = partition_request.partition_id(); + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + InnerMessage::Slot* db_response = try_sync_response->mutable_slot(); + db_response->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db_response->set_slot_id(0); bool pre_success = true; response.set_type(InnerMessage::Type::kTrySync); - std::shared_ptr partition = g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition) { + std::shared_ptr db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!db) { response.set_code(InnerMessage::kError); - response.set_reply("Partition not found"); - LOG(WARNING) << "Table Name: " << table_name << " Partition ID: " - << partition_id << " Not Found, TrySync Error"; + response.set_reply("DB not found"); + LOG(WARNING) << "DB Name: " << db_name << "Not Found, TrySync Error"; pre_success = false; + } else { + LOG(INFO) << "Receive Trysync, Slave ip: " << node.ip() << ", Slave port:" << node.port() + << ", DB: " << db_name << ", filenum: " << slave_boffset.filenum() + << ", pro_offset: " << slave_boffset.offset(); + response.set_code(InnerMessage::kOk); } - BinlogOffset boffset; - std::string partition_name; - if (pre_success) { - partition_name = partition->GetPartitionName(); - LOG(INFO) << "Receive Trysync, Slave ip: " << node.ip() << ", Slave port:" - << node.port() << ", Partition: " << partition_name << ", filenum: " - << slave_boffset.filenum() << ", pro_offset: " << slave_boffset.offset(); + if (pre_success && TrySyncOffsetCheck(db, try_sync_request, try_sync_response)) { + TrySyncUpdateSlaveNode(db, try_sync_request, conn, try_sync_response); + } - response.set_code(InnerMessage::kOk); - partition_response->set_table_name(table_name); - partition_response->set_partition_id(partition_id); - if (!partition->GetBinlogOffset(&boffset)) { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); - LOG(WARNING) << "Handle TrySync, Partition: " - << partition_name << " Get binlog offset error, TrySync failed"; - pre_success = false; - } + std::string reply_str; + if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { + LOG(WARNING) << "Handle Try Sync Failed"; + conn->NotifyClose(); + return; } + conn->NotifyWrite(); +} - if (pre_success) { - master_partition_boffset->set_filenum(boffset.filenum); - master_partition_boffset->set_offset(boffset.offset); - if (boffset.filenum < slave_boffset.filenum() - || (boffset.filenum == slave_boffset.filenum() && boffset.offset < slave_boffset.offset())) { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kSyncPointLarger); - LOG(WARNING) << "Slave offset is larger than mine, Slave ip: " - << node.ip() << ", Slave port: " << node.port() << ", Partition: " - << partition_name << ", filenum: " << slave_boffset.filenum() - << ", pro_offset_: " << slave_boffset.offset(); - pre_success = false; +bool 
PikaReplServerConn::TrySyncUpdateSlaveNode(const std::shared_ptr& db, + const InnerMessage::InnerRequest::TrySync& try_sync_request, + const std::shared_ptr& conn, + InnerMessage::InnerResponse::TrySync* try_sync_response) { + const InnerMessage::Node& node = try_sync_request.node(); + if (!db->CheckSlaveNodeExist(node.ip(), node.port())) { + int32_t session_id = db->GenSessionId(); + if (session_id == -1) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "DB: " << db->DBName() << ", Gen Session id Failed"; + return false; } - if (pre_success) { - std::string confile = NewFileName(partition->logger()->filename, slave_boffset.filenum()); - if (!slash::FileExists(confile)) { - LOG(INFO) << "Partition: " << partition_name << " binlog has been purged, may need full sync"; - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kSyncPointBePurged); - pre_success = false; - } + try_sync_response->set_session_id(session_id); + // incremental sync + Status s = db->AddSlaveNode(node.ip(), node.port(), session_id); + if (!s.ok()) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "DB: " << db->DBName() << " TrySync Failed, " << s.ToString(); + return false; } - if (pre_success) { - PikaBinlogReader reader; - reader.Seek(partition->logger(), slave_boffset.filenum(), slave_boffset.offset()); - BinlogOffset seeked_offset; - reader.GetReaderStatus(&(seeked_offset.filenum), &(seeked_offset.offset)); - if (seeked_offset.filenum != slave_boffset.filenum() || seeked_offset.offset != slave_boffset.offset()) { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); - LOG(WARNING) << "Slave offset is not a start point of cur log, Slave ip: " - << node.ip() << ", Slave port: " << node.port() << ", Partition: " - << partition_name << ", cloest start point, filenum: " << seeked_offset.filenum - << ", offset: " << seeked_offset.offset; - pre_success = false; - } + const std::string ip_port = pstd::IpPortString(node.ip(), node.port()); + g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kOk); + LOG(INFO) << "DB: " << db->DBName() << " TrySync Success, Session: " << session_id; + } else { + int32_t session_id; + Status s = db->GetSlaveNodeSession(node.ip(), node.port(), &session_id); + if (!s.ok()) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "DB: " << db->DBName() << " Get Session id Failed" << s.ToString(); + return false; } + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kOk); + try_sync_response->set_session_id(session_id); + LOG(INFO) << "DB: " << db->DBName() << " TrySync Success, Session: " << session_id; } + return true; +} - if (pre_success) { - if (!g_pika_rm->CheckPartitionSlaveExist(RmNode(node.ip(), node.port(), table_name, partition_id))) { - int32_t session_id = g_pika_rm->GenPartitionSessionId(table_name, partition_id); - if (session_id != -1) { - try_sync_response->set_session_id(session_id); - // incremental sync - Status s = g_pika_rm->AddPartitionSlave(RmNode(node.ip(), node.port(), table_name, partition_id, session_id)); - if (!s.ok()) { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); - LOG(WARNING) << "Partition: " << partition_name << " TrySync Failed, " << s.ToString(); - pre_success = false; - } else { - const std::string ip_port = 
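The new TrySyncUpdateSlaveNode above is a get-or-create: an unknown slave receives a fresh session id and is registered, while a repeated TrySync from a known slave just reads its existing id back, making retries idempotent. The registry shape on its own (SessionRegistry is hypothetical):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

class SessionRegistry {
 public:
  // Returns the existing session for ip_port or mints a new one.
  int32_t GetOrCreate(const std::string& ip_port) {
    auto it = sessions_.find(ip_port);
    if (it != sessions_.end()) {
      return it->second;  // known slave: reuse, like GetSlaveNodeSession
    }
    int32_t id = next_id_++;  // unknown slave: GenSessionId + AddSlaveNode
    sessions_[ip_port] = id;
    return id;
  }

 private:
  int32_t next_id_ = 0;
  std::map<std::string, int32_t> sessions_;
};

int main() {
  SessionRegistry reg;
  std::cout << reg.GetOrCreate("10.0.0.1:9221") << "\n";  // 0
  std::cout << reg.GetOrCreate("10.0.0.1:9221") << "\n";  // 0 again: idempotent
  return 0;
}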
slash::IpPortString(node.ip(), node.port()); - g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kOk); - LOG(INFO) << "Partition: " << partition_name << " TrySync Success, Session: " << session_id; - } - } else { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); - LOG(WARNING) << "Partition: " << partition_name << ", Gen Session id Failed"; - pre_success = false; - } - } else { - int32_t session_id; - Status s = g_pika_rm->GetPartitionSlaveSession( - RmNode(node.ip(), node.port(), table_name, partition_id), &session_id); - if (!s.ok()) { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); - LOG(WARNING) << "Partition: " << partition_name << ", Get Session id Failed" << s.ToString(); - pre_success = false; - } else { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kOk); - try_sync_response->set_session_id(session_id); - LOG(INFO) << "Partition: " << partition_name << " TrySync Success, Session: " << session_id; - } - } +bool PikaReplServerConn::TrySyncOffsetCheck(const std::shared_ptr& db, + const InnerMessage::InnerRequest::TrySync& try_sync_request, + InnerMessage::InnerResponse::TrySync* try_sync_response) { + const InnerMessage::Node& node = try_sync_request.node(); + const InnerMessage::BinlogOffset& slave_boffset = try_sync_request.binlog_offset(); + std::string db_name = db->DBName(); + BinlogOffset boffset; + Status s = db->Logger()->GetProducerStatus(&(boffset.filenum), &(boffset.offset)); + if (!s.ok()) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "Handle TrySync, DB: " << db_name << " Get binlog offset error, TrySync failed"; + return false; + } + InnerMessage::BinlogOffset* master_db_boffset = try_sync_response->mutable_binlog_offset(); + master_db_boffset->set_filenum(boffset.filenum); + master_db_boffset->set_offset(boffset.offset); + + if (boffset.filenum < slave_boffset.filenum() || + (boffset.filenum == slave_boffset.filenum() && boffset.offset < slave_boffset.offset())) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kSyncPointLarger); + LOG(WARNING) << "Slave offset is larger than mine, Slave ip: " << node.ip() << ", Slave port: " << node.port() + << ", DB: " << db_name << ", slave filenum: " << slave_boffset.filenum() + << ", slave pro_offset_: " << slave_boffset.offset() << ", local filenum: " << boffset.filenum << ", local pro_offset_: " << boffset.offset; + return false; } - std::string reply_str; - if (!response.SerializeToString(&reply_str) - || conn->WriteResp(reply_str)) { - LOG(WARNING) << "Handle Try Sync Failed"; - conn->NotifyClose(); - delete task_arg; - return; + std::string confile = NewFileName(db->Logger()->filename(), slave_boffset.filenum()); + if (!pstd::FileExists(confile)) { + LOG(INFO) << "DB: " << db_name << " binlog has been purged, may need full sync"; + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kSyncPointBePurged); + return false; } - conn->NotifyWrite(); - delete task_arg; -} + PikaBinlogReader reader; + reader.Seek(db->Logger(), slave_boffset.filenum(), slave_boffset.offset()); + BinlogOffset seeked_offset; + reader.GetReaderStatus(&(seeked_offset.filenum), &(seeked_offset.offset)); + if (seeked_offset.filenum != slave_boffset.filenum() || seeked_offset.offset != slave_boffset.offset()) { + 
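The kSyncPointLarger rejection in TrySyncOffsetCheck above is a plain lexicographic compare on (filenum, offset): a slave may not claim a sync point past the master's producer status. Isolated into a minimal sketch:

#include <cstdint>
#include <iostream>
#include <tuple>

struct BinlogOffset {
  uint32_t filenum;
  uint64_t offset;
};

// True when the slave's sync point lies beyond what the master has
// produced, i.e. the kSyncPointLarger case.
bool SlaveAhead(const BinlogOffset& master, const BinlogOffset& slave) {
  return std::tie(master.filenum, master.offset) <
         std::tie(slave.filenum, slave.offset);
}

int main() {
  std::cout << SlaveAhead({5, 100}, {5, 200}) << "\n";  // 1: same file, bigger offset
  std::cout << SlaveAhead({6, 0}, {5, 999}) << "\n";    // 0: master is a file ahead
  return 0;
}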
try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "Slave offset is not a start point of cur log, Slave ip: " << node.ip() + << ", Slave port: " << node.port() << ", DB: " << db_name << " closest start point, filenum: " + << seeked_offset.filenum << ", offset: " << seeked_offset.offset; + return false; + } + return true; +} void PikaReplServerConn::HandleDBSyncRequest(void* arg) { - ReplServerTaskArg* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; - std::shared_ptr conn = task_arg->conn; + std::shared_ptr conn = task_arg->conn; InnerMessage::InnerRequest::DBSync db_sync_request = req->db_sync(); - InnerMessage::Partition partition_request = db_sync_request.partition(); - InnerMessage::Node node = db_sync_request.node(); - InnerMessage::BinlogOffset slave_boffset = db_sync_request.binlog_offset(); - std::string table_name = partition_request.table_name(); - uint32_t partition_id = partition_request.partition_id(); - std::string partition_name = table_name + "_" + std::to_string(partition_id); + const InnerMessage::Slot& db_request = db_sync_request.slot(); + const InnerMessage::Node& node = db_sync_request.node(); + const InnerMessage::BinlogOffset& slave_boffset = db_sync_request.binlog_offset(); + std::string db_name = db_request.db_name(); InnerMessage::InnerResponse response; response.set_code(InnerMessage::kOk); response.set_type(InnerMessage::Type::kDBSync); InnerMessage::InnerResponse::DBSync* db_sync_response = response.mutable_db_sync(); - InnerMessage::Partition* partition_response = db_sync_response->mutable_partition(); - partition_response->set_table_name(table_name); - partition_response->set_partition_id(partition_id); - - LOG(INFO) << "Handle partition DBSync Request"; + InnerMessage::Slot* db_response = db_sync_response->mutable_slot(); + db_response->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db_response->set_slot_id(0); + + LOG(INFO) << "Handle DBSync Request"; bool prior_success = true; - if (!g_pika_rm->CheckPartitionSlaveExist(RmNode(node.ip(), node.port(), table_name, partition_id))) { - int32_t session_id = g_pika_rm->GenPartitionSessionId(table_name, partition_id); - if (session_id == -1) { - response.set_code(InnerMessage::kError); - LOG(WARNING) << "Partition: " << partition_name << ", Gen Session id Failed"; - prior_success = false; - } - if (prior_success) { + std::shared_ptr master_db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!master_db) { + LOG(WARNING) << "Sync Master DB: " << db_name << ", NotFound"; + prior_success = false; + response.set_code(InnerMessage::kError); + } + if (prior_success) { + if (!master_db->CheckSlaveNodeExist(node.ip(), node.port())) { + int32_t session_id = master_db->GenSessionId(); db_sync_response->set_session_id(session_id); - Status s = g_pika_rm->AddPartitionSlave(RmNode(node.ip(), node.port(), table_name, partition_id, session_id)); - if (s.ok()) { - const std::string ip_port = slash::IpPortString(node.ip(), node.port()); - g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); - LOG(INFO) << "Partition: " << partition_name << " Handle DBSync Request Success, Session: " << session_id; - } else { + if (session_id == -1) { response.set_code(InnerMessage::kError); - LOG(WARNING) << "Partition: " << partition_name << " Handle DBSync Request 
Failed, " << s.ToString(); - prior_success = false; + LOG(WARNING) << "DB: " << db_name << ", Gen Session id Failed"; + } else { + Status s = master_db->AddSlaveNode(node.ip(), node.port(), session_id); + if (s.ok()) { + const std::string ip_port = pstd::IpPortString(node.ip(), node.port()); + g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); + LOG(INFO) << "DB: " << db_name << " Handle DBSync Request Success, Session: " << session_id; + } else { + response.set_code(InnerMessage::kError); + LOG(WARNING) << "DB: " << db_name << " Handle DBSync Request Failed, " << s.ToString(); + } } } else { - db_sync_response->set_session_id(-1); - } - } else { - int32_t session_id; - Status s = g_pika_rm->GetPartitionSlaveSession( - RmNode(node.ip(), node.port(), table_name, partition_id), &session_id); - if (!s.ok()) { - response.set_code(InnerMessage::kError); - LOG(WARNING) << "Partition: " << partition_name << ", Get Session id Failed" << s.ToString(); - prior_success = false; - db_sync_response->set_session_id(-1); - } else { - db_sync_response->set_session_id(session_id); - LOG(INFO) << "Partition: " << partition_name << " Handle DBSync Request Success, Session: " << session_id; + int32_t session_id = 0; + Status s = master_db->GetSlaveNodeSession(node.ip(), node.port(), &session_id); + if (!s.ok()) { + response.set_code(InnerMessage::kError); + db_sync_response->set_session_id(-1); + LOG(WARNING) << "DB: " << db_name << ", Get Session id Failed: " << s.ToString(); + } else { + db_sync_response->set_session_id(session_id); + LOG(INFO) << "DB: " << db_name << " Handle DBSync Request Success, Session: " << session_id; + } } } - g_pika_server->TryDBSync(node.ip(), node.port() + kPortShiftRSync, - table_name, partition_id, slave_boffset.filenum()); + // Change slave node's state to kSlaveDbSync so that the binlog will be preserved. + // See details in SyncMasterSlot::BinlogCloudPurge.
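To make the comment above concrete, here is the shape of the BinlogCloudPurge guard it refers to, reduced to plain stand-in types: purging a binlog file is refused while any slave is mid-DB-sync or has not acked strictly past that file.

#include <cstdint>
#include <iostream>
#include <vector>

enum class SlaveState { kSlaveBinlogSync, kSlaveDbSync };

struct Slave {
  SlaveState state;
  uint32_t acked_filenum;
};

// Purging file `index` is allowed only if no slave is mid-DB-sync and
// every binlog-syncing slave has acked strictly past it.
bool CanPurge(uint32_t index, const std::vector<Slave>& slaves) {
  for (const auto& s : slaves) {
    if (s.state == SlaveState::kSlaveDbSync) {
      return false;  // this is why ActivateSlaveDbSync preserves the binlog
    }
    if (index >= s.acked_filenum) {
      return false;
    }
  }
  return true;
}

int main() {
  std::cout << CanPurge(3, {{SlaveState::kSlaveBinlogSync, 5}}) << "\n";  // 1
  std::cout << CanPurge(3, {{SlaveState::kSlaveDbSync, 9}}) << "\n";      // 0
  return 0;
}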
+ master_db->ActivateSlaveDbSync(node.ip(), node.port()); + + g_pika_server->TryDBSync(node.ip(), node.port() + kPortShiftRSync, db_name, + static_cast(slave_boffset.filenum())); std::string reply_str; - if (!response.SerializeToString(&reply_str) - || conn->WriteResp(reply_str)) { + if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { LOG(WARNING) << "Handle DBSync Failed"; conn->NotifyClose(); - delete task_arg; return; } conn->NotifyWrite(); - delete task_arg; } void PikaReplServerConn::HandleBinlogSyncRequest(void* arg) { - ReplServerTaskArg* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; - std::shared_ptr conn = task_arg->conn; + std::shared_ptr conn = task_arg->conn; if (!req->has_binlog_sync()) { LOG(WARNING) << "Pb parse error"; - //conn->NotifyClose(); - delete task_arg; return; } const InnerMessage::InnerRequest::BinlogSync& binlog_req = req->binlog_sync(); const InnerMessage::Node& node = binlog_req.node(); - const std::string& table_name = binlog_req.table_name(); - uint32_t partition_id = binlog_req.partition_id(); + const std::string& db_name = binlog_req.db_name(); bool is_first_send = binlog_req.first_send(); int32_t session_id = binlog_req.session_id(); const InnerMessage::BinlogOffset& ack_range_start = binlog_req.ack_range_start(); const InnerMessage::BinlogOffset& ack_range_end = binlog_req.ack_range_end(); - BinlogOffset range_start(ack_range_start.filenum(), ack_range_start.offset()); - BinlogOffset range_end(ack_range_end.filenum(), ack_range_end.offset()); - - if (!g_pika_rm->CheckMasterPartitionSessionId(node.ip(), - node.port(), table_name, partition_id, session_id)) { - LOG(WARNING) << "Check Session failed " << node.ip() << ":" << node.port() - << ", " << table_name << "_" << partition_id; - //conn->NotifyClose(); - delete task_arg; + BinlogOffset b_range_start(ack_range_start.filenum(), ack_range_start.offset()); + BinlogOffset b_range_end(ack_range_end.filenum(), ack_range_end.offset()); + LogicOffset l_range_start(ack_range_start.term(), ack_range_start.index()); + LogicOffset l_range_end(ack_range_end.term(), ack_range_end.index()); + LogOffset range_start(b_range_start, l_range_start); + LogOffset range_end(b_range_end, l_range_end); + + std::shared_ptr master_db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!master_db) { + LOG(WARNING) << "Sync Master DB: " << db_name << ", NotFound"; + return; + } + + if (!master_db->CheckSessionId(node.ip(), node.port(), db_name, session_id)) { + LOG(WARNING) << "Check Session failed " << node.ip() << ":" << node.port() << ", " << db_name; return; } // Set ack info from slave - RmNode slave_node = RmNode(node.ip(), node.port(), table_name, partition_id); + RmNode slave_node = RmNode(node.ip(), node.port(), db_name); - Status s = g_pika_rm->SetMasterLastRecvTime(slave_node, slash::NowMicros()); + Status s = master_db->SetLastRecvTime(node.ip(), node.port(), pstd::NowMicros()); if (!s.ok()) { - LOG(WARNING) << "SetMasterLastRecvTime failed " << node.ip() << ":" << node.port() - << ", " << table_name << "_" << partition_id << " " << s.ToString(); + LOG(WARNING) << "SetMasterLastRecvTime failed " << node.ip() << ":" << node.port() << ", " << db_name << " " << s.ToString(); conn->NotifyClose(); - delete task_arg; return; } if (is_first_send) { - if (!(range_start == range_end)) { + if (range_start.b_offset != range_end.b_offset) { LOG(WARNING) << "first binlogsync request pb argument invalid"; 
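HandleBinlogSyncRequest above rebuilds each LogOffset from two halves, the physical (filenum, offset) pair and the logical (term, index) pair, and applies two special cases: a first_send must carry an empty ack range, and an all-default range is a keepalive ping. A condensed model of those rules:

#include <cstdint>
#include <iostream>

struct BinlogOffset {
  uint32_t filenum = 0;
  uint64_t offset = 0;
  bool operator==(const BinlogOffset& o) const {
    return filenum == o.filenum && offset == o.offset;
  }
};
struct LogicOffset {
  uint32_t term = 0;
  uint64_t index = 0;
};
struct LogOffset {
  BinlogOffset b_offset;
  LogicOffset l_offset;
};

// first_send must not ack a real range yet.
bool ValidFirstSend(const LogOffset& start, const LogOffset& end) {
  return start.b_offset == end.b_offset;
}

// An all-default ack range carries no progress: treat it as a ping.
bool IsPing(const LogOffset& start, const LogOffset& end) {
  return start.b_offset == BinlogOffset() && end.b_offset == BinlogOffset();
}

int main() {
  LogOffset zero;
  std::cout << IsPing(zero, zero) << "\n";          // 1
  std::cout << ValidFirstSend(zero, zero) << "\n";  // 1
  return 0;
}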
conn->NotifyClose(); - delete task_arg; return; } - Status s = g_pika_rm->ActivateBinlogSync(slave_node, range_start); + + Status s = master_db->ActivateSlaveBinlogSync(node.ip(), node.port(), range_start); if (!s.ok()) { LOG(WARNING) << "Activate Binlog Sync failed " << slave_node.ToString() << " " << s.ToString(); conn->NotifyClose(); - delete task_arg; return; } - delete task_arg; return; } // not the first_send the range_ack cant be 0 // set this case as ping - if (range_start == BinlogOffset() && range_end == BinlogOffset()) { - delete task_arg; + if (range_start.b_offset == BinlogOffset() && range_end.b_offset == BinlogOffset()) { return; } s = g_pika_rm->UpdateSyncBinlogStatus(slave_node, range_start, range_end); if (!s.ok()) { - LOG(WARNING) << "Update binlog ack failed " << table_name << " " << partition_id << " " << s.ToString(); + LOG(WARNING) << "Update binlog ack failed " << db_name << " " << s.ToString(); conn->NotifyClose(); - delete task_arg; return; } - delete task_arg; + g_pika_server->SignalAuxiliary(); - return; } void PikaReplServerConn::HandleRemoveSlaveNodeRequest(void* arg) { - ReplServerTaskArg* task_arg = static_cast(arg); + std::unique_ptr task_arg(static_cast(arg)); const std::shared_ptr req = task_arg->req; - std::shared_ptr conn = task_arg->conn; - if (!req->remove_slave_node_size()) { + std::shared_ptr conn = task_arg->conn; + if (req->remove_slave_node_size() == 0) { LOG(WARNING) << "Pb parse error"; conn->NotifyClose(); - delete task_arg; return; } const InnerMessage::InnerRequest::RemoveSlaveNode& remove_slave_node_req = req->remove_slave_node(0); const InnerMessage::Node& node = remove_slave_node_req.node(); - const InnerMessage::Partition& partition = remove_slave_node_req.partition(); + const InnerMessage::Slot& slot = remove_slave_node_req.slot(); - std::string table_name = partition.table_name(); - uint32_t partition_id = partition.partition_id(); - Status s = g_pika_rm->RemovePartitionSlave(RmNode(node.ip(), - node.port(), table_name, partition_id)); + std::string db_name = slot.db_name(); + std::shared_ptr master_db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!master_db) { + LOG(WARNING) << "Sync Master DB: " << db_name << ", NotFound"; + } + Status s = master_db->RemoveSlaveNode(node.ip(), node.port()); InnerMessage::InnerResponse response; response.set_code(InnerMessage::kOk); response.set_type(InnerMessage::Type::kRemoveSlaveNode); InnerMessage::InnerResponse::RemoveSlaveNode* remove_slave_node_response = response.add_remove_slave_node(); - InnerMessage::Partition* partition_response = remove_slave_node_response->mutable_partition(); - partition_response->set_table_name(table_name); - partition_response->set_partition_id(partition_id); + InnerMessage::Slot* db_response = remove_slave_node_response->mutable_slot (); + db_response->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db_response->set_slot_id(0); InnerMessage::Node* node_response = remove_slave_node_response->mutable_node(); node_response->set_ip(g_pika_server->host()); node_response->set_port(g_pika_server->port()); std::string reply_str; - if (!response.SerializeToString(&reply_str) - || conn->WriteResp(reply_str)) { + if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { LOG(WARNING) << "Remove Slave Node Failed"; conn->NotifyClose(); - delete task_arg; return; } conn->NotifyWrite(); - delete 
task_arg; } int PikaReplServerConn::DealMessage() { std::shared_ptr req = std::make_shared(); - bool parse_res = req->ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_); + bool parse_res = req->ParseFromArray(rbuf_ + cur_pos_ - header_len_, static_cast(header_len_)); if (!parse_res) { LOG(WARNING) << "Pika repl server connection pb parse error."; return -1; } - int res = 0; switch (req->type()) { - case InnerMessage::kMetaSync: - { - ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + case InnerMessage::kMetaSync: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleMetaSyncRequest, task_arg); break; } - case InnerMessage::kTrySync: - { - ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + case InnerMessage::kTrySync: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleTrySyncRequest, task_arg); break; } - case InnerMessage::kDBSync: - { - ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + case InnerMessage::kDBSync: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleDBSyncRequest, task_arg); break; } - case InnerMessage::kBinlogSync: - { - ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + case InnerMessage::kBinlogSync: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleBinlogSyncRequest, task_arg); break; } - case InnerMessage::kRemoveSlaveNode: - { - ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + case InnerMessage::kRemoveSlaveNode: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleRemoveSlaveNodeRequest, task_arg); break; } default: break; } - return res; + return 0; } diff --git a/tools/pika_migrate/src/pika_repl_server_thread.cc b/tools/pika_migrate/src/pika_repl_server_thread.cc index edc33f8fd0..590ba02f7f 100644 --- a/tools/pika_migrate/src/pika_repl_server_thread.cc +++ b/tools/pika_migrate/src/pika_repl_server_thread.cc @@ -9,21 +9,16 @@ #include "include/pika_server.h" extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; +extern std::unique_ptr g_pika_rm; -PikaReplServerThread::PikaReplServerThread(const std::set& ips, - int port, - int cron_interval) : - HolyThread(ips, port, &conn_factory_, cron_interval, &handle_, true), - conn_factory_(this), - port_(port), - serial_(0) { +PikaReplServerThread::PikaReplServerThread(const std::set& ips, int port, int cron_interval) + : HolyThread(ips, port, &conn_factory_, cron_interval, &handle_, true), + conn_factory_(this), + port_(port) { set_keepalive_timeout(180); } -int PikaReplServerThread::ListenPort() { - return port_; -} +int PikaReplServerThread::ListenPort() { return port_; } void PikaReplServerThread::ReplServerHandle::FdClosedHandle(int fd, const std::string& ip_port) const { LOG(INFO) << "ServerThread Close Slave Conn, fd: " << fd << ", ip_port: " << ip_port; diff --git 
a/tools/pika_migrate/src/pika_rm.cc b/tools/pika_migrate/src/pika_rm.cc index 2c240ba9a4..9df7b82101 100644 --- a/tools/pika_migrate/src/pika_rm.cc +++ b/tools/pika_migrate/src/pika_rm.cc @@ -3,214 +3,106 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "set" +#include "include/pika_rm.h" -#include -#include #include #include +#include +#include -#include "pink/include/pink_cli.h" +#include + +#include "net/include/net_cli.h" -#include "include/pika_rm.h" #include "include/pika_conf.h" #include "include/pika_server.h" -#include "include/pika_repl_client.h" -#include "include/pika_repl_server.h" +#include "include/pika_admin.h" +#include "include/pika_command.h" -extern PikaConf *g_pika_conf; -extern PikaReplicaManager* g_pika_rm; -extern PikaServer *g_pika_server; - -/* BinlogReaderManager */ - -BinlogReaderManager::~BinlogReaderManager() { -} - -Status BinlogReaderManager::FetchBinlogReader(const RmNode& rm_node, std::shared_ptr* reader) { - slash::MutexLock l(&reader_mu_); - if (occupied_.find(rm_node) != occupied_.end()) { - return Status::Corruption(rm_node.ToString() + " exist"); - } - if (vacant_.empty()) { - *reader = std::make_shared(); - } else { - *reader = *(vacant_.begin()); - vacant_.erase(vacant_.begin()); - } - occupied_[rm_node] = *reader; - return Status::OK(); -} - -Status BinlogReaderManager::ReleaseBinlogReader(const RmNode& rm_node) { - slash::MutexLock l(&reader_mu_); - if (occupied_.find(rm_node) == occupied_.end()) { - return Status::NotFound(rm_node.ToString()); - } - std::shared_ptr reader = occupied_[rm_node]; - occupied_.erase(rm_node); - vacant_.push_back(reader); - return Status::OK(); -} +using pstd::Status; -/* SlaveNode */ +extern std::unique_ptr g_pika_rm; +extern PikaServer* g_pika_server; -SlaveNode::SlaveNode(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int session_id) - : RmNode(ip, port, table_name, partition_id, session_id), - slave_state(kSlaveNotSync), - b_state(kNotSync), sent_offset(), acked_offset() { -} +/* SyncDB */ -SlaveNode::~SlaveNode() { - if (b_state == kReadFromFile && binlog_reader != nullptr) { - RmNode rm_node(Ip(), Port(), TableName(), PartitionId()); - ReleaseBinlogFileReader(); - } -} +SyncDB::SyncDB(const std::string& db_name) + : db_info_(db_name) {} -Status SlaveNode::InitBinlogFileReader(const std::shared_ptr& binlog, - const BinlogOffset& offset) { - Status s = g_pika_rm->binlog_reader_mgr.FetchBinlogReader( - RmNode(Ip(), Port(), NodePartitionInfo()), &binlog_reader); - if (!s.ok()) { - return s; - } - int res = binlog_reader->Seek(binlog, offset.filenum, offset.offset); - if (res) { - g_pika_rm->binlog_reader_mgr.ReleaseBinlogReader( - RmNode(Ip(), Port(), NodePartitionInfo())); - return Status::Corruption(ToString() + " binlog reader init failed"); - } - return Status::OK(); +std::string SyncDB::DBName() { + return db_info_.db_name_; } -void SlaveNode::ReleaseBinlogFileReader() { - g_pika_rm->binlog_reader_mgr.ReleaseBinlogReader( - RmNode(Ip(), Port(), NodePartitionInfo())); - binlog_reader = nullptr; -} +/* SyncMasterDB*/ -std::string SlaveNode::ToStringStatus() { - std::stringstream tmp_stream; - tmp_stream << " Slave_state: " << SlaveStateMsg[slave_state] << "\r\n"; - tmp_stream << " Binlog_sync_state: " << BinlogSyncStateMsg[b_state] << "\r\n"; - tmp_stream << " Sync_window: " << "\r\n" << sync_win.ToStringStatus(); - tmp_stream << " Sent_offset: " << 
sent_offset.ToString() << "\r\n"; - tmp_stream << " Acked_offset: " << acked_offset.ToString() << "\r\n"; - tmp_stream << " Binlog_reader activated: " << (binlog_reader != nullptr) << "\r\n"; - return tmp_stream.str(); -} +SyncMasterDB::SyncMasterDB(const std::string& db_name) + : SyncDB(db_name), coordinator_(db_name) {} -/* SyncPartition */ +int SyncMasterDB::GetNumberOfSlaveNode() { return coordinator_.SyncPros().SlaveSize(); } -SyncPartition::SyncPartition(const std::string& table_name, uint32_t partition_id) - : partition_info_(table_name, partition_id) { +bool SyncMasterDB::CheckSlaveNodeExist(const std::string& ip, int port) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + return static_cast(slave_ptr); } -/* SyncMasterPartition*/ - -SyncMasterPartition::SyncMasterPartition(const std::string& table_name, uint32_t partition_id) - : SyncPartition(table_name, partition_id), - session_id_(0) {} - -bool SyncMasterPartition::CheckReadBinlogFromCache() { - return false; -} +Status SyncMasterDB::GetSlaveNodeSession(const std::string& ip, int port, int32_t* session) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("slave " + ip + ":" + std::to_string(port) + " not found"); + } -int SyncMasterPartition::GetNumberOfSlaveNode() { - slash::MutexLock l(&partition_mu_); - return slaves_.size(); -} + slave_ptr->Lock(); + *session = slave_ptr->SessionId(); + slave_ptr->Unlock(); -bool SyncMasterPartition::CheckSlaveNodeExist(const std::string& ip, int port) { - slash::MutexLock l(&partition_mu_); - for (auto& slave : slaves_) { - if (ip == slave->Ip() && port == slave->Port()) { - return true; - } - } - return false; + return Status::OK(); } -Status SyncMasterPartition::GetSlaveNodeSession( - const std::string& ip, int port, int32_t* session) { - slash::MutexLock l(&partition_mu_); - for (auto& slave : slaves_) { - if (ip == slave->Ip() && port == slave->Port()) { - *session = slave->SessionId(); - return Status::OK(); - } +Status SyncMasterDB::AddSlaveNode(const std::string& ip, int port, int session_id) { + Status s = coordinator_.AddSlaveNode(ip, port, session_id); + if (!s.ok()) { + LOG(WARNING) << "Add Slave Node Failed, db: " << SyncDBInfo().ToString() << ", ip_port: " << ip << ":" + << port; + return s; } - return Status::NotFound("slave " + ip + ":" + std::to_string(port) + " not found"); + LOG(INFO) << "Add Slave Node, db: " << SyncDBInfo().ToString() << ", ip_port: " << ip << ":" << port; + return Status::OK(); } -Status SyncMasterPartition::AddSlaveNode(const std::string& ip, int port, int session_id) { - slash::MutexLock l(&partition_mu_); - for (auto& slave : slaves_) { - if (ip == slave->Ip() && port == slave->Port()) { - slave->SetSessionId(session_id); - return Status::OK(); - } +Status SyncMasterDB::RemoveSlaveNode(const std::string& ip, int port) { + Status s = coordinator_.RemoveSlaveNode(ip, port); + if (!s.ok()) { + LOG(WARNING) << "Remove Slave Node Failed, db: " << SyncDBInfo().ToString() << ", ip_port: " << ip + << ":" << port; + return s; } - std::shared_ptr slave_ptr = - std::make_shared(ip, port, SyncPartitionInfo().table_name_, SyncPartitionInfo().partition_id_, session_id); - slave_ptr->SetLastSendTime(slash::NowMicros()); - slave_ptr->SetLastRecvTime(slash::NowMicros()); - slaves_.push_back(slave_ptr); - LOG(INFO) << "Add Slave Node, partition: " << SyncPartitionInfo().ToString() << ", ip_port: "<< ip << ":" << port; + LOG(INFO) << "Remove Slave Node, DB: " << SyncDBInfo().ToString() << ", ip_port: " << ip << 
":" << port; return Status::OK(); } -Status SyncMasterPartition::RemoveSlaveNode(const std::string& ip, int port) { - slash::MutexLock l(&partition_mu_); - for (size_t i = 0; i < slaves_.size(); ++i) { - std::shared_ptr slave = slaves_[i]; - if (ip == slave->Ip() && port == slave->Port()) { - slaves_.erase(slaves_.begin() + i); - LOG(INFO) << "Remove Slave Node, Partition: " << SyncPartitionInfo().ToString() - << ", ip_port: "<< ip << ":" << port; - return Status::OK(); - } +Status SyncMasterDB::ActivateSlaveBinlogSync(const std::string& ip, int port, const LogOffset& offset) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); } - return Status::NotFound("RemoveSlaveNode" + ip + std::to_string(port)); -} -Status SyncMasterPartition::ActivateSlaveBinlogSync(const std::string& ip, - int port, - const std::shared_ptr binlog, - const BinlogOffset& offset) { { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - bool read_cache = CheckReadBinlogFromCache(); - - slave_ptr->Lock(); - slave_ptr->slave_state = kSlaveBinlogSync; - slave_ptr->sent_offset = offset; - slave_ptr->acked_offset = offset; - if (read_cache) { - slave_ptr->Unlock(); - // RegistToBinlogCacheWindow(ip, port, offset); - slave_ptr->Lock(); - slave_ptr->b_state = kReadFromCache; - } else { + std::lock_guard l(slave_ptr->slave_mu); + slave_ptr->slave_state = kSlaveBinlogSync; + slave_ptr->sent_offset = offset; + slave_ptr->acked_offset = offset; // read binlog file from file - s = slave_ptr->InitBinlogFileReader(binlog, offset); + Status s = slave_ptr->InitBinlogFileReader(Logger(), offset.b_offset); if (!s.ok()) { - slave_ptr->Unlock(); return Status::Corruption("Init binlog file reader failed" + s.ToString()); } + //Since we init a new reader, we should drop items in write queue and reset sync_window. 
+ //Or the sent_offset and acked_offset will not match + g_pika_rm->DropItemInOneWriteQueue(ip, port, slave_ptr->DBName()); + slave_ptr->sync_win.Reset(); slave_ptr->b_state = kReadFromFile; } - slave_ptr->Unlock(); - } Status s = SyncBinlogToWq(ip, port); if (!s.ok()) { @@ -219,280 +111,197 @@ Status SyncMasterPartition::ActivateSlaveBinlogSync(const std::string& ip, return Status::OK(); } -Status SyncMasterPartition::SyncBinlogToWq(const std::string& ip, int port) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); +Status SyncMasterDB::SyncBinlogToWq(const std::string& ip, int port) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); + } + Status s; + slave_ptr->Lock(); + s = ReadBinlogFileToWq(slave_ptr); + slave_ptr->Unlock(); if (!s.ok()) { return s; } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - if (slave_ptr->b_state == kReadFromFile) { - ReadBinlogFileToWq(slave_ptr); - } else if (slave_ptr->b_state == kReadFromCache) { - ReadCachedBinlogToWq(slave_ptr); - } - } return Status::OK(); } -Status SyncMasterPartition::ActivateSlaveDbSync(const std::string& ip, int port) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; +Status SyncMasterDB::ActivateSlaveDbSync(const std::string& ip, int port) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); } - { - slash::MutexLock l(&slave_ptr->slave_mu); + slave_ptr->Lock(); slave_ptr->slave_state = kSlaveDbSync; // invoke db sync - } - return Status::OK(); -} + slave_ptr->Unlock(); -Status SyncMasterPartition::ReadCachedBinlogToWq(const std::shared_ptr& slave_ptr) { return Status::OK(); } -Status SyncMasterPartition::ReadBinlogFileToWq(const std::shared_ptr& slave_ptr) { - int cnt = slave_ptr->sync_win.Remainings(); +Status SyncMasterDB::ReadBinlogFileToWq(const std::shared_ptr& slave_ptr) { + int cnt = slave_ptr->sync_win.Remaining(); std::shared_ptr reader = slave_ptr->binlog_reader; + if (!reader) { + return Status::OK(); + } std::vector tasks; for (int i = 0; i < cnt; ++i) { std::string msg; uint32_t filenum; uint64_t offset; + if (slave_ptr->sync_win.GetTotalBinlogSize() > PIKA_MAX_CONN_RBUF_HB * 2) { + LOG(INFO) << slave_ptr->ToString() + << " total binlog size in sync window is :" << slave_ptr->sync_win.GetTotalBinlogSize(); + break; + } Status s = reader->Get(&msg, &filenum, &offset); if (s.IsEndFile()) { break; } else if (s.IsCorruption() || s.IsIOError()) { - LOG(WARNING) << SyncPartitionInfo().ToString() - << " Read Binlog error : " << s.ToString(); + LOG(WARNING) << SyncDBInfo().ToString() << " Read Binlog error : " << s.ToString(); return s; } - slave_ptr->sync_win.Push(SyncWinItem(filenum, offset)); - - BinlogOffset sent_offset = BinlogOffset(filenum, offset); - slave_ptr->sent_offset = sent_offset; - slave_ptr->SetLastSendTime(slash::NowMicros()); - RmNode rm_node(slave_ptr->Ip(), slave_ptr->Port(), slave_ptr->TableName(), slave_ptr->PartitionId(), slave_ptr->SessionId()); - WriteTask task(rm_node, BinlogChip(sent_offset, msg)); + BinlogItem item; + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, msg, &item)) { + LOG(WARNING) << "Binlog item decode failed"; + return Status::Corruption("Binlog item decode failed"); + } + BinlogOffset 
sent_b_offset = BinlogOffset(filenum, offset); + LogicOffset sent_l_offset = LogicOffset(item.term_id(), item.logic_id()); + LogOffset sent_offset(sent_b_offset, sent_l_offset); + + slave_ptr->sync_win.Push(SyncWinItem(sent_offset, msg.size())); + slave_ptr->SetLastSendTime(pstd::NowMicros()); + RmNode rm_node(slave_ptr->Ip(), slave_ptr->Port(), slave_ptr->DBName(), slave_ptr->SessionId()); + WriteTask task(rm_node, BinlogChip(sent_offset, msg), slave_ptr->sent_offset); tasks.push_back(task); + slave_ptr->sent_offset = sent_offset; } if (!tasks.empty()) { - g_pika_rm->ProduceWriteQueue(slave_ptr->Ip(), slave_ptr->Port(), tasks); + g_pika_rm->ProduceWriteQueue(slave_ptr->Ip(), slave_ptr->Port(), db_info_.db_name_, tasks); } return Status::OK(); } -Status SyncMasterPartition::GetSlaveNode(const std::string& ip, int port, std::shared_ptr* slave_node) { - for (size_t i = 0; i < slaves_.size(); ++i) { - std::shared_ptr tmp_slave = slaves_[i]; - if (ip == tmp_slave->Ip() && port == tmp_slave->Port()) { - *slave_node = tmp_slave; - return Status::OK(); - } - } - return Status::NotFound("ip " + ip + " port " + std::to_string(port)); -} - -Status SyncMasterPartition::UpdateSlaveBinlogAckInfo(const std::string& ip, int port, const BinlogOffset& start, const BinlogOffset& end) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); +Status SyncMasterDB::ConsensusUpdateSlave(const std::string& ip, int port, const LogOffset& start, const LogOffset& end) { + Status s = coordinator_.UpdateSlave(ip, port, start, end); if (!s.ok()) { + LOG(WARNING) << SyncDBInfo().ToString() << s.ToString(); return s; } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - if (slave_ptr->slave_state != kSlaveBinlogSync) { - return Status::Corruption(ip + std::to_string(port) + "state not BinlogSync"); - } - bool res = slave_ptr->sync_win.Update(SyncWinItem(start), SyncWinItem(end), &(slave_ptr->acked_offset)); - if (!res) { - return Status::Corruption("UpdateAckedInfo failed"); - } - } return Status::OK(); } -Status SyncMasterPartition::GetSlaveSyncBinlogInfo(const std::string& ip, - int port, - BinlogOffset* sent_offset, - BinlogOffset* acked_offset) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; +Status SyncMasterDB::GetSlaveSyncBinlogInfo(const std::string& ip, int port, BinlogOffset* sent_offset, + BinlogOffset* acked_offset) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); } - { - slash::MutexLock l(&slave_ptr->slave_mu); - *sent_offset = slave_ptr->sent_offset; - *acked_offset = slave_ptr->acked_offset; - } + slave_ptr->Lock(); + *sent_offset = slave_ptr->sent_offset.b_offset; + *acked_offset = slave_ptr->acked_offset.b_offset; + slave_ptr->Unlock(); + return Status::OK(); } -Status SyncMasterPartition::GetSlaveState(const std::string& ip, - int port, - SlaveState* const slave_state) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; +Status SyncMasterDB::GetSlaveState(const std::string& ip, int port, SlaveState* const slave_state) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); } - { - slash::MutexLock l(&slave_ptr->slave_mu); + 
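Back in ReadBinlogFileToWq above, the window is filled under two limits at once: free slots reported by sync_win.Remaining() and a byte budget of twice PIKA_MAX_CONN_RBUF_HB. The dual-bounded fill loop, sketched with a vector of strings in place of the binlog reader:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Pull at most `slots` items whose cumulative size stays within `byte_budget`.
std::vector<std::string> FillWindow(const std::vector<std::string>& log,
                                    size_t slots, size_t byte_budget) {
  std::vector<std::string> out;
  size_t used = 0;
  for (const auto& item : log) {
    if (out.size() == slots) {
      break;  // window slots exhausted, like sync_win.Remaining() == 0
    }
    if (used + item.size() > byte_budget) {
      break;  // byte cap, like the GetTotalBinlogSize() check
    }
    used += item.size();
    out.push_back(item);
  }
  return out;
}

int main() {
  auto win = FillWindow({"aaaa", "bbbb", "cccc"}, 10, 9);
  std::cout << win.size() << "\n";  // 2: a third item would exceed the 9-byte budget
  return 0;
}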
slave_ptr->Lock(); *slave_state = slave_ptr->slave_state; - } + slave_ptr->Unlock(); + return Status::OK(); } -Status SyncMasterPartition::WakeUpSlaveBinlogSync() { - slash::MutexLock l(&partition_mu_); - for (auto& slave_ptr : slaves_) { - { - slash::MutexLock l(&slave_ptr->slave_mu); +Status SyncMasterDB::WakeUpSlaveBinlogSync() { + std::unordered_map<std::string, std::shared_ptr<SlaveNode>> slaves = GetAllSlaveNodes(); + std::vector<std::shared_ptr<SlaveNode>> to_del; + for (auto& slave_iter : slaves) { + std::shared_ptr<SlaveNode> slave_ptr = slave_iter.second; + std::lock_guard l(slave_ptr->slave_mu); if (slave_ptr->sent_offset == slave_ptr->acked_offset) { - if (slave_ptr->b_state == kReadFromFile) { - ReadBinlogFileToWq(slave_ptr); - } else if (slave_ptr->b_state == kReadFromCache) { - ReadCachedBinlogToWq(slave_ptr); + Status s = ReadBinlogFileToWq(slave_ptr); + if (!s.ok()) { + to_del.push_back(slave_ptr); + LOG(WARNING) << "WakeUpSlaveBinlogSync failed, Delete from RM, slave: " << slave_ptr->ToStringStatus() << " " + << s.ToString(); } } - } } - return Status::OK(); -} - -Status SyncMasterPartition::SetLastSendTime(const std::string& ip, int port, uint64_t time) { - slash::MutexLock l(&partition_mu_); - - std::shared_ptr<SlaveNode> slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - slave_ptr->SetLastSendTime(time); + for (auto& to_del_slave : to_del) { + RemoveSlaveNode(to_del_slave->Ip(), to_del_slave->Port()); } - return Status::OK(); } -Status SyncMasterPartition::GetLastSendTime(const std::string& ip, int port, uint64_t* time) { - slash::MutexLock l(&partition_mu_); - - std::shared_ptr<SlaveNode> slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; +Status SyncMasterDB::SetLastRecvTime(const std::string& ip, int port, uint64_t time) { + std::shared_ptr<SlaveNode> slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); } - { - slash::MutexLock l(&slave_ptr->slave_mu); - *time = slave_ptr->LastSendTime(); - } - - return Status::OK(); -} - -Status SyncMasterPartition::SetLastRecvTime(const std::string& ip, int port, uint64_t time) { - slash::MutexLock l(&partition_mu_); - - std::shared_ptr<SlaveNode> slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); + slave_ptr->Lock(); slave_ptr->SetLastRecvTime(time); - } + slave_ptr->Unlock(); return Status::OK(); } -Status SyncMasterPartition::GetLastRecvTime(const std::string& ip, int port, uint64_t* time) { - slash::MutexLock l(&partition_mu_); - - std::shared_ptr<SlaveNode> slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); +Status SyncMasterDB::GetSafetyPurgeBinlog(std::string* safety_purge) { + BinlogOffset boffset; + Status s = Logger()->GetProducerStatus(&(boffset.filenum), &(boffset.offset)); if (!s.ok()) { return s; } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - *time = slave_ptr->LastRecvTime(); - } - - return Status::OK(); -} - -Status SyncMasterPartition::GetSafetyPurgeBinlog(std::string* safety_purge) { - BinlogOffset boffset; - std::string table_name = partition_info_.table_name_; - uint32_t partition_id = partition_info_.partition_id_; - std::shared_ptr<Partition> partition = - g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition || !partition->GetBinlogOffset(&boffset)) { - return Status::NotFound("Partition NotFound"); - } else { - bool success = false; - uint32_t purge_max = boffset.filenum;
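Both versions of GetSafetyPurgeBinlog around this point compute the same min-fold: retain at least ten binlog files and stay strictly behind every binlog-syncing slave's ack. Worked through with plain integers (SafetyPurge is a stand-in name):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Highest filenum that may be purged, or nullopt when purging is unsafe.
std::optional<uint32_t> SafetyPurge(uint32_t master_filenum,
                                    const std::vector<uint32_t>& acked_filenums) {
  if (master_filenum < 10) {
    return std::nullopt;  // always retain the last ten files
  }
  uint32_t purge_max = master_filenum - 10;
  for (uint32_t acked : acked_filenums) {
    if (acked == 0) {
      return std::nullopt;  // a slave has not moved past file 0 yet
    }
    purge_max = std::min(acked - 1, purge_max);  // stay behind every ack
  }
  return purge_max;
}

int main() {
  auto p = SafetyPurge(25, {7, 12});
  std::cout << (p ? int(*p) : -1) << "\n";  // 6 == min(25 - 10, 7 - 1, 12 - 1)
  return 0;
}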
- if (purge_max >= 10) { - success = true; - purge_max -= 10; - slash::MutexLock l(&partition_mu_); - for (const auto& slave : slaves_) { - if (slave->slave_state == SlaveState::kSlaveBinlogSync - && slave->acked_offset.filenum > 0) { - purge_max = std::min(slave->acked_offset.filenum - 1, purge_max); - } else { - success = false; - break; - } + bool success = false; + uint32_t purge_max = boffset.filenum; + if (purge_max >= 10) { + success = true; + purge_max -= 10; + std::unordered_map> slaves = GetAllSlaveNodes(); + for (const auto& slave_iter : slaves) { + std::shared_ptr slave_ptr = slave_iter.second; + std::lock_guard l(slave_ptr->slave_mu); + if (slave_ptr->slave_state == SlaveState::kSlaveBinlogSync && slave_ptr->acked_offset.b_offset.filenum > 0) { + purge_max = std::min(slave_ptr->acked_offset.b_offset.filenum - 1, purge_max); + } else { + success = false; + break; } } - *safety_purge = (success ? kBinlogPrefix + std::to_string(static_cast(purge_max)) : "none"); } + *safety_purge = (success ? kBinlogPrefix + std::to_string(static_cast(purge_max)) : "none"); return Status::OK(); } -bool SyncMasterPartition::BinlogCloudPurge(uint32_t index) { +bool SyncMasterDB::BinlogCloudPurge(uint32_t index) { BinlogOffset boffset; - std::string table_name = partition_info_.table_name_; - uint32_t partition_id = partition_info_.partition_id_; - std::shared_ptr partition = - g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition || !partition->GetBinlogOffset(&boffset)) { + Status s = Logger()->GetProducerStatus(&(boffset.filenum), &(boffset.offset)); + if (!s.ok()) { + return false; + } + if (index > (boffset.filenum - 10)) { // remain some more return false; } else { - if (index > boffset.filenum - 10) { // remain some more - return false; - } else { - slash::MutexLock l(&partition_mu_); - for (const auto& slave : slaves_) { - if (slave->slave_state == SlaveState::kSlaveDbSync) { + std::unordered_map> slaves = GetAllSlaveNodes(); + for (const auto& slave_iter : slaves) { + std::shared_ptr slave_ptr = slave_iter.second; + std::lock_guard l(slave_ptr->slave_mu); + if (slave_ptr->slave_state == SlaveState::kSlaveDbSync) { + return false; + } else if (slave_ptr->slave_state == SlaveState::kSlaveBinlogSync) { + if (index >= slave_ptr->acked_offset.b_offset.filenum) { return false; - } else if (slave->slave_state == SlaveState::kSlaveBinlogSync) { - if (index >= slave->acked_offset.filenum) { - return false; - } } } } @@ -500,266 +309,243 @@ bool SyncMasterPartition::BinlogCloudPurge(uint32_t index) { return true; } -Status SyncMasterPartition::CheckSyncTimeout(uint64_t now) { - slash::MutexLock l(&partition_mu_); +Status SyncMasterDB::CheckSyncTimeout(uint64_t now) { + std::unordered_map> slaves = GetAllSlaveNodes(); - std::shared_ptr slave_ptr = nullptr; std::vector to_del; - - for (auto& slave_ptr : slaves_) { - slash::MutexLock l(&slave_ptr->slave_mu); + for (auto& slave_iter : slaves) { + std::shared_ptr slave_ptr = slave_iter.second; + std::lock_guard l(slave_ptr->slave_mu); if (slave_ptr->LastRecvTime() + kRecvKeepAliveTimeout < now) { - to_del.push_back(Node(slave_ptr->Ip(), slave_ptr->Port())); - } else if (slave_ptr->LastSendTime() + kSendKeepAliveTimeout < now) { + to_del.emplace_back(slave_ptr->Ip(), slave_ptr->Port()); + } else if (slave_ptr->LastSendTime() + kSendKeepAliveTimeout < now && + slave_ptr->sent_offset == slave_ptr->acked_offset) { std::vector task; - RmNode rm_node(slave_ptr->Ip(), slave_ptr->Port(), slave_ptr->TableName(), slave_ptr->PartitionId(), 
slave_ptr->SessionId()); - WriteTask empty_task(rm_node, BinlogChip(BinlogOffset(0, 0), "")); + RmNode rm_node(slave_ptr->Ip(), slave_ptr->Port(), slave_ptr->DBName(), slave_ptr->SessionId()); + WriteTask empty_task(rm_node, BinlogChip(LogOffset(), ""), LogOffset()); task.push_back(empty_task); Status s = g_pika_rm->SendSlaveBinlogChipsRequest(slave_ptr->Ip(), slave_ptr->Port(), task); slave_ptr->SetLastSendTime(now); if (!s.ok()) { - LOG(INFO)<< "Send ping failed: " << s.ToString(); + LOG(INFO) << "Send ping failed: " << s.ToString(); return Status::Corruption("Send ping failed: " + slave_ptr->Ip() + ":" + std::to_string(slave_ptr->Port())); } } } + for (auto& node : to_del) { - for (size_t i = 0; i < slaves_.size(); ++i) { - if (node.Ip() == slaves_[i]->Ip() && node.Port() == slaves_[i]->Port()) { - slaves_.erase(slaves_.begin() + i); - LOG(WARNING) << SyncPartitionInfo().ToString() << " Master del Recv Timeout slave success " << node.ToString(); - break; - } - } + coordinator_.SyncPros().RemoveSlaveNode(node.Ip(), node.Port()); + g_pika_rm->DropItemInOneWriteQueue(node.Ip(), node.Port(), DBName()); + LOG(WARNING) << SyncDBInfo().ToString() << " Master del Recv Timeout slave success " << node.ToString(); } return Status::OK(); } -std::string SyncMasterPartition::ToStringStatus() { +std::string SyncMasterDB::ToStringStatus() { std::stringstream tmp_stream; tmp_stream << " Current Master Session: " << session_id_ << "\r\n"; - slash::MutexLock l(&partition_mu_); - for (size_t i = 0; i < slaves_.size(); ++i) { - std::shared_ptr slave_ptr = slaves_[i]; - slash::MutexLock l(&slave_ptr->slave_mu); - tmp_stream << " slave[" << i << "]: " << slave_ptr->ToString() << - "\r\n" << slave_ptr->ToStringStatus(); + tmp_stream << " Consensus: " + << "\r\n" + << coordinator_.ToStringStatus(); + std::unordered_map> slaves = GetAllSlaveNodes(); + int i = 0; + for (const auto& slave_iter : slaves) { + std::shared_ptr slave_ptr = slave_iter.second; + std::lock_guard l(slave_ptr->slave_mu); + tmp_stream << " slave[" << i << "]: " << slave_ptr->ToString() << "\r\n" << slave_ptr->ToStringStatus(); + i++; } return tmp_stream.str(); } -void SyncMasterPartition::GetValidSlaveNames(std::vector* slavenames) { - slash::MutexLock l(&partition_mu_); - for (auto ptr : slaves_) { - if (ptr->slave_state != kSlaveBinlogSync) { - continue; - } - std::string name = ptr->Ip() + ":" + std::to_string(ptr->Port()); - slavenames->push_back(name); - } -} - -Status SyncMasterPartition::GetInfo(std::string* info) { - std::stringstream tmp_stream; - slash::MutexLock l(&partition_mu_); - tmp_stream << " Role: Master" << "\r\n"; - tmp_stream << " connected_slaves: " << slaves_.size() << "\r\n"; - for (size_t i = 0; i < slaves_.size(); ++i) { - std::shared_ptr slave_ptr = slaves_[i]; - slash::MutexLock l(&slave_ptr->slave_mu); - tmp_stream << " slave[" << i << "]: " - << slave_ptr->Ip() << ":" << std::to_string(slave_ptr->Port()) << "\r\n"; - tmp_stream << " replication_status: " << SlaveStateMsg[slave_ptr->slave_state] << "\r\n"; - if (slave_ptr->slave_state == kSlaveBinlogSync) { - std::shared_ptr partition = g_pika_server->GetTablePartitionById(slave_ptr->TableName(), slave_ptr->PartitionId()); - BinlogOffset binlog_offset; - if (!partition || !partition->GetBinlogOffset(&binlog_offset)) { - return Status::Corruption("Get Info failed."); - } - uint64_t lag = (binlog_offset.filenum - slave_ptr->acked_offset.filenum) * - g_pika_conf->binlog_file_size() - + (binlog_offset.offset - slave_ptr->acked_offset.offset); - tmp_stream << " lag: " 
- tmp_stream << " lag: " << lag << "\r\n"; - } - } - info->append(tmp_stream.str()); - return Status::OK(); -} - -int32_t SyncMasterPartition::GenSessionId() { - slash::MutexLock ml(&session_mu_); +int32_t SyncMasterDB::GenSessionId() { + std::lock_guard ml(session_mu_); return session_id_++; } -bool SyncMasterPartition::CheckSessionId(const std::string& ip, int port, - const std::string& table_name, - uint64_t partition_id, int session_id) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr<SlaveNode> slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - LOG(WARNING)<< "Check SessionId Get Slave Node Error: " - << ip << ":" << port << "," << table_name << "_" << partition_id; +bool SyncMasterDB::CheckSessionId(const std::string& ip, int port, const std::string& db_name, + int session_id) { + std::shared_ptr<SlaveNode> slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + LOG(WARNING) << "Check SessionId Get Slave Node Error: " << ip << ":" << port << "," << db_name; return false; } + + std::lock_guard l(slave_ptr->slave_mu); if (session_id != slave_ptr->SessionId()) { - LOG(WARNING)<< "Check SessionId Mismatch: " << ip << ":" << port << ", " - << table_name << "_" << partition_id << " expected_session: " << session_id - << ", actual_session:" << slave_ptr->SessionId(); + LOG(WARNING) << "Check SessionId Mismatch: " << ip << ":" << port << ", " << db_name + << " expected_session: " << session_id << ", actual_session: " << slave_ptr->SessionId(); return false; } return true; } +Status SyncMasterDB::ConsensusProposeLog(const std::shared_ptr<Cmd>& cmd_ptr) { + return coordinator_.ProposeLog(cmd_ptr); +} + +Status SyncMasterDB::ConsensusProcessLeaderLog(const std::shared_ptr<Cmd>& cmd_ptr, const BinlogItem& attribute) { + return coordinator_.ProcessLeaderLog(cmd_ptr, attribute); +} + +LogOffset SyncMasterDB::ConsensusCommittedIndex() { return coordinator_.committed_index(); } + +LogOffset SyncMasterDB::ConsensusLastIndex() { return coordinator_.MemLogger()->last_offset(); } -/* SyncSlavePartition */ -SyncSlavePartition::SyncSlavePartition(const std::string& table_name, - uint32_t partition_id) - : SyncPartition(table_name, partition_id), - m_info_(), - repl_state_(kNoConnect), - local_ip_("") { - m_info_.SetLastRecvTime(slash::NowMicros()); +std::shared_ptr<SlaveNode> SyncMasterDB::GetSlaveNode(const std::string& ip, int port) { + return coordinator_.SyncPros().GetSlaveNode(ip, port); } -void SyncSlavePartition::SetReplState(const ReplState& repl_state) { +std::unordered_map<std::string, std::shared_ptr<SlaveNode>> SyncMasterDB::GetAllSlaveNodes() { + return coordinator_.SyncPros().GetAllSlaveNodes(); +} + +/* SyncSlaveDB */ +SyncSlaveDB::SyncSlaveDB(const std::string& db_name) + : SyncDB(db_name) { + std::string dbsync_path = g_pika_conf->db_sync_path() + "/" + db_name; + rsync_cli_.reset(new rsync::RsyncClient(dbsync_path, db_name)); + m_info_.SetLastRecvTime(pstd::NowMicros()); +} + +void SyncSlaveDB::SetReplState(const ReplState& repl_state) { if (repl_state == ReplState::kNoConnect) { - // deactivate Deactivate(); return; } - slash::MutexLock l(&partition_mu_); + std::lock_guard l(db_mu_); repl_state_ = repl_state; } -ReplState SyncSlavePartition::State() { - slash::MutexLock l(&partition_mu_); +ReplState SyncSlaveDB::State() { + std::lock_guard l(db_mu_); return repl_state_; } -void SyncSlavePartition::SetLastRecvTime(uint64_t time) { - slash::MutexLock l(&partition_mu_); +void SyncSlaveDB::SetLastRecvTime(uint64_t time) { + std::lock_guard l(db_mu_); m_info_.SetLastRecvTime(time); }
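GenSessionId and CheckSessionId above implement a simple fencing scheme: every slave (re)registration gets a monotonically increasing session id, and any ack carrying a stale id is rejected, so a zombie connection cannot advance replication state. A minimal standalone sketch of the idea (illustrative names, not Pika's API):

// Sketch: per-peer session fencing with a monotonically increasing counter.
#include <cstdint>
#include <string>
#include <unordered_map>

struct SessionRegistry {
  std::unordered_map<std::string, int32_t> sessions;  // "ip:port" -> current session
  int32_t next = 0;
  // A reconnecting peer gets a fresh id; the old one is implicitly invalidated.
  int32_t Register(const std::string& addr) { return sessions[addr] = next++; }
  // Requests must prove they belong to the current session.
  bool Check(const std::string& addr, int32_t sid) const {
    auto it = sessions.find(addr);
    return it != sessions.end() && it->second == sid;
  }
};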
-uint64_t SyncSlavePartition::LastRecvTime() { - slash::MutexLock l(&partition_mu_); - return m_info_.LastRecvTime(); -} - -Status SyncSlavePartition::CheckSyncTimeout(uint64_t now) { - slash::MutexLock l(&partition_mu_); +Status SyncSlaveDB::CheckSyncTimeout(uint64_t now) { + std::lock_guard l(db_mu_); // no need to do session keepalive, return OK if (repl_state_ != ReplState::kWaitDBSync && repl_state_ != ReplState::kConnected) { return Status::OK(); } if (m_info_.LastRecvTime() + kRecvKeepAliveTimeout < now) { - m_info_ = RmNode(); + // update slave state to kTryConnect, and try reconnect to master node repl_state_ = ReplState::kTryConnect; - g_pika_server->SetLoopPartitionStateMachine(true); } return Status::OK(); } -Status SyncSlavePartition::GetInfo(std::string* info) { +Status SyncSlaveDB::GetInfo(std::string* info) { std::string tmp_str = " Role: Slave\r\n"; tmp_str += " master: " + MasterIp() + ":" + std::to_string(MasterPort()) + "\r\n"; + tmp_str += " slave status: " + ReplStateMsg[repl_state_] + "\r\n"; info->append(tmp_str); return Status::OK(); } -void SyncSlavePartition::Activate(const RmNode& master, const ReplState& repl_state) { - slash::MutexLock l(&partition_mu_); +void SyncSlaveDB::Activate(const RmNode& master, const ReplState& repl_state) { + std::lock_guard l(db_mu_); m_info_ = master; repl_state_ = repl_state; - m_info_.SetLastRecvTime(slash::NowMicros()); + m_info_.SetLastRecvTime(pstd::NowMicros()); } -void SyncSlavePartition::Deactivate() { - slash::MutexLock l(&partition_mu_); +void SyncSlaveDB::Deactivate() { + std::lock_guard l(db_mu_); m_info_ = RmNode(); repl_state_ = ReplState::kNoConnect; + rsync_cli_->Stop(); } -std::string SyncSlavePartition::ToStringStatus() { +std::string SyncSlaveDB::ToStringStatus() { return " Master: " + MasterIp() + ":" + std::to_string(MasterPort()) + "\r\n" + - " SessionId: " + std::to_string(MasterSessionId()) + "\r\n" + - " SyncStatus " + ReplStateMsg[repl_state_] + "\r\n"; + " SessionId: " + std::to_string(MasterSessionId()) + "\r\n" + " SyncStatus " + ReplStateMsg[repl_state_] + + "\r\n"; } -/* SyncWindow */ +const std::string& SyncSlaveDB::MasterIp() { + std::lock_guard l(db_mu_); + return m_info_.Ip(); +} -void SyncWindow::Push(const SyncWinItem& item) { - win_.push_back(item); +int SyncSlaveDB::MasterPort() { + std::lock_guard l(db_mu_); + return m_info_.Port(); }
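The SyncWindow code being removed below tracks in-flight binlog chunks: Push appends items in send order, Update marks an acked [start, end] range and then advances the window head across the fully-acked prefix, yielding the new acked offset. A compact standalone sketch of that sliding-window idea (simplified item type, not Pika's):

// Sketch of the binlog ack window: mark a contiguous acked range, then
// pop the acked prefix so the window head is the replication frontier.
#include <cstdint>
#include <deque>

struct Item { uint64_t offset; bool acked = false; };

struct Window {
  std::deque<Item> win;  // items in send order
  // Returns the new ack frontier (last offset popped), or 0 if the head is unacked.
  uint64_t Ack(uint64_t start, uint64_t end) {
    for (auto& it : win)
      if (it.offset >= start && it.offset <= end) it.acked = true;
    uint64_t frontier = 0;
    while (!win.empty() && win.front().acked) {  // advance over the acked prefix
      frontier = win.front().offset;
      win.pop_front();
    }
    return frontier;
  }
};

Out-of-order acks inside the window are tolerated by design: the frontier only moves once everything before it is acknowledged.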
-bool SyncWindow::Update(const SyncWinItem& start_item, - const SyncWinItem& end_item, BinlogOffset* acked_offset) { - size_t start_pos = win_.size(), end_pos = win_.size(); - for (size_t i = 0; i < win_.size(); ++i) { - if (win_[i] == start_item) { - start_pos = i; - } - if (win_[i] == end_item) { - end_pos = i; - break; - } - } - if (start_pos == win_.size() || end_pos == win_.size()) { - LOG(WARNING) << "Ack offset Start: " << - start_item.ToString() << "End: " << end_item.ToString() << - " not found in binlog controller window." << - std::endl << "window status "<< std::endl << ToStringStatus(); - return false; - } - for (size_t i = start_pos; i <= end_pos; ++i) { - win_[i].acked_ = true; +void SyncSlaveDB::SetMasterSessionId(int32_t session_id) { + std::lock_guard l(db_mu_); + m_info_.SetSessionId(session_id); +} + +int32_t SyncSlaveDB::MasterSessionId() { + std::lock_guard l(db_mu_); + return m_info_.SessionId(); +} + +void SyncSlaveDB::SetLocalIp(const std::string& local_ip) { + std::lock_guard l(db_mu_); + local_ip_ = local_ip; +} + +std::string SyncSlaveDB::LocalIp() { + std::lock_guard l(db_mu_); + return local_ip_; +} + +void SyncSlaveDB::StopRsync() { + rsync_cli_->Stop(); +} + +pstd::Status SyncSlaveDB::ActivateRsync() { + Status s = Status::OK(); + if (!rsync_cli_->IsIdle()) { + return s; } - while (!win_.empty()) { - if (win_[0].acked_) { - *acked_offset = win_[0].offset_; - win_.pop_front(); - } else { - break; + LOG(WARNING) << "Slave DB: " << DBName() << " Activating Rsync ... (retry count:" << rsync_init_retry_count_ << ")"; + if (rsync_cli_->Init()) { + rsync_init_retry_count_ = 0; + rsync_cli_->Start(); + return s; + } else { + rsync_init_retry_count_ += 1; + if (rsync_init_retry_count_ >= kMaxRsyncInitReTryTimes) { + SetReplState(ReplState::kError); + LOG(ERROR) << "Full Sync Stage - Rsync Init failed: Slave failed to pull meta info (generated by bgsave task in Master) from Master after MaxRsyncInitReTryTimes(" + << kMaxRsyncInitReTryTimes << " times) is reached. This usually means the Master's bgsave task has taken an unexpectedly long time."; } + return Status::Error("rsync client init failed!"); } - return true; -} - -int SyncWindow::Remainings() { - std::size_t remaining_size = g_pika_conf->sync_window_size() - win_.size(); - return remaining_size > 0 ? remaining_size : 0; } /* PikaReplicaManager */ -PikaReplicaManager::PikaReplicaManager() - : last_meta_sync_timestamp_(0) { +PikaReplicaManager::PikaReplicaManager() { std::set<std::string> ips; ips.insert("0.0.0.0"); int port = g_pika_conf->port() + kPortShiftReplServer; - pika_repl_client_ = new PikaReplClient(3000, 60); - pika_repl_server_ = new PikaReplServer(ips, port, 3000); - InitPartition(); - pthread_rwlock_init(&partitions_rw_, NULL); -} - -PikaReplicaManager::~PikaReplicaManager() { - delete pika_repl_client_; - delete pika_repl_server_; - pthread_rwlock_destroy(&partitions_rw_); + pika_repl_client_ = std::make_unique<PikaReplClient>(3000, 60); + pika_repl_server_ = std::make_unique<PikaReplServer>(ips, port, 3000); + InitDB(); } void PikaReplicaManager::Start() { int ret = 0; ret = pika_repl_client_->Start(); - if (ret != pink::kSuccess) { - LOG(FATAL) << "Start Repl Client Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error"); + if (ret != net::kSuccess) { + LOG(FATAL) << "Start Repl Client Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); } ret = pika_repl_server_->Start(); - if (ret != pink::kSuccess) { - LOG(FATAL) << "Start Repl Server Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error"); + if (ret != net::kSuccess) { + LOG(FATAL) << "Start Repl Server Error: " << ret + << (ret == net::kCreateThreadError ?
": create thread error " : ": other error"); } } @@ -768,56 +554,81 @@ void PikaReplicaManager::Stop() { pika_repl_server_->Stop(); } -void PikaReplicaManager::InitPartition() { - std::vector table_structs = g_pika_conf->table_structs(); - for (const auto& table : table_structs) { - const std::string& table_name = table.table_name; - for (const auto& partition_id : table.partition_ids) { - sync_master_partitions_[PartitionInfo(table_name, partition_id)] - = std::make_shared(table_name, partition_id); - sync_slave_partitions_[PartitionInfo(table_name, partition_id)] - = std::make_shared(table_name, partition_id); +bool PikaReplicaManager::CheckMasterSyncFinished() { + for (auto& iter : sync_master_dbs_) { + std::shared_ptr db = iter.second; + LogOffset commit = db->ConsensusCommittedIndex(); + BinlogOffset binlog; + Status s = db->StableLogger()->Logger()->GetProducerStatus(&binlog.filenum, &binlog.offset); + if (!s.ok()) { + return false; + } + if (commit.b_offset < binlog) { + return false; } } + return true; +} + +void PikaReplicaManager::InitDB() { + std::vector db_structs = g_pika_conf->db_structs(); + for (const auto& db : db_structs) { + const std::string& db_name = db.db_name; + sync_master_dbs_[DBInfo(db_name)] = std::make_shared(db_name); + sync_slave_dbs_[DBInfo(db_name)] = std::make_shared(db_name); + } } -void PikaReplicaManager::ProduceWriteQueue(const std::string& ip, int port, const std::vector& tasks) { - slash::MutexLock l(&write_queue_mu_); +void PikaReplicaManager::ProduceWriteQueue(const std::string& ip, int port, std::string db_name, + const std::vector& tasks) { + std::lock_guard l(write_queue_mu_); std::string index = ip + ":" + std::to_string(port); for (auto& task : tasks) { - write_queues_[index].push(task); + write_queues_[index][db_name].push(task); } } int PikaReplicaManager::ConsumeWriteQueue() { - std::vector to_delete; std::unordered_map>> to_send_map; int counter = 0; { - slash::MutexLock l(&write_queue_mu_); - std::vector to_delete; + std::lock_guard l(write_queue_mu_); for (auto& iter : write_queues_) { - std::queue& queue = iter.second; - for (int i = 0; i < kBinlogSendPacketNum; ++i) { - if (queue.empty()) { - break; - } - size_t batch_index = queue.size() > kBinlogSendBatchNum ? kBinlogSendBatchNum : queue.size(); - std::vector to_send; - for (size_t i = 0; i < batch_index; ++i) { - to_send.push_back(queue.front()); - queue.pop(); - counter++; + const std::string& ip_port = iter.first; + std::unordered_map>& p_map = iter.second; + for (auto& db_queue : p_map) { + std::queue& queue = db_queue.second; + for (int i = 0; i < kBinlogSendPacketNum; ++i) { + if (queue.empty()) { + break; + } + size_t batch_index = queue.size() > kBinlogSendBatchNum ? 
int PikaReplicaManager::ConsumeWriteQueue() { - std::vector<std::string> to_delete; std::unordered_map<std::string, std::vector<std::vector<WriteTask>>> to_send_map; int counter = 0; { - slash::MutexLock l(&write_queue_mu_); - std::vector<std::string> to_delete; + std::lock_guard l(write_queue_mu_); for (auto& iter : write_queues_) { - std::queue<WriteTask>& queue = iter.second; - for (int i = 0; i < kBinlogSendPacketNum; ++i) { - if (queue.empty()) { - break; - } - size_t batch_index = queue.size() > kBinlogSendBatchNum ? kBinlogSendBatchNum : queue.size(); - std::vector<WriteTask> to_send; - for (size_t i = 0; i < batch_index; ++i) { - to_send.push_back(queue.front()); - queue.pop(); - counter++; + const std::string& ip_port = iter.first; + std::unordered_map<std::string, std::queue<WriteTask>>& p_map = iter.second; + for (auto& db_queue : p_map) { + std::queue<WriteTask>& queue = db_queue.second; + for (int i = 0; i < kBinlogSendPacketNum; ++i) { + if (queue.empty()) { + break; + } + size_t batch_index = queue.size() > kBinlogSendBatchNum ? kBinlogSendBatchNum : queue.size(); + std::vector<WriteTask> to_send; + size_t batch_size = 0; + for (size_t i = 0; i < batch_index; ++i) { + WriteTask& task = queue.front(); + batch_size += task.binlog_chip_.binlog_.size(); + // make sure SerializeToString will not exceed 2G + if (batch_size > PIKA_MAX_CONN_RBUF_HB) { + break; + } + to_send.push_back(task); + queue.pop(); + counter++; + } + if (!to_send.empty()) { + to_send_map[ip_port].push_back(std::move(to_send)); + } } - to_send_map[iter.first].push_back(std::move(to_send)); } } } + std::vector<std::string> to_delete; for (auto& iter : to_send_map) { std::string ip; int port = 0; - if (!slash::ParseIpPortString(iter.first, ip, port)) { + if (!pstd::ParseIpPortString(iter.first, ip, port)) { LOG(WARNING) << "Parse ip_port error " << iter.first; continue; } @@ -832,220 +643,123 @@ int PikaReplicaManager::ConsumeWriteQueue() { } if (!to_delete.empty()) { - { - slash::MutexLock l(&write_queue_mu_); - for (auto& del_queue : to_delete) { - write_queues_.erase(del_queue); - } + std::lock_guard l(write_queue_mu_); + for (auto& del_queue : to_delete) { + write_queues_.erase(del_queue); } } return counter; } +void PikaReplicaManager::DropItemInOneWriteQueue(const std::string& ip, int port, const std::string& db_name) { + std::lock_guard l(write_queue_mu_); + std::string index = ip + ":" + std::to_string(port); + if (write_queues_.find(index) != write_queues_.end()) { + write_queues_[index].erase(db_name); + } +} + void PikaReplicaManager::DropItemInWriteQueue(const std::string& ip, int port) { - slash::MutexLock l(&write_queue_mu_); + std::lock_guard l(write_queue_mu_); std::string index = ip + ":" + std::to_string(port); write_queues_.erase(index); } -void PikaReplicaManager::ScheduleReplServerBGTask(pink::TaskFunc func, void* arg) { +void PikaReplicaManager::ScheduleReplServerBGTask(net::TaskFunc func, void* arg) { pika_repl_server_->Schedule(func, arg); } -void PikaReplicaManager::ScheduleReplClientBGTask(pink::TaskFunc func, void* arg) { +void PikaReplicaManager::ScheduleReplClientBGTask(net::TaskFunc func, void* arg) { pika_repl_client_->Schedule(func, arg); } -void PikaReplicaManager::ScheduleWriteBinlogTask(const std::string& table_partition, - const std::shared_ptr<InnerMessage::InnerResponse> res, - std::shared_ptr<pink::PbConn> conn, - void* res_private_data) { - pika_repl_client_->ScheduleWriteBinlogTask(table_partition, res, conn, res_private_data); +void PikaReplicaManager::ScheduleReplClientBGTaskByDBName(net::TaskFunc func, void* arg, const std::string &db_name) { + pika_repl_client_->ScheduleByDBName(func, arg, db_name); } -void PikaReplicaManager::ScheduleWriteDBTask(const std::string& dispatch_key, - PikaCmdArgsType* argv, BinlogItem* binlog_item, - const std::string& table_name, uint32_t partition_id) { - pika_repl_client_->ScheduleWriteDBTask(dispatch_key, argv, binlog_item, table_name, partition_id); +void PikaReplicaManager::ScheduleWriteBinlogTask(const std::string& db, + const std::shared_ptr<InnerMessage::InnerResponse>& res, + const std::shared_ptr<net::PbConn>& conn, void* res_private_data) { + pika_repl_client_->ScheduleWriteBinlogTask(db, res, conn, res_private_data); } -void PikaReplicaManager::ReplServerRemoveClientConn(int fd) { - pika_repl_server_->RemoveClientConn(fd); +void PikaReplicaManager::ScheduleWriteDBTask(const std::shared_ptr<Cmd>& cmd_ptr, const std::string& db_name) { + pika_repl_client_->ScheduleWriteDBTask(cmd_ptr, db_name); } -void PikaReplicaManager::ReplServerUpdateClientConnMap(const std::string& ip_port, - int fd) { - pika_repl_server_->UpdateClientConnMap(ip_port, fd); -} +void
PikaReplicaManager::ReplServerRemoveClientConn(int fd) { pika_repl_server_->RemoveClientConn(fd); } -Status PikaReplicaManager::UpdateSyncBinlogStatus(const RmNode& slave, const BinlogOffset& range_start, const BinlogOffset& range_end) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->UpdateSlaveBinlogAckInfo(slave.Ip(), slave.Port(), range_start, range_end); - if (!s.ok()) { - return s; - } - s = partition->SyncBinlogToWq(slave.Ip(), slave.Port()); - if (!s.ok()) { - return s; - } - return Status::OK(); +void PikaReplicaManager::ReplServerUpdateClientConnMap(const std::string& ip_port, int fd) { + pika_repl_server_->UpdateClientConnMap(ip_port, fd); } -Status PikaReplicaManager::GetSyncBinlogStatus(const RmNode& slave, BinlogOffset* sent_offset, BinlogOffset* acked_offset) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { +Status PikaReplicaManager::UpdateSyncBinlogStatus(const RmNode& slave, const LogOffset& offset_start, + const LogOffset& offset_end) { + std::shared_lock l(dbs_rw_); + if (sync_master_dbs_.find(slave.NodeDBInfo()) == sync_master_dbs_.end()) { return Status::NotFound(slave.ToString() + " not found"); } - std::shared_ptr partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->GetSlaveSyncBinlogInfo(slave.Ip(), slave.Port(), sent_offset, acked_offset); + std::shared_ptr db = sync_master_dbs_[slave.NodeDBInfo()]; + Status s = db->ConsensusUpdateSlave(slave.Ip(), slave.Port(), offset_start, offset_end); if (!s.ok()) { return s; } - return Status::OK(); -} - -Status PikaReplicaManager::GetSyncMasterPartitionSlaveState(const RmNode& slave, - SlaveState* const slave_state) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->GetSlaveState(slave.Ip(), slave.Port(), slave_state); + s = db->SyncBinlogToWq(slave.Ip(), slave.Port()); if (!s.ok()) { return s; } return Status::OK(); } -bool PikaReplicaManager::CheckPartitionSlaveExist(const RmNode& slave) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return false; - } - std::shared_ptr partition = sync_master_partitions_[slave.NodePartitionInfo()]; - return partition->CheckSlaveNodeExist(slave.Ip(), slave.Port()); -} - -Status PikaReplicaManager::GetPartitionSlaveSession(const RmNode& slave, int32_t* session) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString(), + "not found"); - } - std::shared_ptr partition = sync_master_partitions_[slave.NodePartitionInfo()]; - return partition->GetSlaveNodeSession(slave.Ip(), slave.Port(), session); -} - -Status PikaReplicaManager::AddPartitionSlave(const RmNode& slave) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return 
Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s= partition->RemoveSlaveNode(slave.Ip(), slave.Port()); - if (!s.ok() && !s.IsNotFound()) { - return s; - } - s = partition->AddSlaveNode(slave.Ip(), slave.Port(), slave.SessionId()); - if (!s.ok()) { - return s; +bool PikaReplicaManager::CheckSlaveDBState(const std::string& ip, const int port) { + std::shared_ptr db = nullptr; + for (const auto& iter : g_pika_rm->sync_slave_dbs_) { + db = iter.second; + if (db->State() == ReplState::kDBNoConnect && db->MasterIp() == ip && + db->MasterPort() + kPortShiftReplServer == port) { + LOG(INFO) << "DB: " << db->SyncDBInfo().ToString() + << " has been dbslaveof no one, then will not try reconnect."; + return false; + } } - return Status::OK(); + return true; } -Status PikaReplicaManager::RemovePartitionSlave(const RmNode& slave) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->RemoveSlaveNode(slave.Ip(), slave.Port()); - if (!s.ok()) { - return s; +Status PikaReplicaManager::DeactivateSyncSlaveDB(const std::string& ip, int port) { + std::shared_lock l(dbs_rw_); + for (auto& iter : sync_slave_dbs_) { + std::shared_ptr db = iter.second; + if (db->MasterIp() == ip && db->MasterPort() == port) { + db->Deactivate(); + } } return Status::OK(); } Status PikaReplicaManager::LostConnection(const std::string& ip, int port) { - slash::RWLock l(&partitions_rw_, false); - for (auto& iter : sync_master_partitions_) { - std::shared_ptr partition = iter.second; - Status s = partition->RemoveSlaveNode(ip, port); + std::shared_lock l(dbs_rw_); + for (auto& iter : sync_master_dbs_) { + std::shared_ptr db = iter.second; + Status s = db->RemoveSlaveNode(ip, port); if (!s.ok() && !s.IsNotFound()) { LOG(WARNING) << "Lost Connection failed " << s.ToString(); } } - for (auto& iter : sync_slave_partitions_) { - std::shared_ptr partition = iter.second; - if (partition->MasterIp() == ip && partition->MasterPort() == port) { - partition->Deactivate(); + for (auto& iter : sync_slave_dbs_) { + std::shared_ptr db = iter.second; + if (db->MasterIp() == ip && db->MasterPort() == port) { + db->Deactivate(); } } return Status::OK(); } -Status PikaReplicaManager::ActivateBinlogSync(const RmNode& slave, const BinlogOffset& offset) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr sync_partition = sync_master_partitions_[slave.NodePartitionInfo()]; - - std::shared_ptr partition = g_pika_server->GetTablePartitionById(slave.TableName(), slave.PartitionId()); - if (!partition) { - return Status::Corruption("Found Binlog faile"); - } - - Status s = sync_partition->ActivateSlaveBinlogSync(slave.Ip(), slave.Port(), partition->logger(), offset); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -Status PikaReplicaManager::ActivateDbSync(const RmNode& slave) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr partition = 
sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->ActivateSlaveDbSync(slave.Ip(), slave.Port()); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -Status PikaReplicaManager::SetMasterLastRecvTime(const RmNode& node, uint64_t time) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(node.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(node.ToString() + " not found"); - } - std::shared_ptr partition = sync_master_partitions_[node.NodePartitionInfo()]; - partition->SetLastRecvTime(node.Ip(), node.Port(), time); - return Status::OK(); -} - -Status PikaReplicaManager::SetSlaveLastRecvTime(const RmNode& node, uint64_t time) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(node.NodePartitionInfo()) == sync_slave_partitions_.end()) { - return Status::NotFound(node.ToString() + " not found"); - } - std::shared_ptr partition = sync_slave_partitions_[node.NodePartitionInfo()]; - partition->SetLastRecvTime(time); - return Status::OK(); -} - Status PikaReplicaManager::WakeUpBinlogSync() { - slash::RWLock l(&partitions_rw_, false); - for (auto& iter : sync_master_partitions_) { - std::shared_ptr partition = iter.second; - Status s = partition->WakeUpSlaveBinlogSync(); + std::shared_lock l(dbs_rw_); + for (auto& iter : sync_master_dbs_) { + std::shared_ptr db = iter.second; + Status s = db->WakeUpSlaveBinlogSync(); if (!s.ok()) { return s; } @@ -1053,78 +767,19 @@ Status PikaReplicaManager::WakeUpBinlogSync() { return Status::OK(); } -int32_t PikaReplicaManager::GenPartitionSessionId(const std::string& table_name, - uint32_t partition_id) { - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table_name, partition_id); - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { - return -1; - } else { - std::shared_ptr sync_master_partition = sync_master_partitions_[p_info]; - return sync_master_partition->GenSessionId(); - } -} - -int32_t PikaReplicaManager::GetSlavePartitionSessionId(const std::string& table_name, - uint32_t partition_id) { - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table_name, partition_id); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return -1; - } else { - std::shared_ptr sync_slave_partition = sync_slave_partitions_[p_info]; - return sync_slave_partition->MasterSessionId(); - } -} - -bool PikaReplicaManager::CheckSlavePartitionSessionId(const std::string& table_name, - uint32_t partition_id, - int session_id) { - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table_name, partition_id); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - LOG(WARNING)<< "Slave Partition Not Found: " << p_info.ToString().data(); - return false; - } else { - std::shared_ptr sync_slave_partition = sync_slave_partitions_[p_info]; - if (sync_slave_partition->MasterSessionId() != session_id) { - LOG(WARNING)<< "Check SessionId Mismatch: " << sync_slave_partition->MasterIp() - << ":" << sync_slave_partition->MasterPort() << ", " - << sync_slave_partition->SyncPartitionInfo().ToString() - << " expected_session: " << session_id << ", actual_session:" - << sync_slave_partition->MasterSessionId(); - return false; - } - } - return true; -} - -bool PikaReplicaManager::CheckMasterPartitionSessionId(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int session_id) { - slash::RWLock l(&partitions_rw_, false); - 
PartitionInfo p_info(table_name, partition_id); - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { - return false; - } else { - std::shared_ptr sync_master_partition = sync_master_partitions_[p_info]; - return sync_master_partition->CheckSessionId(ip, port, table_name, partition_id, session_id); - } -} - Status PikaReplicaManager::CheckSyncTimeout(uint64_t now) { - slash::RWLock l(&partitions_rw_, false); + std::shared_lock l(dbs_rw_); - for (auto& iter : sync_master_partitions_) { - std::shared_ptr partition = iter.second; - Status s = partition->CheckSyncTimeout(now); + for (auto& iter : sync_master_dbs_) { + std::shared_ptr db = iter.second; + Status s = db->CheckSyncTimeout(now); if (!s.ok()) { LOG(WARNING) << "CheckSyncTimeout Failed " << s.ToString(); } } - for (auto& iter : sync_slave_partitions_) { - std::shared_ptr partition = iter.second; - Status s = partition->CheckSyncTimeout(now); + for (auto& iter : sync_slave_dbs_) { + std::shared_ptr db = iter.second; + Status s = db->CheckSyncTimeout(now); if (!s.ok()) { LOG(WARNING) << "CheckSyncTimeout Failed " << s.ToString(); } @@ -1132,264 +787,155 @@ Status PikaReplicaManager::CheckSyncTimeout(uint64_t now) { return Status::OK(); } -Status PikaReplicaManager::CheckPartitionRole( - const std::string& table, uint32_t partition_id, int* role) { - slash::RWLock l(&partitions_rw_, false); +Status PikaReplicaManager::CheckDBRole(const std::string& db, int* role) { + std::shared_lock l(dbs_rw_); *role = 0; - PartitionInfo p_info(table, partition_id); - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { - return Status::NotFound(table + std::to_string(partition_id) + " not found"); + DBInfo p_info(db); + if (sync_master_dbs_.find(p_info) == sync_master_dbs_.end()) { + return Status::NotFound(db + " not found"); } - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound(table + std::to_string(partition_id) + " not found"); + if (sync_slave_dbs_.find(p_info) == sync_slave_dbs_.end()) { + return Status::NotFound(db + " not found"); } - if (sync_master_partitions_[p_info]->GetNumberOfSlaveNode() != 0) { + if (sync_master_dbs_[p_info]->GetNumberOfSlaveNode() != 0 || + (sync_master_dbs_[p_info]->GetNumberOfSlaveNode() == 0 && + sync_slave_dbs_[p_info]->State() == kNoConnect)) { *role |= PIKA_ROLE_MASTER; } - if (sync_slave_partitions_[p_info]->State() == ReplState::kConnected) { + if (sync_slave_dbs_[p_info]->State() != ReplState::kNoConnect) { *role |= PIKA_ROLE_SLAVE; } // if role is not master or slave, the rest situations are all single return Status::OK(); } -Status PikaReplicaManager::GetPartitionInfo( - const std::string& table, uint32_t partition_id, std::string* info) { - int role = 0; - std::string tmp_res; - Status s = CheckPartitionRole(table, partition_id, &role); - if (!s.ok()) { - return s; - } - - bool add_divider_line = ((role & PIKA_ROLE_MASTER) && (role & PIKA_ROLE_SLAVE)); - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table, partition_id); - if (role & PIKA_ROLE_MASTER) { - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { - return Status::NotFound(table + std::to_string(partition_id) + " not found"); - } - Status s = sync_master_partitions_[p_info]->GetInfo(info); - if (!s.ok()) { - return s; - } - } - if (add_divider_line) { - info->append(" -----------\r\n"); - } - if (role & PIKA_ROLE_SLAVE) { - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return 
Status::NotFound(table + std::to_string(partition_id) + " not found"); - } - Status s = sync_slave_partitions_[p_info]->GetInfo(info); - if (!s.ok()) { - return s; - } - } - info->append("\r\n"); - return Status::OK(); -} - -Status PikaReplicaManager::SelectLocalIp(const std::string& remote_ip, - const int remote_port, +Status PikaReplicaManager::SelectLocalIp(const std::string& remote_ip, const int remote_port, std::string* const local_ip) { - pink::PinkCli* cli = pink::NewRedisCli(); + std::unique_ptr<net::NetCli> cli(net::NewRedisCli()); cli->set_connect_timeout(1500); if ((cli->Connect(remote_ip, remote_port, "")).ok()) { struct sockaddr_in laddr; socklen_t llen = sizeof(laddr); - getsockname(cli->fd(), (struct sockaddr*) &laddr, &llen); + getsockname(cli->fd(), reinterpret_cast<struct sockaddr*>(&laddr), &llen); std::string tmp_ip(inet_ntoa(laddr.sin_addr)); *local_ip = tmp_ip; cli->Close(); - delete cli; } else { - LOG(WARNING) << "Failed to connect remote node(" - << remote_ip << ":" << remote_port << ")"; - delete cli; + LOG(WARNING) << "Failed to connect remote node(" << remote_ip << ":" << remote_port << ")"; return Status::Corruption("connect remote node error"); } return Status::OK(); } -Status PikaReplicaManager::ActivateSyncSlavePartition(const RmNode& node, - const ReplState& repl_state) { - slash::RWLock l(&partitions_rw_, false); - const PartitionInfo& p_info = node.NodePartitionInfo(); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + node.ToString() + " not found"); +Status PikaReplicaManager::ActivateSyncSlaveDB(const RmNode& node, const ReplState& repl_state) { + std::shared_lock l(dbs_rw_); + const DBInfo& p_info = node.NodeDBInfo(); + if (sync_slave_dbs_.find(p_info) == sync_slave_dbs_.end()) { + return Status::NotFound("Sync Slave DB " + node.ToString() + " not found"); } - ReplState ssp_state = sync_slave_partitions_[p_info]->State(); - if (ssp_state != ReplState::kNoConnect) { - return Status::Corruption("Sync Slave partition in " + ReplStateMsg[ssp_state]); + ReplState ssp_state = sync_slave_dbs_[p_info]->State(); + if (ssp_state != ReplState::kNoConnect && ssp_state != ReplState::kDBNoConnect) { + return Status::Corruption("Sync Slave DB in " + ReplStateMsg[ssp_state]); } std::string local_ip; Status s = SelectLocalIp(node.Ip(), node.Port(), &local_ip); if (s.ok()) { - sync_slave_partitions_[p_info]->SetLocalIp(local_ip); - sync_slave_partitions_[p_info]->Activate(node, repl_state); + sync_slave_dbs_[p_info]->SetLocalIp(local_ip); + sync_slave_dbs_[p_info]->Activate(node, repl_state); } return s; }
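SelectLocalIp above answers "which of my addresses can reach this master?" by opening a throwaway client connection and reading the socket's local endpoint back with getsockname(2). The same trick with plain POSIX sockets (a sketch only; Pika uses its Redis client over a real TCP connect, while a connected UDP socket avoids the handshake entirely):

// Sketch: discover the local source IP the kernel would use toward a peer.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <string>

bool LocalIpTowards(const std::string& peer_ip, int peer_port, std::string* out) {
  int fd = socket(AF_INET, SOCK_DGRAM, 0);  // UDP connect() sends no packets
  if (fd < 0) return false;
  sockaddr_in peer{};
  peer.sin_family = AF_INET;
  peer.sin_port = htons(static_cast<uint16_t>(peer_port));
  inet_pton(AF_INET, peer_ip.c_str(), &peer.sin_addr);
  bool ok = false;
  if (connect(fd, reinterpret_cast<sockaddr*>(&peer), sizeof(peer)) == 0) {
    sockaddr_in self{};
    socklen_t len = sizeof(self);
    if (getsockname(fd, reinterpret_cast<sockaddr*>(&self), &len) == 0) {
      char buf[INET_ADDRSTRLEN];
      const char* p = inet_ntop(AF_INET, &self.sin_addr, buf, sizeof(buf));
      if (p) { *out = p; ok = true; }
    }
  }
  close(fd);
  return ok;
}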
-Status PikaReplicaManager::UpdateSyncSlavePartitionSessionId(const PartitionInfo& p_info, - int32_t session_id) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); - } - sync_slave_partitions_[p_info]->SetMasterSessionId(session_id); - return Status::OK(); -} - -Status PikaReplicaManager::DeactivateSyncSlavePartition(const PartitionInfo& p_info) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); - } - sync_slave_partitions_[p_info]->Deactivate(); - return Status::OK(); -} - -Status PikaReplicaManager::SetSlaveReplState(const PartitionInfo& p_info, - const ReplState& repl_state) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); - } - sync_slave_partitions_[p_info]->SetReplState(repl_state); - return Status::OK(); -} - -Status PikaReplicaManager::GetSlaveReplState(const PartitionInfo& p_info, - ReplState* repl_state) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); - } - *repl_state = sync_slave_partitions_[p_info]->State(); - return Status::OK(); -} - Status PikaReplicaManager::SendMetaSyncRequest() { Status s; - int now = time(NULL); - if (now - last_meta_sync_timestamp_ >= PIKA_META_SYNC_MAX_WAIT_TIME) { + if (time(nullptr) - g_pika_server->GetMetaSyncTimestamp() >= PIKA_META_SYNC_MAX_WAIT_TIME || + g_pika_server->IsFirstMetaSync()) { s = pika_repl_client_->SendMetaSync(); if (s.ok()) { - last_meta_sync_timestamp_ = now; + g_pika_server->UpdateMetaSyncTimestamp(); + g_pika_server->SetFirstMetaSync(false); } } return s; } -Status PikaReplicaManager::SendRemoveSlaveNodeRequest(const std::string& table, - uint32_t partition_id) { - slash::Status s; - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table, partition_id); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); +Status PikaReplicaManager::SendRemoveSlaveNodeRequest(const std::string& db) { + pstd::Status s; + std::shared_lock l(dbs_rw_); + DBInfo p_info(db); + if (sync_slave_dbs_.find(p_info) == sync_slave_dbs_.end()) { + return Status::NotFound("Sync Slave DB " + p_info.ToString()); } else { - std::shared_ptr<SyncSlavePartition> s_partition = sync_slave_partitions_[p_info]; - s = pika_repl_client_->SendRemoveSlaveNode(s_partition->MasterIp(), - s_partition->MasterPort(), table, partition_id, s_partition->LocalIp()); + std::shared_ptr<SyncSlaveDB> s_db = sync_slave_dbs_[p_info]; + s = pika_repl_client_->SendRemoveSlaveNode(s_db->MasterIp(), s_db->MasterPort(), db, s_db->LocalIp()); if (s.ok()) { - s_partition->Deactivate(); + s_db->SetReplState(ReplState::kDBNoConnect); } } if (s.ok()) { - LOG(INFO) << "SlaveNode (" << table << ":" << partition_id - << "), stop sync success"; + LOG(INFO) << "SlaveNode (" << db << "), stop sync success"; } else { - LOG(WARNING) << "SlaveNode (" << table << ":" << partition_id - << "), stop sync faild, " << s.ToString(); + LOG(WARNING) << "SlaveNode (" << db << "), stop sync failed, " << s.ToString(); } return s; } -Status PikaReplicaManager::SendPartitionTrySyncRequest( - const std::string& table_name, size_t partition_id) { +Status PikaReplicaManager::SendTrySyncRequest(const std::string& db_name) { BinlogOffset boffset; - if (!g_pika_server->GetTablePartitionBinlogOffset( - table_name, partition_id, &boffset)) { - LOG(WARNING) << "Partition: " << table_name << ":" << partition_id - << ", Get partition binlog offset failed"; - return Status::Corruption("Partition get binlog offset error"); + if (!g_pika_server->GetDBBinlogOffset(db_name, &boffset)) { + LOG(WARNING) << "DB: " << db_name << ", Get DB binlog offset failed"; + return Status::Corruption("DB get binlog offset error"); } - std::shared_ptr<SyncSlavePartition> slave_partition = - GetSyncSlavePartitionByName(PartitionInfo(table_name, partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return Status::Corruption("Slave Partition not found"); +
std::shared_ptr slave_db = GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << db_name << ", NotFound"; + return Status::Corruption("Slave DB not found"); } - Status status = pika_repl_client_->SendPartitionTrySync(slave_partition->MasterIp(), - slave_partition->MasterPort(), - table_name, partition_id, boffset, - slave_partition->LocalIp()); + Status status = + pika_repl_client_->SendTrySync(slave_db->MasterIp(), slave_db->MasterPort(), db_name, + boffset, slave_db->LocalIp()); - Status s; if (status.ok()) { - s = g_pika_rm->SetSlaveReplState(PartitionInfo(table_name, partition_id), ReplState::kWaitReply); + slave_db->SetReplState(ReplState::kWaitReply); } else { - s = g_pika_rm->SetSlaveReplState(PartitionInfo(table_name, partition_id), ReplState::kError); - LOG(WARNING) << "SendPartitionTrySyncRequest failed " << status.ToString(); - } - if (!s.ok()) { - LOG(WARNING) << s.ToString(); + slave_db->SetReplState(ReplState::kError); + LOG(WARNING) << "SendDBTrySyncRequest failed " << status.ToString(); } return status; } -static bool already_dbsync = false; -Status PikaReplicaManager::SendPartitionDBSyncRequest( - const std::string& table_name, size_t partition_id) { - if (!already_dbsync) { - already_dbsync = true; - } else { - LOG(FATAL) << "we only allow one DBSync action to avoid passing duplicate commands to target Redis multiple times"; - } - +Status PikaReplicaManager::SendDBSyncRequest(const std::string& db_name) { BinlogOffset boffset; - if (!g_pika_server->GetTablePartitionBinlogOffset( - table_name, partition_id, &boffset)) { - LOG(WARNING) << "Partition: " << table_name << ":" << partition_id - << ", Get partition binlog offset failed"; - return Status::Corruption("Partition get binlog offset error"); + if (!g_pika_server->GetDBBinlogOffset(db_name, &boffset)) { + LOG(WARNING) << "DB: " << db_name << ", Get DB binlog offset failed"; + return Status::Corruption("DB get binlog offset error"); } - std::shared_ptr partition = - g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition) { - LOG(WARNING) << "Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return Status::Corruption("Partition not found"); + std::shared_ptr db = g_pika_server->GetDB(db_name); + if (!db) { + LOG(WARNING) << "DB: " << db_name << " NotFound"; + return Status::Corruption("DB not found"); } - partition->PrepareRsync(); + db->PrepareRsync(); - std::shared_ptr slave_partition = - GetSyncSlavePartitionByName(PartitionInfo(table_name, partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return Status::Corruption("Slave Partition not found"); + std::shared_ptr slave_db = GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << db_name << ", NotFound"; + return Status::Corruption("Slave DB not found"); } - Status status = pika_repl_client_->SendPartitionDBSync(slave_partition->MasterIp(), - slave_partition->MasterPort(), - table_name, partition_id, boffset, - slave_partition->LocalIp()); + Status status = pika_repl_client_->SendDBSync(slave_db->MasterIp(), slave_db->MasterPort(), + db_name, boffset, slave_db->LocalIp()); Status s; if (status.ok()) { - s = g_pika_rm->SetSlaveReplState(PartitionInfo(table_name, partition_id), ReplState::kWaitReply); + slave_db->SetReplState(ReplState::kWaitReply); } else { - LOG(WARNING) << "SendPartitionDbSync failed " << status.ToString(); - s = 
g_pika_rm->SetSlaveReplState(PartitionInfo(table_name, partition_id), ReplState::kError); + slave_db->SetReplState(ReplState::kError); + LOG(WARNING) << "SendDBSync failed " << status.ToString(); } if (!s.ok()) { LOG(WARNING) << s.ToString(); @@ -1397,208 +943,82 @@ Status PikaReplicaManager::SendPartitionDBSyncRequest( return status; } -Status PikaReplicaManager::SendPartitionBinlogSyncAckRequest( - const std::string& table, uint32_t partition_id, - const BinlogOffset& ack_start, const BinlogOffset& ack_end, - bool is_first_send) { - std::shared_ptr slave_partition = - GetSyncSlavePartitionByName(PartitionInfo(table, partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << table << ":" << partition_id - << ", NotFound"; - return Status::Corruption("Slave Partition not found"); +Status PikaReplicaManager::SendBinlogSyncAckRequest(const std::string& db, const LogOffset& ack_start, + const LogOffset& ack_end, bool is_first_send) { + std::shared_ptr slave_db = GetSyncSlaveDBByName(DBInfo(db)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << db << ":, NotFound"; + return Status::Corruption("Slave DB not found"); } - return pika_repl_client_->SendPartitionBinlogSync( - slave_partition->MasterIp(), slave_partition->MasterPort(), - table, partition_id, ack_start, ack_end, slave_partition->LocalIp(), - is_first_send); + return pika_repl_client_->SendBinlogSync(slave_db->MasterIp(), slave_db->MasterPort(), db, + ack_start, ack_end, slave_db->LocalIp(), is_first_send); } Status PikaReplicaManager::CloseReplClientConn(const std::string& ip, int32_t port) { return pika_repl_client_->Close(ip, port); } -Status PikaReplicaManager::SendSlaveBinlogChipsRequest(const std::string& ip, - int port, +Status PikaReplicaManager::SendSlaveBinlogChipsRequest(const std::string& ip, int port, const std::vector& tasks) { return pika_repl_server_->SendSlaveBinlogChips(ip, port, tasks); } -std::shared_ptr -PikaReplicaManager::GetSyncMasterPartitionByName(const PartitionInfo& p_info) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { +std::shared_ptr PikaReplicaManager::GetSyncMasterDBByName(const DBInfo& p_info) { + std::shared_lock l(dbs_rw_); + if (sync_master_dbs_.find(p_info) == sync_master_dbs_.end()) { return nullptr; } - return sync_master_partitions_[p_info]; + return sync_master_dbs_[p_info]; } -Status PikaReplicaManager::GetSafetyPurgeBinlogFromSMP(const std::string& table_name, - uint32_t partition_id, - std::string* safety_purge) { - std::shared_ptr master_partition = - GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id)); - if (!master_partition) { - LOG(WARNING) << "Sync Master Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return Status::NotFound("SyncMasterPartition NotFound"); - } else { - return master_partition->GetSafetyPurgeBinlog(safety_purge); - } -} - -bool PikaReplicaManager::BinlogCloudPurgeFromSMP(const std::string& table_name, - uint32_t partition_id, uint32_t index) { - std::shared_ptr master_partition = - GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id)); - if (!master_partition) { - LOG(WARNING) << "Sync Master Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return false; - } else { - return master_partition->BinlogCloudPurge(index); - } -} - -std::shared_ptr -PikaReplicaManager::GetSyncSlavePartitionByName(const PartitionInfo& p_info) { - slash::RWLock l(&partitions_rw_, false); - if 
(sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { +std::shared_ptr PikaReplicaManager::GetSyncSlaveDBByName(const DBInfo& p_info) { + std::shared_lock l(dbs_rw_); + if (sync_slave_dbs_.find(p_info) == sync_slave_dbs_.end()) { return nullptr; } - return sync_slave_partitions_[p_info]; + return sync_slave_dbs_[p_info]; } -Status PikaReplicaManager::RunSyncSlavePartitionStateMachine() { - slash::RWLock l(&partitions_rw_, false); - for (const auto& item : sync_slave_partitions_) { - PartitionInfo p_info = item.first; - std::shared_ptr s_partition = item.second; - if (s_partition->State() == ReplState::kTryConnect) { - SendPartitionTrySyncRequest(p_info.table_name_, p_info.partition_id_); - } else if (s_partition->State() == ReplState::kTryDBSync) { - SendPartitionDBSyncRequest(p_info.table_name_, p_info.partition_id_); - } else if (s_partition->State() == ReplState::kWaitReply) { +Status PikaReplicaManager::RunSyncSlaveDBStateMachine() { + std::shared_lock l(dbs_rw_); + for (const auto& item : sync_slave_dbs_) { + DBInfo p_info = item.first; + std::shared_ptr s_db = item.second; + if (s_db->State() == ReplState::kTryConnect) { + SendTrySyncRequest(p_info.db_name_); + } else if (s_db->State() == ReplState::kTryDBSync) { + SendDBSyncRequest(p_info.db_name_); + } else if (s_db->State() == ReplState::kWaitReply) { continue; - } else if (s_partition->State() == ReplState::kWaitDBSync) { - std::shared_ptr partition = - g_pika_server->GetTablePartitionById( - p_info.table_name_, p_info.partition_id_); - if (partition) { - partition->TryUpdateMasterOffset(); + } else if (s_db->State() == ReplState::kWaitDBSync) { + Status s = s_db->ActivateRsync(); + if (!s.ok()) { + LOG(WARNING) << "Slave DB: " << s_db->DBName() << " rsync failed! full synchronization will be retried later"; + continue; + } + + std::shared_ptr db = + g_pika_server->GetDB(p_info.db_name_); + if (db) { + if (s_db->IsRsyncExited()) { + db->TryUpdateMasterOffset(); + } } else { - LOG(WARNING) << "Partition not found, Table Name: " - << p_info.table_name_ << " Partition Id: " << p_info.partition_id_; + LOG(WARNING) << "DB not found, DB Name: " << p_info.db_name_; } - } else if (s_partition->State() == ReplState::kConnected - || s_partition->State() == ReplState::kNoConnect) { + } else if (s_db->State() == ReplState::kConnected || s_db->State() == ReplState::kNoConnect || + s_db->State() == ReplState::kDBNoConnect) { continue; } } return Status::OK(); } -Status PikaReplicaManager::AddSyncPartitionSanityCheck(const std::set& p_infos) { - slash::RWLock l(&partitions_rw_, false); - for (const auto& p_info : p_infos) { - if (sync_master_partitions_.find(p_info) != sync_master_partitions_.end() - || sync_slave_partitions_.find(p_info) != sync_slave_partitions_.end()) { - LOG(WARNING) << "sync partition: " << p_info.ToString() << " exist"; - return Status::Corruption("sync partition " + p_info.ToString() - + " exist"); - } - } - return Status::OK(); -} - -Status PikaReplicaManager::AddSyncPartition( - const std::set& p_infos) { - Status s = AddSyncPartitionSanityCheck(p_infos); - if (!s.ok()) { - return s; - } - - slash::RWLock l(&partitions_rw_, true); - for (const auto& p_info : p_infos) { - sync_master_partitions_[p_info] = - std::make_shared(p_info.table_name_, - p_info.partition_id_); - sync_slave_partitions_[p_info] = - std::make_shared(p_info.table_name_, - p_info.partition_id_); - } - return Status::OK(); -} - -Status PikaReplicaManager::RemoveSyncPartitionSanityCheck( - const std::set& p_infos) { - slash::RWLock 
l(&partitions_rw_, false); - for (const auto& p_info : p_infos) { - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end() - || sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - LOG(WARNING) << "sync partition: " << p_info.ToString() << " not found"; - return Status::Corruption("sync partition " + p_info.ToString() - + " not found"); - } - - if (sync_master_partitions_[p_info]->GetNumberOfSlaveNode() != 0) { - LOG(WARNING) << "sync master partition: " << p_info.ToString() - << " in syncing"; - return Status::Corruption("sync master partition " + p_info.ToString() - + " in syncing"); - } - - ReplState state = sync_slave_partitions_[p_info]->State(); - if (state != kNoConnect && state != kError) { - LOG(WARNING) << "sync slave partition: " << p_info.ToString() - << " in " << ReplStateMsg[state] + " state"; - return Status::Corruption("sync slave partition " + p_info.ToString() - + " in " + ReplStateMsg[state] + " state"); - } - } - return Status::OK(); -} - -Status PikaReplicaManager::RemoveSyncPartition( - const std::set& p_infos) { - Status s = RemoveSyncPartitionSanityCheck(p_infos); - if (!s.ok()) { - return s; - } - - slash::RWLock l(&partitions_rw_, true); - for (const auto& p_info : p_infos) { - sync_master_partitions_.erase(p_info); - sync_slave_partitions_.erase(p_info); - } - return Status::OK(); -} - -void PikaReplicaManager::FindCompleteReplica(std::vector* replica) { - std::unordered_map replica_slotnum; - slash::RWLock l(&partitions_rw_, false); - for (auto& iter : sync_master_partitions_) { - std::vector names; - iter.second->GetValidSlaveNames(&names); - for (auto& name : names) { - if (replica_slotnum.find(name) == replica_slotnum.end()) { - replica_slotnum[name] = 0; - } - replica_slotnum[name]++; - } - } - for (auto item : replica_slotnum) { - if (item.second == sync_master_partitions_.size()) { - replica->push_back(item.first); - } - } -} - void PikaReplicaManager::FindCommonMaster(std::string* master) { - slash::RWLock l(&partitions_rw_, false); + std::shared_lock l(dbs_rw_); std::string common_master_ip; int common_master_port = 0; - for (auto& iter : sync_slave_partitions_) { + for (auto& iter : sync_slave_dbs_) { if (iter.second->State() != kConnected) { return; } @@ -1618,17 +1038,19 @@ void PikaReplicaManager::FindCommonMaster(std::string* master) { } void PikaReplicaManager::RmStatus(std::string* info) { - slash::RWLock l(&partitions_rw_, false); + std::shared_lock l(dbs_rw_); std::stringstream tmp_stream; - tmp_stream << "Master partition(" << sync_master_partitions_.size() << "):" << "\r\n"; - for (auto& iter : sync_master_partitions_) { - tmp_stream << " Partition " << iter.second->SyncPartitionInfo().ToString() - << "\r\n" << iter.second->ToStringStatus() << "\r\n"; - } - tmp_stream << "Slave partition(" << sync_slave_partitions_.size() << "):" << "\r\n"; - for (auto& iter : sync_slave_partitions_) { - tmp_stream << " Partition " << iter.second->SyncPartitionInfo().ToString() - << "\r\n" << iter.second->ToStringStatus() << "\r\n"; + tmp_stream << "Master DB(" << sync_master_dbs_.size() << "):" + << "\r\n"; + for (auto& iter : sync_master_dbs_) { + tmp_stream << " DB " << iter.second->SyncDBInfo().ToString() << "\r\n" + << iter.second->ToStringStatus() << "\r\n"; + } + tmp_stream << "Slave DB(" << sync_slave_dbs_.size() << "):" + << "\r\n"; + for (auto& iter : sync_slave_dbs_) { + tmp_stream << " DB " << iter.second->SyncDBInfo().ToString() << "\r\n" + << iter.second->ToStringStatus() << "\r\n"; } 
info->append(tmp_stream.str()); } diff --git a/tools/pika_migrate/src/pika_rsync_service.cc b/tools/pika_migrate/src/pika_rsync_service.cc index 00f3e70ee4..5071a1cfc1 100644 --- a/tools/pika_migrate/src/pika_rsync_service.cc +++ b/tools/pika_migrate/src/pika_rsync_service.cc @@ -7,30 +7,33 @@ #include #include +#include -#include "slash/include/env.h" -#include "slash/include/rsync.h" +#include "pstd/include/env.h" +#include "pstd/include/rsync.h" -#include "include/pika_define.h" #include "include/pika_conf.h" +#include "include/pika_define.h" -extern PikaConf *g_pika_conf; +#ifdef __FreeBSD__ +# include +#endif -PikaRsyncService::PikaRsyncService(const std::string& raw_path, - const int port) - : raw_path_(raw_path), port_(port) { +extern std::unique_ptr g_pika_conf; + +PikaRsyncService::PikaRsyncService(const std::string& raw_path, const int port) : raw_path_(raw_path), port_(port) { if (raw_path_.back() != '/') { raw_path_ += "/"; } - rsync_path_ = raw_path_ + slash::kRsyncSubDir + "/"; - pid_path_ = rsync_path_ + slash::kRsyncPidFile; + rsync_path_ = raw_path_ + pstd::kRsyncSubDir + "/"; + pid_path_ = rsync_path_ + pstd::kRsyncPidFile; } PikaRsyncService::~PikaRsyncService() { if (!CheckRsyncAlive()) { - slash::DeleteDirIfExist(rsync_path_); + pstd::DeleteDirIfExist(rsync_path_); } else { - slash::StopRsync(raw_path_); + pstd::StopRsync(raw_path_); } LOG(INFO) << "PikaRsyncService exit!!!"; } @@ -43,13 +46,13 @@ int PikaRsyncService::StartRsync() { } else { auth = g_pika_conf->masterauth(); } - ret = slash::StartRsync(raw_path_, kDBSyncModule, "0.0.0.0", port_, auth); - if (ret != 0) { + ret = pstd::StartRsync(raw_path_, kDBSyncModule, "0.0.0.0", port_, auth); + if (ret) { LOG(WARNING) << "Failed to start rsync, path:" << raw_path_ << " error : " << ret; return -1; } ret = CreateSecretFile(); - if (ret != 0) { + if (ret) { LOG(WARNING) << "Failed to create secret file"; return -1; } @@ -69,15 +72,16 @@ int PikaRsyncService::CreateSecretFile() { if (g_pika_conf->db_sync_path().back() != '/') { secret_file_path += "/"; } - secret_file_path += slash::kRsyncSubDir + "/"; - slash::CreatePath(secret_file_path); + secret_file_path += pstd::kRsyncSubDir + "/"; + pstd::CreatePath(secret_file_path); secret_file_path += kPikaSecretFile; std::string auth; - if (g_pika_conf->requirepass().empty()) { + // unify rsync auth with masterauth + if (g_pika_conf->masterauth().empty()) { auth = kDefaultRsyncAuth; } else { - auth = g_pika_conf->requirepass(); + auth = g_pika_conf->masterauth(); } std::ofstream secret_stream(secret_file_path.c_str()); @@ -96,10 +100,6 @@ int PikaRsyncService::CreateSecretFile() { return ret; } -bool PikaRsyncService::CheckRsyncAlive() { - return slash::FileExists(pid_path_); -} +bool PikaRsyncService::CheckRsyncAlive() { return pstd::FileExists(pid_path_); } -int PikaRsyncService::ListenPort() { - return port_; -} +int PikaRsyncService::ListenPort() { return port_; } diff --git a/tools/pika_migrate/src/pika_sender.cc b/tools/pika_migrate/src/pika_sender.cc index a2109b22e8..4936eaf692 100644 --- a/tools/pika_migrate/src/pika_sender.cc +++ b/tools/pika_migrate/src/pika_sender.cc @@ -7,11 +7,8 @@ #include -#include "slash/include/xdebug.h" - PikaSender::PikaSender(std::string ip, int64_t port, std::string password): cli_(NULL), - signal_(&keys_mutex_), ip_(ip), port_(port), password_(password), @@ -24,23 +21,22 @@ PikaSender::~PikaSender() { } int PikaSender::QueueSize() { - slash::MutexLock l(&keys_mutex_); + std::lock_guard lock(keys_queue_mutex_); return 
keys_queue_.size(); } void PikaSender::Stop() { - should_exit_ = true; - keys_mutex_.Lock(); - signal_.Signal(); - keys_mutex_.Unlock(); + should_exit_.store(true); + wsignal_.notify_all(); + rsignal_.notify_all(); } void PikaSender::ConnectRedis() { while (cli_ == NULL) { // Connect to redis - cli_ = pink::NewRedisCli(); + cli_ = net::NewRedisCli(); cli_->set_connect_timeout(1000); - slash::Status s = cli_->Connect(ip_, port_); + pstd::Status s = cli_->Connect(ip_, port_); if (!s.ok()) { delete cli_; cli_ = NULL; @@ -51,13 +47,13 @@ void PikaSender::ConnectRedis() { // Authentication if (!password_.empty()) { - pink::RedisCmdArgsType argv, resp; + net::RedisCmdArgsType argv, resp; std::string cmd; argv.push_back("AUTH"); argv.push_back(password_); - pink::SerializeRedisCommand(argv, &cmd); - slash::Status s = cli_->Send(&cmd); + net::SerializeRedisCommand(argv, &cmd); + pstd::Status s = cli_->Send(&cmd); if (s.ok()) { s = cli_->Recv(&resp); @@ -79,12 +75,12 @@ void PikaSender::ConnectRedis() { } } else { // If forget to input password - pink::RedisCmdArgsType argv, resp; + net::RedisCmdArgsType argv, resp; std::string cmd; argv.push_back("PING"); - pink::SerializeRedisCommand(argv, &cmd); - slash::Status s = cli_->Send(&cmd); + net::SerializeRedisCommand(argv, &cmd); + pstd::Status s = cli_->Send(&cmd); if (s.ok()) { s = cli_->Recv(&resp); @@ -110,38 +106,32 @@ void PikaSender::ConnectRedis() { } void PikaSender::LoadKey(const std::string &key) { - keys_mutex_.Lock(); - if (keys_queue_.size() < 100000) { - keys_queue_.push(key); - signal_.Signal(); - keys_mutex_.Unlock(); - } else { - while (keys_queue_.size() > 100000 && !should_exit_) { - signal_.TimedWait(100); - } + std::unique_lock lock(signal_mutex); + wsignal_.wait(lock, [this]() { return keys_queue_.size() < 100000 || should_exit_; }); + if (!should_exit_) { + std::lock_guard lock(keys_queue_mutex_); keys_queue_.push(key); - signal_.Signal(); - keys_mutex_.Unlock(); - } + rsignal_.notify_one(); + } } void PikaSender::SendCommand(std::string &command, const std::string &key) { // Send command - slash::Status s = cli_->Send(&command); + pstd::Status s = cli_->Send(&command); if (!s.ok()) { elements_--; LoadKey(key); cli_->Close(); - log_info("%s", s.ToString().data()); + LOG(INFO) << s.ToString().data(); delete cli_; cli_ = NULL; ConnectRedis(); + } else { + cli_->Recv(nullptr); } } void *PikaSender::ThreadMain() { - log_info("Start sender thread..."); - int cnt = 0; if (cli_ == NULL) { ConnectRedis(); @@ -150,32 +140,22 @@ void *PikaSender::ThreadMain() { while (!should_exit_ || QueueSize() != 0) { std::string command; - keys_mutex_.Lock(); - while (keys_queue_.size() == 0 && !should_exit_) { - signal_.TimedWait(200); - } - keys_mutex_.Unlock(); + std::unique_lock lock(signal_mutex); + rsignal_.wait(lock, [this]() { return QueueSize() != 0 || should_exit_; }); if (QueueSize() == 0 && should_exit_) { - // if (should_exit_) { return NULL; } - - keys_mutex_.Lock(); - std::string key = keys_queue_.front(); - elements_++; - keys_queue_.pop(); - keys_mutex_.Unlock(); - - SendCommand(key, key); - cnt++; - if (cnt >= 200) { - for(; cnt > 0; cnt--) { - cli_->Recv(NULL); - } + lock.unlock(); + + std::string key; + { + std::lock_guard lock(keys_queue_mutex_); + key = keys_queue_.front(); + elements_++; + keys_queue_.pop(); } - } - for(; cnt > 0; cnt--) { - cli_->Recv(NULL); + wsignal_.notify_one(); + SendCommand(key, key); } if (cli_) { @@ -183,7 +163,6 @@ void *PikaSender::ThreadMain() { delete cli_; cli_ = NULL; } - log_info("PikaSender thread complete"); return NULL; }
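The reworked PikaSender above swaps slash's mutex/condvar for a pair of std::condition_variable: wsignal_ parks producers once 100000 keys are queued, and rsignal_ wakes the sending thread when keys arrive. The same bounded hand-off in a self-contained form (illustrative class, not Pika's API):

// Sketch: bounded producer/consumer queue with two condition variables.
#include <condition_variable>
#include <mutex>
#include <queue>
#include <string>

class BoundedQueue {
 public:
  explicit BoundedQueue(size_t cap) : cap_(cap) {}
  void Push(std::string v) {
    std::unique_lock<std::mutex> lk(mu_);
    not_full_.wait(lk, [this] { return q_.size() < cap_; });  // like wsignal_
    q_.push(std::move(v));
    not_empty_.notify_one();                                  // like rsignal_
  }
  std::string Pop() {
    std::unique_lock<std::mutex> lk(mu_);
    not_empty_.wait(lk, [this] { return !q_.empty(); });
    std::string v = std::move(q_.front());
    q_.pop();
    not_full_.notify_one();
    return v;
  }
 private:
  size_t cap_;
  std::mutex mu_;
  std::condition_variable not_full_, not_empty_;
  std::queue<std::string> q_;
};

Unlike this sketch, PikaSender guards the queue with a separate keys_queue_mutex_ and re-checks should_exit_ in its predicates, which is why the wait lambdas call QueueSize() rather than touching the queue directly.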
thread complete"); return NULL; } diff --git a/tools/pika_migrate/src/pika_server.cc b/tools/pika_migrate/src/pika_server.cc index c03b06a5ab..6659eda421 100644 --- a/tools/pika_migrate/src/pika_server.cc +++ b/tools/pika_migrate/src/pika_server.cc @@ -3,79 +3,63 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "include/pika_server.h" - -#include -#include -#include -#include -#include -#include #include +#include #include +#include +#include +#include +#include +#include +#include "net/include/net_cli.h" +#include "net/include/net_interfaces.h" +#include "net/include/net_stats.h" +#include "net/include/redis_cli.h" +#include "pstd/include/env.h" +#include "pstd/include/rsync.h" +#include "pstd/include/pika_codis_slot.h" -#include "slash/include/env.h" -#include "slash/include/rsync.h" -#include "pink/include/pink_cli.h" -#include "pink/include/redis_cli.h" -#include "pink/include/bg_thread.h" - +#include "include/pika_cmd_table_manager.h" +#include "include/pika_dispatch_thread.h" +#include "include/pika_instant.h" +#include "include/pika_monotonic_time.h" #include "include/pika_rm.h" #include "include/pika_server.h" #include "include/pika_sender.h" #include "include/migrator_thread.h" -#include "include/pika_dispatch_thread.h" -#include "include/pika_cmd_table_manager.h" +using pstd::Status; extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; +extern std::unique_ptr g_network_statistic; +// QUEUE_SIZE_THRESHOLD_PERCENTAGE is used to represent a percentage value and should be within the range of 0 to 100. 
+const size_t QUEUE_SIZE_THRESHOLD_PERCENTAGE = 75; void DoPurgeDir(void* arg) { - std::string path = *(static_cast(arg)); - LOG(INFO) << "Delete dir: " << path << " start"; - slash::DeleteDir(path); - LOG(INFO) << "Delete dir: " << path << " done"; - delete static_cast(arg); -} - -void DoDBSync(void* arg) { - DBSyncArg* dbsa = reinterpret_cast(arg); - PikaServer* const ps = dbsa->p; - ps->DbSyncSendFile(dbsa->ip, dbsa->port, - dbsa->table_name, dbsa->partition_id); - delete dbsa; -} - -PikaServer::PikaServer() : - exit_(false), - slot_state_(INFREE), - have_scheduled_crontask_(false), - last_check_compact_time_({0, 0}), - master_ip_(""), - master_port_(0), - repl_state_(PIKA_REPL_NO_CONNECT), - role_(PIKA_ROLE_SINGLE), - loop_partition_state_machine_(false), - force_full_sync_(false), - slowlog_entry_id_(0) { - - //Init server ip host + std::unique_ptr path(static_cast(arg)); + LOG(INFO) << "Delete dir: " << *path << " start"; + pstd::DeleteDir(*path); + LOG(INFO) << "Delete dir: " << *path << " done"; +} + + +PikaServer::PikaServer() + : exit_(false), + slow_cmd_thread_pool_flag_(g_pika_conf->slow_cmd_pool()), + last_check_compact_time_({0, 0}), + last_check_resume_time_({0, 0}), + repl_state_(PIKA_REPL_NO_CONNECT), + role_(PIKA_ROLE_SINGLE) { + // Init server ip host if (!ServerInit()) { LOG(FATAL) << "ServerInit iotcl error"; } - InitBlackwidowOptions(); - - pthread_rwlockattr_t tables_rw_attr; - pthread_rwlockattr_init(&tables_rw_attr); - pthread_rwlockattr_setkind_np(&tables_rw_attr, - PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); - pthread_rwlock_init(&tables_rw_, &tables_rw_attr); + InitStorageOptions(); // Create thread - worker_num_ = std::min(g_pika_conf->thread_num(), - PIKA_MAX_WORKER_THREAD_NUM); + worker_num_ = std::min(g_pika_conf->thread_num(), PIKA_MAX_WORKER_THREAD_NUM); std::set ips; if (g_pika_conf->network_interface().empty()) { @@ -87,144 +71,100 @@ PikaServer::PikaServer() : // We estimate the queue size int worker_queue_limit = g_pika_conf->maxclients() / worker_num_ + 100; LOG(INFO) << "Worker queue limit is " << worker_queue_limit; - pika_dispatch_thread_ = new PikaDispatchThread(ips, port_, worker_num_, 3000, - worker_queue_limit); - pika_monitor_thread_ = new PikaMonitorThread(); - pika_rsync_service_ = new PikaRsyncService(g_pika_conf->db_sync_path(), - g_pika_conf->port() + kPortShiftRSync); - pika_pubsub_thread_ = new pink::PubSubThread(); - pika_auxiliary_thread_ = new PikaAuxiliaryThread(); - pika_thread_pool_ = new pink::ThreadPool(g_pika_conf->thread_pool_size(), 100000); - - // Create redis sender - for (int i = 0; i < g_pika_conf->redis_sender_num(); i++) { - redis_senders_.emplace_back( - new RedisSender(int(i), - g_pika_conf->target_redis_host(), - g_pika_conf->target_redis_port(), - g_pika_conf->target_redis_pwd())); + for_each(ips.begin(), ips.end(), [](auto& ip) { LOG(WARNING) << ip; }); + pika_dispatch_thread_ = std::make_unique(ips, port_, worker_num_, 3000, worker_queue_limit, + g_pika_conf->max_conn_rbuf_size()); + pika_rsync_service_ = + std::make_unique(g_pika_conf->db_sync_path(), g_pika_conf->port() + kPortShiftRSync); + // TODO: remove pika_rsync_service_,reuse pika_rsync_service_ port + rsync_server_ = std::make_unique(ips, port_ + kPortShiftRsync2); + pika_pubsub_thread_ = std::make_unique(); + pika_auxiliary_thread_ = std::make_unique(); + pika_migrate_ = std::make_unique(); + pika_migrate_thread_ = std::make_unique(); + + pika_client_processor_ = std::make_unique(g_pika_conf->thread_pool_size(), 100000); + pika_slow_cmd_thread_pool_ 
= std::make_unique(g_pika_conf->slow_cmd_thread_pool_size(), 100000); + pika_admin_cmd_thread_pool_ = std::make_unique(g_pika_conf->admin_thread_pool_size(), 100000); + instant_ = std::make_unique(); + exit_mutex_.lock(); + int64_t lastsave = GetLastSaveTime(g_pika_conf->bgsave_path()); + UpdateLastSave(lastsave); + + // init role + std::string slaveof = g_pika_conf->slaveof(); + if (!slaveof.empty()) { + auto sep = static_cast(slaveof.find(':')); + std::string master_ip = slaveof.substr(0, sep); + int32_t master_port = std::stoi(slaveof.substr(sep + 1)); + if ((master_ip == "127.0.0.1" || master_ip == host_) && master_port == port_) { + LOG(FATAL) << "you will slaveof yourself as the config file, please check"; + } else { + SetMaster(master_ip, master_port); + } } - pthread_rwlock_init(&state_protector_, NULL); - pthread_rwlock_init(&slowlog_protector_, NULL); + // Create redis sender + for (int i = 0; i < g_pika_conf->redis_sender_num(); ++i) { + redis_senders_.emplace_back(std::make_unique(int(i), + g_pika_conf->target_redis_host(), + g_pika_conf->target_redis_port(), + g_pika_conf->target_redis_pwd())); + } + + acl_ = std::make_unique<::Acl>(); + SetSlowCmdThreadPoolFlag(g_pika_conf->slow_cmd_pool()); + bgsave_thread_.set_thread_name("PikaServer::bgsave_thread_"); + purge_thread_.set_thread_name("PikaServer::purge_thread_"); + bgslots_cleanup_thread_.set_thread_name("PikaServer::bgslots_cleanup_thread_"); + common_bg_thread_.set_thread_name("PikaServer::common_bg_thread_"); + key_scan_thread_.set_thread_name("PikaServer::key_scan_thread_"); } PikaServer::~PikaServer() { - - // DispatchThread will use queue of worker thread, - // so we need to delete dispatch before worker. - pika_thread_pool_->stop_thread_pool(); - delete pika_dispatch_thread_; - + rsync_server_->Stop(); + // DispatchThread will use queue of worker thread + // so we need to Stop dispatch before worker. 
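// ============================================================================
// [Illustrative sketch, not part of the patch] The constructor above locks
// exit_mutex_ (a timed mutex) for the server's whole lifetime; Start() later
// "sleeps" via try_lock_for(5s), which only succeeds once Exit() unlocks the
// mutex, replacing the old sleep(1) polling loop with an interruptible wait.
// A stripped-down version of the pattern, with invented names:
#include <chrono>
#include <mutex>

class ExitLatch {
 public:
  ExitLatch() { mu_.lock(); }  // held until Exit() is called

  // Returns true if Exit() happened within the timeout, false on timeout.
  // Caveat: calling try_lock_for() from the thread that already owns the
  // mutex is formally undefined for std::timed_mutex, which is what the
  // Start() loop does until Exit() releases it from another thread; it works
  // on common platforms, but a condition_variable latch would be the
  // strictly portable choice.
  bool WaitFor(std::chrono::seconds timeout) {
    if (mu_.try_lock_for(timeout)) {
      mu_.unlock();  // re-release so subsequent waits also return immediately
      return true;
    }
    return false;
  }

  void Exit() { mu_.unlock(); }

 private:
  std::timed_mutex mu_;
};
// ============================================================================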
+ pika_dispatch_thread_->StopThread(); + pika_client_processor_->Stop(); + pika_slow_cmd_thread_pool_->stop_thread_pool(); + pika_admin_cmd_thread_pool_->stop_thread_pool(); { - slash::MutexLock l(&slave_mutex_); - std::vector::iterator iter = slaves_.begin(); + std::lock_guard l(slave_mutex_); + auto iter = slaves_.begin(); while (iter != slaves_.end()) { - iter = slaves_.erase(iter); + iter = slaves_.erase(iter); LOG(INFO) << "Delete slave success"; } } + bgsave_thread_.StopThread(); + key_scan_thread_.StopThread(); + pika_migrate_thread_->StopThread(); - delete pika_pubsub_thread_; - delete pika_auxiliary_thread_; - delete pika_rsync_service_; - delete pika_thread_pool_; - delete pika_monitor_thread_; - - for (size_t i = 0; i < redis_senders_.size(); i++) { + for (size_t i = 0; i < redis_senders_.size(); ++i) { redis_senders_[i]->Stop(); } - // wait thread exit - sleep(1); - for (size_t i = 0; i < redis_senders_.size(); i++) { - delete redis_senders_[i]; - } redis_senders_.clear(); - - bgsave_thread_.StopThread(); - key_scan_thread_.StopThread(); - - tables_.clear(); - - pthread_rwlock_destroy(&tables_rw_); - pthread_rwlock_destroy(&state_protector_); - pthread_rwlock_destroy(&slowlog_protector_); + dbs_.clear(); LOG(INFO) << "PikaServer " << pthread_self() << " exit!!!"; } bool PikaServer::ServerInit() { std::string network_interface = g_pika_conf->network_interface(); - - if (network_interface == "") { - - std::ifstream routeFile("/proc/net/route", std::ios_base::in); - if (!routeFile.good()) - { - return false; - } - - std::string line; - std::vector tokens; - while(std::getline(routeFile, line)) - { - std::istringstream stream(line); - std::copy(std::istream_iterator(stream), - std::istream_iterator(), - std::back_inserter >(tokens)); - - // the default interface is the one having the second - // field, Destination, set to "00000000" - if ((tokens.size() >= 2) && (tokens[1] == std::string("00000000"))) - { - network_interface = tokens[0]; - break; - } - - tokens.clear(); - } - routeFile.close(); - } - LOG(INFO) << "Using Networker Interface: " << network_interface; - - struct ifaddrs * ifAddrStruct = NULL; - struct ifaddrs * ifa = NULL; - void * tmpAddrPtr = NULL; - - if (getifaddrs(&ifAddrStruct) == -1) { - LOG(FATAL) << "getifaddrs failed: " << strerror(errno); + if (network_interface.empty()) { + network_interface = GetDefaultInterface(); } - for (ifa = ifAddrStruct; ifa != NULL; ifa = ifa->ifa_next) { - if (ifa->ifa_addr == NULL) { - continue; - } - if (ifa ->ifa_addr->sa_family==AF_INET) { // Check it is - // a valid IPv4 address - tmpAddrPtr = &((struct sockaddr_in *)ifa->ifa_addr)->sin_addr; - char addressBuffer[INET_ADDRSTRLEN]; - inet_ntop(AF_INET, tmpAddrPtr, addressBuffer, INET_ADDRSTRLEN); - if (std::string(ifa->ifa_name) == network_interface) { - host_ = addressBuffer; - break; - } - } else if (ifa->ifa_addr->sa_family==AF_INET6) { // Check it is - // a valid IPv6 address - tmpAddrPtr = &((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr; - char addressBuffer[INET6_ADDRSTRLEN]; - inet_ntop(AF_INET6, tmpAddrPtr, addressBuffer, INET6_ADDRSTRLEN); - if (std::string(ifa->ifa_name) == network_interface) { - host_ = addressBuffer; - break; - } - } + if (network_interface.empty()) { + LOG(FATAL) << "Can't get Networker Interface"; + return false; } - if (ifAddrStruct != NULL) { - freeifaddrs(ifAddrStruct); - } - if (ifa == NULL) { - LOG(FATAL) << "error network interface: " << network_interface << ", please check!"; + host_ = GetIpByInterface(network_interface); + if 
(host_.empty()) { + LOG(FATAL) << "can't get host ip for " << network_interface; + return false; } port_ = g_pika_conf->port(); @@ -235,136 +175,151 @@ bool PikaServer::ServerInit() { void PikaServer::Start() { int ret = 0; // start rsync first, rocksdb opened fd will not appear in this fork + // TODO: temporarily disable rsync server + /* ret = pika_rsync_service_->StartRsync(); if (0 != ret) { - tables_.clear(); - LOG(FATAL) << "Start Rsync Error: bind port " +std::to_string(pika_rsync_service_->ListenPort()) + " failed" - << ", Listen on this port to receive Master FullSync Data"; + dbs_.clear(); + LOG(FATAL) << "Start Rsync Error: bind port " + std::to_string(pika_rsync_service_->ListenPort()) + " failed" + << ", Listen on this port to receive Master FullSync Data"; } + */ - // We Init Table Struct Before Start The following thread - InitTableStruct(); + ret = pika_client_processor_->Start(); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(FATAL) << "Start PikaClientProcessor Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); + } - ret = pika_thread_pool_->start_thread_pool(); - if (ret != pink::kSuccess) { - tables_.clear(); - LOG(FATAL) << "Start ThreadPool Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error"); + ret = pika_slow_cmd_thread_pool_->start_thread_pool(); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(FATAL) << "Start PikaLowLevelThreadPool Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); + } + ret = pika_admin_cmd_thread_pool_->start_thread_pool(); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(FATAL) << "Start PikaAdminThreadPool Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); } ret = pika_dispatch_thread_->StartThread(); - if (ret != pink::kSuccess) { - tables_.clear(); - LOG(FATAL) << "Start Dispatch Error: " << ret << (ret == pink::kBindError ? ": bind port " + std::to_string(port_) + " conflict" - : ": other error") << ", Listen on this port to handle the connected redis client"; + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(FATAL) << "Start Dispatch Error: " << ret + << (ret == net::kBindError ? ": bind port " + std::to_string(port_) + " conflict" : ": other error") + << ", Listen on this port to handle the connected redis client"; } ret = pika_pubsub_thread_->StartThread(); - if (ret != pink::kSuccess) { - tables_.clear(); - LOG(FATAL) << "Start Pubsub Error: " << ret << (ret == pink::kBindError ? ": bind port conflict" : ": other error"); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(FATAL) << "Start Pubsub Error: " << ret << (ret == net::kBindError ? ": bind port conflict" : ": other error"); } ret = pika_auxiliary_thread_->StartThread(); - if (ret != pink::kSuccess) { - tables_.clear(); - LOG(FATAL) << "Start Auxiliary Thread Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error"); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(FATAL) << "Start Auxiliary Thread Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); } - for (size_t i = 0; i < redis_senders_.size(); i++) { + + for (size_t i = 0; i < redis_senders_.size(); ++i) { ret = redis_senders_[i]->StartThread(); - if (ret != pink::kSuccess) { - LOG(FATAL) << "Start Redis Sender Thread Error: " << ret << (ret == pink::kCreateThreadError ? 
": create thread error " : ": other error"); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(FATAL) << "Start RedisSender Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); } } time(&start_time_s_); - - std::string slaveof = g_pika_conf->slaveof(); - if (!slaveof.empty()) { - int32_t sep = slaveof.find(":"); - std::string master_ip = slaveof.substr(0, sep); - int32_t master_port = std::stoi(slaveof.substr(sep+1)); - if ((master_ip == "127.0.0.1" || master_ip == host_) && master_port == port_) { - LOG(FATAL) << "you will slaveof yourself as the config file, please check"; - } else { - SetMaster(master_ip, master_port); - } - } - LOG(INFO) << "Pika Server going to start"; + rsync_server_->Start(); while (!exit_) { DoTimingTask(); - // wake up every 10 second - int try_num = 0; - while (!exit_ && try_num++ < 10) { - sleep(1); + // wake up every 5 seconds + if (!exit_ && exit_mutex_.try_lock_for(std::chrono::seconds(5))) { + exit_mutex_.unlock(); } } LOG(INFO) << "Goodbye..."; } +void PikaServer::SetSlowCmdThreadPoolFlag(bool flag) { + slow_cmd_thread_pool_flag_ = flag; + int ret = 0; + if (flag) { + ret = pika_slow_cmd_thread_pool_->start_thread_pool(); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(ERROR) << "Start PikaLowLevelThreadPool Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); + } + } else { + while (SlowCmdThreadPoolCurQueueSize() != 0) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + pika_slow_cmd_thread_pool_->stop_thread_pool(); + } +} + void PikaServer::Exit() { + g_pika_server->DisableCompact(); + exit_mutex_.unlock(); exit_ = true; } -std::string PikaServer::host() { - return host_; -} +std::string PikaServer::host() { return host_; } -int PikaServer::port() { - return port_; -} +int PikaServer::port() { return port_; } -time_t PikaServer::start_time_s() { - return start_time_s_; -} +time_t PikaServer::start_time_s() { return start_time_s_; } std::string PikaServer::master_ip() { - slash::RWLock(&state_protector_, false); + std::shared_lock l(state_protector_); return master_ip_; } int PikaServer::master_port() { - slash::RWLock(&state_protector_, false); + std::shared_lock l(state_protector_); return master_port_; } int PikaServer::role() { - slash::RWLock(&state_protector_, false); + std::shared_lock l(state_protector_); return role_; } -bool PikaServer::readonly(const std::string& table_name, const std::string& key) { - slash::RWLock(&state_protector_, false); - if ((role_ & PIKA_ROLE_SLAVE) - && g_pika_conf->slave_read_only()) { - return true; +bool PikaServer::leader_protected_mode() { + std::shared_lock l(state_protector_); + return leader_protected_mode_; +} + +void PikaServer::CheckLeaderProtectedMode() { + if (!leader_protected_mode()) { + return; } - if (!g_pika_conf->classic_mode()) { - std::shared_ptr
table = GetTable(table_name); - if (table == nullptr) { - // swallow this error will process later - return false; - } - uint32_t index = g_pika_cmd_table_manager->DistributeKey( - key, table->PartitionNum()); - int role = 0; - Status s = g_pika_rm->CheckPartitionRole(table_name, index, &role); - if (!s.ok()) { - // swallow this error will process later - return false; - } - if (role & PIKA_ROLE_SLAVE) { - return true; - } + if (g_pika_rm->CheckMasterSyncFinished()) { + LOG(INFO) << "Master finish sync and commit binlog"; + + std::lock_guard l(state_protector_); + leader_protected_mode_ = false; } - return false; +} + +bool PikaServer::readonly(const std::string& db_name) { + std::shared_lock l(state_protector_); + return ((role_ & PIKA_ROLE_SLAVE) != 0) && g_pika_conf->slave_read_only(); } int PikaServer::repl_state() { - slash::RWLock(&state_protector_, false); + std::shared_lock l(state_protector_); return repl_state_; } std::string PikaServer::repl_state_str() { - slash::RWLock(&state_protector_, false); + std::shared_lock l(state_protector_); switch (repl_state_) { case PIKA_REPL_NO_CONNECT: return "no connect"; @@ -379,13 +334,9 @@ std::string PikaServer::repl_state_str() { } } -bool PikaServer::force_full_sync() { - return force_full_sync_; -} +bool PikaServer::force_full_sync() { return force_full_sync_; } -void PikaServer::SetForceFullSync(bool v) { - force_full_sync_ = v; -} +void PikaServer::SetForceFullSync(bool v) { force_full_sync_ = v; } void PikaServer::SetDispatchQueueLimit(int queue_limit) { rlimit limit; @@ -397,63 +348,55 @@ void PikaServer::SetDispatchQueueLimit(int queue_limit) { limit.rlim_cur = maxfiles; limit.rlim_max = maxfiles; if (setrlimit(RLIMIT_NOFILE, &limit) != -1) { - LOG(WARNING) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika have successfully reconfig it to " << limit.rlim_cur; + LOG(WARNING) << "your 'limit -n ' of " << old_limit + << " is not enough for Redis to start. pika have successfully reconfig it to " << limit.rlim_cur; } else { - LOG(FATAL) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika can not reconfig it(" << strerror(errno) << "), do it by yourself"; + LOG(FATAL) << "your 'limit -n ' of " << old_limit + << " is not enough for Redis to start. pika can not reconfig it(" << strerror(errno) + << "), do it by yourself"; } } pika_dispatch_thread_->SetQueueLimit(queue_limit); } -blackwidow::BlackwidowOptions PikaServer::bw_options() { - return bw_options_; +storage::StorageOptions PikaServer::storage_options() { + std::shared_lock rwl(storage_options_rw_); + return storage_options_; } -void PikaServer::InitTableStruct() { +void PikaServer::InitDBStruct() { std::string db_path = g_pika_conf->db_path(); std::string log_path = g_pika_conf->log_path(); - std::vector table_structs = g_pika_conf->table_structs(); - slash::RWLock rwl(&tables_rw_, true); - for (const auto& table : table_structs) { - std::string name = table.table_name; - uint32_t num = table.partition_num; - std::shared_ptr
table_ptr = std::make_shared
( - name, num, db_path, log_path); - table_ptr->AddPartitions(table.partition_ids); - tables_.emplace(name, table_ptr); + std::vector db_structs = g_pika_conf->db_structs(); + std::lock_guard rwl(dbs_rw_); + for (const auto& db : db_structs) { + std::string name = db.db_name; + std::shared_ptr db_ptr = std::make_shared(name, db_path, log_path); + db_ptr->Init(); + dbs_.emplace(name, db_ptr); } } -std::shared_ptr
PikaServer::GetTable(const std::string &table_name) { - slash::RWLock l(&tables_rw_, false); - auto iter = tables_.find(table_name); - return (iter == tables_.end()) ? NULL : iter->second; -} - -std::set PikaServer::GetTablePartitionIds(const std::string& table_name) { - std::set empty; - slash::RWLock l(&tables_rw_, false); - auto iter = tables_.find(table_name); - return (iter == tables_.end()) ? empty : iter->second->GetPartitionIds(); +std::shared_ptr PikaServer::GetDB(const std::string& db_name) { + std::shared_lock l(dbs_rw_); + auto iter = dbs_.find(db_name); + return (iter == dbs_.end()) ? nullptr : iter->second; } bool PikaServer::IsBgSaving() { - slash::RWLock table_rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& patition_item : table_item.second->partitions_) { - if (patition_item.second->IsBgSaving()) { - return true; - } + std::shared_lock l(dbs_rw_); + for (const auto& db_item : dbs_) { + if (db_item.second->IsBgSaving()) { + return true; } } return false; } bool PikaServer::IsKeyScaning() { - slash::RWLock table_rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - if (table_item.second->IsKeyScaning()) { + std::shared_lock l(dbs_rw_); + for (const auto& db_item : dbs_) { + if (db_item.second->IsKeyScaning()) { return true; } } @@ -461,197 +404,153 @@ bool PikaServer::IsKeyScaning() { } bool PikaServer::IsCompacting() { - slash::RWLock table_rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& partition_item : table_item.second->partitions_) { - partition_item.second->DbRWLockReader(); - std::string task_type = partition_item.second->db()->GetCurrentTaskType(); - partition_item.second->DbRWUnLock(); - if (strcasecmp(task_type.data(), "no")) { - return true; - } + std::shared_lock db_rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + db_item.second->DBLockShared(); + std::string task_type = db_item.second->storage()->GetCurrentTaskType(); + db_item.second->DBUnlockShared(); + if (strcasecmp(task_type.data(), "no") != 0) { + return true; } } return false; } -bool PikaServer::IsTableExist(const std::string& table_name) { - return GetTable(table_name) ? true : false; -} +bool PikaServer::IsDBExist(const std::string& db_name) { return static_cast(GetDB(db_name)); } -bool PikaServer::IsTablePartitionExist(const std::string& table_name, - uint32_t partition_id) { - std::shared_ptr
table_ptr = GetTable(table_name); - if (!table_ptr) { - return false; - } else { - return table_ptr->GetPartitionById(partition_id) ? true : false; - } +bool PikaServer::IsDBBinlogIoError(const std::string& db_name) { + std::shared_ptr db = GetDB(db_name); + return db ? db->IsBinlogIoError() : true; } -bool PikaServer::IsCommandSupport(const std::string& command) { - if (g_pika_conf->classic_mode()) { - return true; - } else { - std::string cmd = command; - slash::StringToLower(cmd); - return !ShardingModeNotSupportCommands.count(cmd); +std::set PikaServer::GetAllDBName() { + std::set dbs; + std::shared_lock l(dbs_rw_); + for (const auto& db_item : dbs_) { + dbs.insert(db_item.first); } + return dbs; } -bool PikaServer::IsTableBinlogIoError(const std::string& table_name) { - std::shared_ptr
table = GetTable(table_name); - return table ? table->IsBinlogIoError() : true; -} - -// If no collection of specified tables is given, we execute task in all tables -Status PikaServer::DoSameThingSpecificTable(const TaskType& type, const std::set& tables) { - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - if (!tables.empty() - && tables.find(table_item.first) == tables.end()) { +Status PikaServer::DoSameThingSpecificDB(const std::set& dbs, const TaskArg& arg) { + std::shared_lock rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + if (dbs.find(db_item.first) == dbs.end()) { continue; - } else { - switch (type) { - case TaskType::kCompactAll: - table_item.second->Compact(blackwidow::DataType::kAll); - break; - case TaskType::kCompactStrings: - table_item.second->Compact(blackwidow::DataType::kStrings); - break; - case TaskType::kCompactHashes: - table_item.second->Compact(blackwidow::DataType::kHashes); - break; - case TaskType::kCompactSets: - table_item.second->Compact(blackwidow::DataType::kSets); - break; - case TaskType::kCompactZSets: - table_item.second->Compact(blackwidow::DataType::kZSets); - break; - case TaskType::kCompactList: - table_item.second->Compact(blackwidow::DataType::kLists); - break; - case TaskType::kStartKeyScan: - table_item.second->KeyScan(); - break; - case TaskType::kStopKeyScan: - table_item.second->StopKeyScan(); - break; - case TaskType::kBgSave: - table_item.second->BgSaveTable(); - break; - default: - break; - } + } + switch (arg.type) { + case TaskType::kCompactAll: + db_item.second->Compact(storage::DataType::kAll); + break; + case TaskType::kStartKeyScan: + db_item.second->KeyScan(); + break; + case TaskType::kStopKeyScan: + db_item.second->StopKeyScan(); + break; + case TaskType::kBgSave: + db_item.second->BgSaveDB(); + break; + case TaskType::kCompactRangeAll: + db_item.second->CompactRange(storage::DataType::kAll, arg.argv[0], arg.argv[1]); + break; + default: + break; } } return Status::OK(); } -void PikaServer::PreparePartitionTrySync() { - slash::RWLock rwl(&tables_rw_, false); - ReplState state = force_full_sync_ ? - ReplState::kTryDBSync : ReplState::kTryConnect; - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - Status s = g_pika_rm->ActivateSyncSlavePartition( - RmNode(g_pika_server->master_ip(), - g_pika_server->master_port(), - table_item.second->GetTableName(), - partition_item.second->GetPartitionId()), state); - if (!s.ok()) { - LOG(WARNING) << s.ToString(); - } +void PikaServer::PrepareDBTrySync() { + std::shared_lock rwl(dbs_rw_); + ReplState state = force_full_sync_ ? 
ReplState::kTryDBSync : ReplState::kTryConnect; + for (const auto& db_item : dbs_) { + Status s = g_pika_rm->ActivateSyncSlaveDB( + RmNode(g_pika_server->master_ip(), g_pika_server->master_port(), db_item.second->GetDBName()), state); + if (!s.ok()) { + LOG(WARNING) << s.ToString(); } } force_full_sync_ = false; - loop_partition_state_machine_ = true; LOG(INFO) << "Mark try connect finish"; } -void PikaServer::PartitionSetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys) { - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - partition_item.second->DbRWLockReader(); - partition_item.second->db()->SetMaxCacheStatisticKeys(max_cache_statistic_keys); - partition_item.second->DbRWUnLock(); - } +void PikaServer::DBSetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys) { + std::shared_lock rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + db_item.second->DBLockShared(); + db_item.second->storage()->SetMaxCacheStatisticKeys(max_cache_statistic_keys); + db_item.second->DBUnlockShared(); } } -void PikaServer::PartitionSetSmallCompactionThreshold(uint32_t small_compaction_threshold) { - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - partition_item.second->DbRWLockReader(); - partition_item.second->db()->SetSmallCompactionThreshold(small_compaction_threshold); - partition_item.second->DbRWUnLock(); - } +void PikaServer::DBSetSmallCompactionThreshold(uint32_t small_compaction_threshold) { + std::shared_lock rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + db_item.second->DBLockShared(); + db_item.second->storage()->SetSmallCompactionThreshold(small_compaction_threshold); + db_item.second->DBUnlockShared(); + } +} + +void PikaServer::DBSetSmallCompactionDurationThreshold(uint32_t small_compaction_duration_threshold) { + std::shared_lock rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + db_item.second->DBLockShared(); + db_item.second->storage()->SetSmallCompactionDurationThreshold(small_compaction_duration_threshold); + db_item.second->DBUnlockShared(); } } -bool PikaServer::GetTablePartitionBinlogOffset(const std::string& table_name, - uint32_t partition_id, - BinlogOffset* const boffset) { - std::shared_ptr partition = GetTablePartitionById(table_name, partition_id); - if (!partition) { +bool PikaServer::GetDBBinlogOffset(const std::string& db_name, BinlogOffset* const boffset) { + std::shared_ptr db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!db) { return false; - } else { - return partition->GetBinlogOffset(boffset); - } -} - -// Only use in classic mode -std::shared_ptr PikaServer::GetPartitionByDbName(const std::string& db_name) { - std::shared_ptr
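// ============================================================================
// [Illustrative sketch, not part of the patch] DBSetMaxCacheStatisticKeys,
// DBSetSmallCompactionThreshold and DBSetSmallCompactionDurationThreshold
// above all repeat the same walk: shared-lock the DB map, take each DB's
// shared lock, set one storage option. If more options follow this shape,
// the duplication could be folded into a helper along these lines
// (ForEachStorage is invented, not part of pika):
#include <shared_mutex>

// DBMapT maps db names to shared_ptr-like handles exposing DBLockShared(),
// DBUnlockShared() and storage(); Fn is any callable taking the storage ref.
template <typename DBMapT, typename Fn>
void ForEachStorage(std::shared_mutex& dbs_rw, DBMapT& dbs, Fn&& fn) {
  std::shared_lock rwl(dbs_rw);
  for (const auto& db_item : dbs) {
    db_item.second->DBLockShared();
    fn(*db_item.second->storage());
    db_item.second->DBUnlockShared();
  }
}
// Usage would look like:
//   ForEachStorage(dbs_rw_, dbs_,
//                  [n](auto& s) { s.SetSmallCompactionThreshold(n); });
// ============================================================================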
table = GetTable(db_name); - return table ? table->GetPartitionById(0) : NULL; -} - -std::shared_ptr PikaServer::GetTablePartitionById( - const std::string& table_name, - uint32_t partition_id) { - std::shared_ptr
table = GetTable(table_name); - return table ? table->GetPartitionById(partition_id) : NULL; -} - -std::shared_ptr PikaServer::GetTablePartitionByKey( - const std::string& table_name, - const std::string& key) { - std::shared_ptr
table = GetTable(table_name); - return table ? table->GetPartitionByKey(key) : NULL; -} - -Status PikaServer::DoSameThingEveryPartition(const TaskType& type) { - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - switch (type) { - case TaskType::kResetReplState: - { - Status s = g_pika_rm->SetSlaveReplState( - PartitionInfo(table_item.second->GetTableName(), - partition_item.second->GetPartitionId()), - ReplState::kNoConnect); - if (!s.ok()) { - LOG(WARNING) << s.ToString(); - } - break; - } - case TaskType::kPurgeLog: - partition_item.second->PurgeLogs(); - break; - default: + } + Status s = db->Logger()->GetProducerStatus(&(boffset->filenum), &(boffset->offset)); + return s.ok(); +} + +Status PikaServer::DoSameThingEveryDB(const TaskType& type) { + std::shared_lock rwl(dbs_rw_); + std::shared_ptr slave_db = nullptr; + for (const auto& db_item : dbs_) { + switch (type) { + case TaskType::kResetReplState: { + slave_db = g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_item.second->GetDBName())); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << db_item.second->GetDBName() << ":" + << " Not Found"; + } + slave_db->SetReplState(ReplState::kNoConnect); + break; + } + case TaskType::kPurgeLog: { + std::shared_ptr db = g_pika_rm->GetSyncMasterDBByName( + DBInfo(db_item.second->GetDBName())); + if (!db) { + LOG(WARNING) << "DB: " << db_item.second->GetDBName() << ":" + << " Not Found."; break; + } + db->StableLogger()->PurgeStableLogs(); + break; } + case TaskType::kCompactAll: + db_item.second->Compact(storage::DataType::kAll); + break; + case TaskType::kCompactOldestOrBestDeleteRatioSst: + db_item.second->LongestNotCompactionSstCompact(storage::DataType::kAll); + break; + default: + break; } } return Status::OK(); } void PikaServer::BecomeMaster() { - slash::RWLock l(&state_protector_, true); + std::lock_guard l(state_protector_); role_ |= PIKA_ROLE_MASTER; } @@ -661,22 +560,20 @@ void PikaServer::DeleteSlave(int fd) { bool is_find = false; int slave_num = -1; { - slash::MutexLock l(&slave_mutex_); - std::vector::iterator iter = slaves_.begin(); + std::lock_guard l(slave_mutex_); + auto iter = slaves_.begin(); while (iter != slaves_.end()) { if (iter->conn_fd == fd) { ip = iter->ip; port = iter->port; is_find = true; - g_pika_rm->LostConnection(iter->ip, iter->port); - g_pika_rm->DropItemInWriteQueue(iter->ip, iter->port); LOG(INFO) << "Delete Slave Success, ip_port: " << iter->ip << ":" << iter->port; slaves_.erase(iter); break; } iter++; } - slave_num = slaves_.size(); + slave_num = static_cast(slaves_.size()); } if (is_find) { @@ -685,31 +582,29 @@ void PikaServer::DeleteSlave(int fd) { } if (slave_num == 0) { - slash::RWLock l(&state_protector_, true); + std::lock_guard l(state_protector_); role_ &= ~PIKA_ROLE_MASTER; + leader_protected_mode_ = false; // explicitly cancel protected mode } } int32_t PikaServer::CountSyncSlaves() { - slash::MutexLock ldb(&db_sync_protector_); - return db_sync_slaves_.size(); -} - -int32_t PikaServer::GetShardingSlaveListString(std::string& slave_list_str) { - std::vector complete_replica; - g_pika_rm->FindCompleteReplica(&complete_replica); - std::stringstream tmp_stream; - size_t index = 0; - for (auto replica : complete_replica) { - std::string ip; - int port; - if(!slash::ParseIpPortString(replica, ip, port)) { - continue; + int32_t count = 0; + std::lock_guard l(slave_mutex_); + for (const auto& slave : slaves_) { + for (const auto& ts : slave.db_structs) { 
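// ============================================================================
// [Illustrative sketch, not part of the patch] The "lag" that
// GetSlaveListString() reports below is a byte distance between two binlog
// positions, where a position is (file number, offset in file) and binlog
// files have a fixed configured size. The arithmetic in isolation, with
// invented names (binlog_file_size stands in for
// g_pika_conf->binlog_file_size()):
#include <cstdint>

struct BinlogPos {
  uint32_t filenum;  // index of the binlog file
  uint64_t offset;   // byte offset inside that file
};

// Unsigned wrap-around in the offset term cancels against the filenum term,
// which is why the patch can subtract offsets without a sign check.
uint64_t BinlogLagBytes(const BinlogPos& master, const BinlogPos& sent,
                        uint64_t binlog_file_size) {
  return static_cast<uint64_t>(master.filenum - sent.filenum) * binlog_file_size +
         master.offset - sent.offset;
}
// e.g. master {12, 1024}, sent {11, 4096}, 100 MiB files:
// 1 * 104857600 + 1024 - 4096 = 104854528 bytes behind.
// ============================================================================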
+ SlaveState slave_state; + std::shared_ptr db = g_pika_rm->GetSyncMasterDBByName(DBInfo(ts.db_name)); + if (!db) { + continue; + } + Status s = db->GetSlaveState(slave.ip, slave.port, &slave_state); + if (s.ok() && slave_state == SlaveState::kSlaveDbSync) { + count++; + } } - tmp_stream << "slave" << index++ << ":ip=" << ip << ",port=" << port << "\r\n"; } - slave_list_str.assign(tmp_stream.str()); - return index; + return count; } int32_t PikaServer::GetSlaveListString(std::string& slave_list_str) { @@ -719,44 +614,48 @@ int32_t PikaServer::GetSlaveListString(std::string& slave_list_str) { BinlogOffset sent_slave_boffset; BinlogOffset acked_slave_boffset; std::stringstream tmp_stream; - slash::MutexLock l(&slave_mutex_); + std::lock_guard l(slave_mutex_); + std::shared_ptr master_db = nullptr; for (const auto& slave : slaves_) { - tmp_stream << "slave" << index++ << ":ip=" << slave.ip << ",port=" << slave.port << ",conn_fd=" << slave.conn_fd << ",lag="; - for (const auto& ts : slave.table_structs) { - for (size_t idx = 0; idx < ts.partition_num; ++idx) { - std::shared_ptr partition = GetTablePartitionById(ts.table_name, idx); - RmNode rm_node(slave.ip, slave.port, ts.table_name, idx); - Status s = g_pika_rm->GetSyncMasterPartitionSlaveState(rm_node, &slave_state); - if (s.ok() - && slave_state == SlaveState::kSlaveBinlogSync - && g_pika_rm->GetSyncBinlogStatus(rm_node, &sent_slave_boffset, &acked_slave_boffset).ok()) { - if (!partition || !partition->GetBinlogOffset(&master_boffset)) { - continue; - } else { - uint64_t lag = - (master_boffset.filenum - sent_slave_boffset.filenum) * g_pika_conf->binlog_file_size() - + (master_boffset.offset - sent_slave_boffset.offset); - tmp_stream << "(" << partition->GetPartitionName() << ":" << lag << ")"; - } + tmp_stream << "slave" << index++ << ":ip=" << slave.ip << ",port=" << slave.port << ",conn_fd=" << slave.conn_fd + << ",lag="; + for (const auto& ts : slave.db_structs) { + std::shared_ptr db = g_pika_rm->GetSyncMasterDBByName(DBInfo(ts.db_name)); + if (!db) { + LOG(WARNING) << "Sync Master DB: " << ts.db_name << ", NotFound"; + continue; + } + Status s = db->GetSlaveState(slave.ip, slave.port, &slave_state); + if (s.ok() && slave_state == SlaveState::kSlaveBinlogSync && + db->GetSlaveSyncBinlogInfo(slave.ip, slave.port, &sent_slave_boffset, &acked_slave_boffset).ok()) { + Status s = db->Logger()->GetProducerStatus(&(master_boffset.filenum), &(master_boffset.offset)); + if (!s.ok()) { + continue; } else { - tmp_stream << "(" << partition->GetPartitionName() << ":not syncing)"; + uint64_t lag = + static_cast((master_boffset.filenum - sent_slave_boffset.filenum)) * g_pika_conf->binlog_file_size() + + master_boffset.offset - sent_slave_boffset.offset; + tmp_stream << "(" << db->DBName() << ":" << lag << ")"; } + } else if (s.ok() && slave_state == SlaveState::kSlaveDbSync) { + tmp_stream << "(" << db->DBName() << ":full syncing)"; + } else { + tmp_stream << "(" << db->DBName() << ":not syncing)"; } } tmp_stream << "\r\n"; } slave_list_str.assign(tmp_stream.str()); - return index; + return static_cast(index); } // Try add Slave, return true if success, // return false when slave already exist -bool PikaServer::TryAddSlave(const std::string& ip, int64_t port, int fd, - const std::vector& table_structs) { - std::string ip_port = slash::IpPortString(ip, port); +bool PikaServer::TryAddSlave(const std::string& ip, int64_t port, int fd, const std::vector& db_structs) { + std::string ip_port = pstd::IpPortString(ip, static_cast(port)); - slash::MutexLock 
l(&slave_mutex_); - std::vector::iterator iter = slaves_.begin(); + std::lock_guard l(slave_mutex_); + auto iter = slaves_.begin(); while (iter != slaves_.end()) { if (iter->ip_port == ip_port) { LOG(WARNING) << "Slave Already Exist, ip_port: " << ip << ":" << port; @@ -770,37 +669,37 @@ bool PikaServer::TryAddSlave(const std::string& ip, int64_t port, int fd, SlaveItem s; s.ip_port = ip_port; s.ip = ip; - s.port = port; + s.port = static_cast(port); s.conn_fd = fd; s.stage = SLAVE_ITEM_STAGE_ONE; - s.table_structs = table_structs; - gettimeofday(&s.create_time, NULL); + s.db_structs = db_structs; + gettimeofday(&s.create_time, nullptr); slaves_.push_back(s); return true; } void PikaServer::SyncError() { - slash::RWLock l(&state_protector_, true); + std::lock_guard l(state_protector_); repl_state_ = PIKA_REPL_ERROR; LOG(WARNING) << "Sync error, set repl_state to PIKA_REPL_ERROR"; } void PikaServer::RemoveMaster() { { - slash::RWLock l(&state_protector_, true); + std::lock_guard l(state_protector_); repl_state_ = PIKA_REPL_NO_CONNECT; role_ &= ~PIKA_ROLE_SLAVE; - if (master_ip_ != "" && master_port_ != -1) { + if (!master_ip_.empty() && master_port_ != -1) { g_pika_rm->CloseReplClientConn(master_ip_, master_port_ + kPortShiftReplServer); - g_pika_rm->LostConnection(master_ip_, master_port_); - loop_partition_state_machine_ = false; + g_pika_rm->DeactivateSyncSlaveDB(master_ip_, master_port_); + UpdateMetaSyncTimestampWithoutLock(); LOG(INFO) << "Remove Master Success, ip_port: " << master_ip_ << ":" << master_port_; } master_ip_ = ""; master_port_ = -1; - DoSameThingEveryPartition(TaskType::kResetReplState); + DoSameThingEveryDB(TaskType::kResetReplState); } } @@ -808,8 +707,8 @@ bool PikaServer::SetMaster(std::string& master_ip, int master_port) { if (master_ip == "127.0.0.1") { master_ip = host_; } - slash::RWLock l(&state_protector_, true); - if ((role_ ^ PIKA_ROLE_SLAVE) && repl_state_ == PIKA_REPL_NO_CONNECT) { + std::lock_guard l(state_protector_); + if (((role_ ^ PIKA_ROLE_SLAVE) != 0) && repl_state_ == PIKA_REPL_NO_CONNECT) { master_ip_ = master_ip; master_port_ = master_port; role_ |= PIKA_ROLE_SLAVE; @@ -820,339 +719,273 @@ bool PikaServer::SetMaster(std::string& master_ip, int master_port) { } bool PikaServer::ShouldMetaSync() { - slash::RWLock l(&state_protector_, false); + std::shared_lock l(state_protector_); return repl_state_ == PIKA_REPL_SHOULD_META_SYNC; } void PikaServer::FinishMetaSync() { - slash::RWLock l(&state_protector_, true); + std::lock_guard l(state_protector_); assert(repl_state_ == PIKA_REPL_SHOULD_META_SYNC); repl_state_ = PIKA_REPL_META_SYNC_DONE; } bool PikaServer::MetaSyncDone() { - slash::RWLock l(&state_protector_, false); + std::shared_lock l(state_protector_); return repl_state_ == PIKA_REPL_META_SYNC_DONE; } void PikaServer::ResetMetaSyncStatus() { - slash::RWLock sp_l(&state_protector_, true); - if (role_ & PIKA_ROLE_SLAVE) { + std::lock_guard sp_l(state_protector_); + if ((role_ & PIKA_ROLE_SLAVE) != 0) { // not change by slaveof no one, so set repl_state = PIKA_REPL_SHOULD_META_SYNC, // continue to connect master repl_state_ = PIKA_REPL_SHOULD_META_SYNC; - loop_partition_state_machine_ = false; - DoSameThingEveryPartition(TaskType::kResetReplState); - } -} - -bool PikaServer::AllPartitionConnectSuccess() { - bool all_partition_connect_success = true; - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - ReplState repl_state; - Status s = 
g_pika_rm->GetSlaveReplState( - PartitionInfo(table_item.second->GetTableName(), - partition_item.second->GetPartitionId()), &repl_state); - if (!s.ok()) { - return false; - } - if (repl_state != ReplState::kConnected) { - all_partition_connect_success = false; - break; - } - } + DoSameThingEveryDB(TaskType::kResetReplState); + } +} + +int PikaServer::GetMetaSyncTimestamp() { + std::shared_lock sp_l(state_protector_); + return last_meta_sync_timestamp_; +} + +void PikaServer::UpdateMetaSyncTimestamp() { + std::lock_guard sp_l(state_protector_); + last_meta_sync_timestamp_ = static_cast(time(nullptr)); +} + +void PikaServer::UpdateMetaSyncTimestampWithoutLock() { + last_meta_sync_timestamp_ = static_cast(time(nullptr)); +} + +bool PikaServer::IsFirstMetaSync() { + std::shared_lock sp_l(state_protector_); + return first_meta_sync_; +} + +void PikaServer::SetFirstMetaSync(bool v) { + std::lock_guard sp_l(state_protector_); + first_meta_sync_ = v; +} + +void PikaServer::ScheduleClientPool(net::TaskFunc func, void* arg, bool is_slow_cmd, bool is_admin_cmd) { + if (is_slow_cmd && g_pika_conf->slow_cmd_pool()) { + pika_slow_cmd_thread_pool_->Schedule(func, arg); + return; + } + if (is_admin_cmd) { + pika_admin_cmd_thread_pool_->Schedule(func, arg); + return; } - return all_partition_connect_success; + pika_client_processor_->SchedulePool(func, arg); } -bool PikaServer::LoopPartitionStateMachine() { - slash::RWLock sp_l(&state_protector_, false); - return loop_partition_state_machine_; +size_t PikaServer::ClientProcessorThreadPoolCurQueueSize() { + if (!pika_client_processor_) { + return 0; + } + return pika_client_processor_->ThreadPoolCurQueueSize(); +} + +size_t PikaServer::ClientProcessorThreadPoolMaxQueueSize() { + if (!pika_client_processor_) { + return 0; + } + return pika_client_processor_->ThreadPoolMaxQueueSize(); } -void PikaServer::SetLoopPartitionStateMachine(bool need_loop) { - slash::RWLock sp_l(&state_protector_, true); - assert(repl_state_ == PIKA_REPL_META_SYNC_DONE); - loop_partition_state_machine_ = need_loop; +size_t PikaServer::SlowCmdThreadPoolCurQueueSize() { + if (!pika_slow_cmd_thread_pool_) { + return 0; + } + size_t cur_size = 0; + pika_slow_cmd_thread_pool_->cur_queue_size(&cur_size); + return cur_size; } -void PikaServer::Schedule(pink::TaskFunc func, void* arg) { - pika_thread_pool_->Schedule(func, arg); +size_t PikaServer::SlowCmdThreadPoolMaxQueueSize() { + if (!pika_slow_cmd_thread_pool_) { + return 0; + } + return pika_slow_cmd_thread_pool_->max_queue_size(); } -void PikaServer::BGSaveTaskSchedule(pink::TaskFunc func, void* arg) { +void PikaServer::BGSaveTaskSchedule(net::TaskFunc func, void* arg) { bgsave_thread_.StartThread(); bgsave_thread_.Schedule(func, arg); } -void PikaServer::PurgelogsTaskSchedule(pink::TaskFunc func, void* arg) { +void PikaServer::PurgelogsTaskSchedule(net::TaskFunc func, void* arg) { purge_thread_.StartThread(); purge_thread_.Schedule(func, arg); } void PikaServer::PurgeDir(const std::string& path) { - std::string* dir_path = new std::string(path); + auto dir_path = new std::string(path); PurgeDirTaskSchedule(&DoPurgeDir, static_cast(dir_path)); } + void PikaServer::PurgeDirTaskSchedule(void (*function)(void*), void* arg) { purge_thread_.StartThread(); purge_thread_.Schedule(function, arg); } -void PikaServer::DBSync(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id) { - { - std::string task_index = - DbSyncTaskIndex(ip, port, table_name, partition_id); - slash::MutexLock ml(&db_sync_protector_); 
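// ============================================================================
// [Illustrative sketch, not part of the patch] The DBSync() code being removed
// around this point deduplicates full-sync jobs: it builds an
// "ip:port_table:id" task index, inserts it into db_sync_slaves_ under a
// mutex, returns early if it was already present, and erases it when the
// transfer ends. That guard pattern in isolation, with invented names:
#include <mutex>
#include <set>
#include <string>

class OncePerKeyGuard {
 public:
  // True means the caller won the right to run the task for this key.
  bool TryAcquire(const std::string& key) {
    std::lock_guard<std::mutex> l(mu_);
    return inflight_.insert(key).second;  // false if already scheduled
  }
  void Release(const std::string& key) {
    std::lock_guard<std::mutex> l(mu_);
    inflight_.erase(key);
  }
 private:
  std::mutex mu_;
  std::set<std::string> inflight_;
};
// ============================================================================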
- if (db_sync_slaves_.find(task_index) != db_sync_slaves_.end()) { - return; - } - db_sync_slaves_.insert(task_index); - } - // Reuse the bgsave_thread_ - // Since we expect BgSave and DBSync execute serially - bgsave_thread_.StartThread(); - DBSyncArg* arg = new DBSyncArg(this, ip, port, table_name, partition_id); - bgsave_thread_.Schedule(&DoDBSync, reinterpret_cast(arg)); -} - -void PikaServer::TryDBSync(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int32_t top) { - std::shared_ptr partition = - GetTablePartitionById(table_name, partition_id); - if (!partition) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() - << " Not Found, TryDBSync Failed"; - } else { - BgSaveInfo bgsave_info = partition->bgsave_info(); - std::string logger_filename = partition->logger()->filename; - if (slash::IsDir(bgsave_info.path) != 0 - || !slash::FileExists(NewFileName(logger_filename, bgsave_info.filenum)) - || top - bgsave_info.filenum > kDBSyncMaxGap) { - // Need Bgsave first - partition->BgSavePartition(); - } - DBSync(ip, port, table_name, partition_id); +pstd::Status PikaServer::GetDumpUUID(const std::string& db_name, std::string* snapshot_uuid) { + std::shared_ptr db = GetDB(db_name); + if (!db) { + LOG(WARNING) << "cannot find db for db_name " << db_name; + return pstd::Status::NotFound("db no found"); } + db->GetBgSaveUUID(snapshot_uuid); + return pstd::Status::OK(); } -void PikaServer::DbSyncSendFile(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id) { - std::shared_ptr partition = GetTablePartitionById(table_name, partition_id); - if (!partition) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() - << " Not Found, DbSync send file Failed"; - return; +pstd::Status PikaServer::GetDumpMeta(const std::string& db_name, std::vector* fileNames, std::string* snapshot_uuid) { + std::shared_ptr db = GetDB(db_name); + if (!db) { + LOG(WARNING) << "cannot find db for db_name " << db_name; + return pstd::Status::NotFound("db no found"); } + db->GetBgSaveMetaData(fileNames, snapshot_uuid); + return pstd::Status::OK(); +} - BgSaveInfo bgsave_info = partition->bgsave_info(); - std::string bg_path = bgsave_info.path; - uint32_t binlog_filenum = bgsave_info.filenum; - uint64_t binlog_offset = bgsave_info.offset; - - // Get all files need to send - std::vector descendant; - int ret = 0; - LOG(INFO) << "Partition: " << partition->GetPartitionName() - << " Start Send files in " << bg_path << " to " << ip; - ret = slash::GetChildren(bg_path, descendant); - if (ret != 0) { - std::string ip_port = slash::IpPortString(ip, port); - slash::MutexLock ldb(&db_sync_protector_); - db_sync_slaves_.erase(ip_port); - LOG(WARNING) << "Partition: " << partition->GetPartitionName() - << " Get child directory when try to do sync failed, error: " << strerror(ret); +void PikaServer::TryDBSync(const std::string& ip, int port, const std::string& db_name, + int32_t top) { + std::shared_ptr db = GetDB(db_name); + if (!db) { + LOG(WARNING) << "can not find DB : " << db_name + << ", TryDBSync Failed"; return; } - - std::string local_path, target_path; - std::string remote_path = g_pika_conf->classic_mode() ? 
table_name : table_name + "/" + std::to_string(partition_id); - std::vector::const_iterator iter = descendant.begin(); - slash::RsyncRemote remote(ip, port, kDBSyncModule, g_pika_conf->db_sync_speed() * 1024); - std::string secret_file_path = g_pika_conf->db_sync_path(); - if (g_pika_conf->db_sync_path().back() != '/') { - secret_file_path += "/"; - } - secret_file_path += slash::kRsyncSubDir + "/" + kPikaSecretFile; - - for (; iter != descendant.end(); ++iter) { - local_path = bg_path + "/" + *iter; - target_path = remote_path + "/" + *iter; - - if (*iter == kBgsaveInfoFile) { - continue; - } - - if (slash::IsDir(local_path) == 0 && - local_path.back() != '/') { - local_path.push_back('/'); - target_path.push_back('/'); - } - - // We need specify the speed limit for every single file - ret = slash::RsyncSendFile(local_path, target_path, secret_file_path, remote); - if (0 != ret) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() - << " RSync send file failed! From: " << *iter - << ", To: " << target_path - << ", At: " << ip << ":" << port - << ", Error: " << ret; - break; - } - } - // Clear target path - slash::RsyncSendClearTarget(bg_path + "/strings", remote_path + "/strings", secret_file_path, remote); - slash::RsyncSendClearTarget(bg_path + "/hashes", remote_path + "/hashes", secret_file_path, remote); - slash::RsyncSendClearTarget(bg_path + "/lists", remote_path + "/lists", secret_file_path, remote); - slash::RsyncSendClearTarget(bg_path + "/sets", remote_path + "/sets", secret_file_path, remote); - slash::RsyncSendClearTarget(bg_path + "/zsets", remote_path + "/zsets", secret_file_path, remote); - - pink::PinkCli* cli = pink::NewRedisCli(); - std::string lip(host_); - if (cli->Connect(ip, port, "").ok()) { - struct sockaddr_in laddr; - socklen_t llen = sizeof(laddr); - getsockname(cli->fd(), (struct sockaddr*) &laddr, &llen); - lip = inet_ntoa(laddr.sin_addr); - cli->Close(); - delete cli; - } else { - LOG(WARNING) << "Rsync try connect slave rsync service error" - << ", slave rsync service(" << ip << ":" << port << ")"; - delete cli; - } - - // Send info file at last - if (0 == ret) { - // need to modify the IP addr in the info file - if (lip.compare(host_)) { - std::ofstream fix; - std::string fn = bg_path + "/" + kBgsaveInfoFile + "." 
+ std::to_string(time(NULL)); - fix.open(fn, std::ios::in | std::ios::trunc); - if (fix.is_open()) { - fix << "0s\n" << lip << "\n" << port_ << "\n" << binlog_filenum << "\n" << binlog_offset << "\n"; - fix.close(); - } - ret = slash::RsyncSendFile(fn, remote_path + "/" + kBgsaveInfoFile, secret_file_path, remote); - slash::DeleteFile(fn); - if (ret != 0) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() << " Send Modified Info File Failed"; - } - } else if (0 != (ret = slash::RsyncSendFile(bg_path + "/" + kBgsaveInfoFile, remote_path + "/" + kBgsaveInfoFile, secret_file_path, remote))) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() << " Send Info File Failed"; - } - } - // remove slave - { - std::string task_index = - DbSyncTaskIndex(ip, port, table_name, partition_id); - slash::MutexLock ml(&db_sync_protector_); - db_sync_slaves_.erase(task_index); + std::shared_ptr sync_db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!sync_db) { + LOG(WARNING) << "can not find DB: " << db_name + << ", TryDBSync Failed"; + return; } - - if (0 == ret) { - LOG(INFO) << "Partition: " << partition->GetPartitionName() << " RSync Send Files Success"; + BgSaveInfo bgsave_info = db->bgsave_info(); + std::string logger_filename = sync_db->Logger()->filename(); + if (pstd::IsDir(bgsave_info.path) != 0 || + !pstd::FileExists(NewFileName(logger_filename, bgsave_info.offset.b_offset.filenum)) || + static_cast(top) - static_cast(bgsave_info.offset.b_offset.filenum) > + static_cast(kDBSyncMaxGap)) { + // Need Bgsave first + db->BgSaveDB(); } } -std::string PikaServer::DbSyncTaskIndex(const std::string& ip, - int port, - const std::string& table_name, - uint32_t partition_id) { - char buf[256]; - snprintf(buf, sizeof(buf), "%s:%d_%s:%d", - ip.data(), port, table_name.data(), partition_id); - return buf; -} - -void PikaServer::KeyScanTaskSchedule(pink::TaskFunc func, void* arg) { +void PikaServer::KeyScanTaskSchedule(net::TaskFunc func, void* arg) { key_scan_thread_.StartThread(); key_scan_thread_.Schedule(func, arg); } void PikaServer::ClientKillAll() { pika_dispatch_thread_->ClientKillAll(); - pika_monitor_thread_->ThreadClientKill(); + pika_pubsub_thread_->NotifyCloseAllConns(); +} + +void PikaServer::ClientKillPubSub() { pika_pubsub_thread_->NotifyCloseAllConns(); +} + +void PikaServer::ClientKillAllNormal() { + pika_dispatch_thread_->ClientKillAll(); } -int PikaServer::ClientKill(const std::string &ip_port) { - if (pika_dispatch_thread_->ClientKill(ip_port) - || pika_monitor_thread_->ThreadClientKill(ip_port)) { +int PikaServer::ClientKill(const std::string& ip_port) { + if (pika_dispatch_thread_->ClientKill(ip_port)) { return 1; } return 0; } -int64_t PikaServer::ClientList(std::vector *clients) { +int64_t PikaServer::ClientList(std::vector* clients) { int64_t clients_num = 0; - clients_num += pika_dispatch_thread_->ThreadClientList(clients); - clients_num += pika_monitor_thread_->ThreadClientList(clients); + clients_num += static_cast(pika_dispatch_thread_->ThreadClientList(clients)); return clients_num; } -bool PikaServer::HasMonitorClients() { - return pika_monitor_thread_->HasMonitorClients(); +bool PikaServer::HasMonitorClients() const { + std::unique_lock lock(monitor_mutex_protector_); + return !pika_monitor_clients_.empty(); +} +bool PikaServer::ClientIsMonitor(const std::shared_ptr& client_ptr) const { + std::unique_lock lock(monitor_mutex_protector_); + return pika_monitor_clients_.count(client_ptr) != 0; } void PikaServer::AddMonitorMessage(const 
std::string& monitor_message) { - pika_monitor_thread_->AddMonitorMessage(monitor_message); + const std::string msg = "+" + monitor_message + "\r\n"; + + std::vector<std::shared_ptr<PikaClientConn>> clients; + + std::unique_lock lock(monitor_mutex_protector_); + clients.reserve(pika_monitor_clients_.size()); + for (auto it = pika_monitor_clients_.begin(); it != pika_monitor_clients_.end();) { + auto cli = (*it).lock(); + if (cli) { + clients.push_back(std::move(cli)); + ++it; + } else { + it = pika_monitor_clients_.erase(it); + } + } + lock.unlock(); // SendReply without lock + for (const auto& cli : clients) { + cli->WriteResp(msg); + cli->SendReply(); + } } -void PikaServer::AddMonitorClient(std::shared_ptr<PikaClientConn> client_ptr) { - pika_monitor_thread_->AddMonitorClient(client_ptr); +void PikaServer::AddMonitorClient(const std::shared_ptr<PikaClientConn>& client_ptr) { + if (client_ptr) { + std::unique_lock lock(monitor_mutex_protector_); + pika_monitor_clients_.insert(client_ptr); + } } void PikaServer::SlowlogTrim() { - pthread_rwlock_wrlock(&slowlog_protector_); + std::lock_guard l(slowlog_protector_); while (slowlog_list_.size() > static_cast<uint32_t>(g_pika_conf->slowlog_max_len())) { slowlog_list_.pop_back(); } - pthread_rwlock_unlock(&slowlog_protector_); } void PikaServer::SlowlogReset() { - pthread_rwlock_wrlock(&slowlog_protector_); + std::lock_guard l(slowlog_protector_); slowlog_list_.clear(); - pthread_rwlock_unlock(&slowlog_protector_); } uint32_t PikaServer::SlowlogLen() { - RWLock l(&slowlog_protector_, false); + std::shared_lock l(slowlog_protector_); return slowlog_list_.size(); } void PikaServer::SlowlogObtain(int64_t number, std::vector<SlowlogEntry>* slowlogs) { - pthread_rwlock_rdlock(&slowlog_protector_); + std::shared_lock l(slowlog_protector_); slowlogs->clear(); - std::list<SlowlogEntry>::const_iterator iter = slowlog_list_.begin(); - while (number-- && iter != slowlog_list_.end()) { + auto iter = slowlog_list_.begin(); + while (((number--) != 0) && iter != slowlog_list_.end()) { slowlogs->push_back(*iter); iter++; } - pthread_rwlock_unlock(&slowlog_protector_); } -void PikaServer::SlowlogPushEntry(const PikaCmdArgsType& argv, int32_t time, int64_t duration) { +void PikaServer::SlowlogPushEntry(const PikaCmdArgsType& argv, int64_t time, int64_t duration) { SlowlogEntry entry; - uint32_t slargc = (argv.size() < SLOWLOG_ENTRY_MAX_ARGC) - ? argv.size() : SLOWLOG_ENTRY_MAX_ARGC; + uint32_t slargc = (argv.size() < SLOWLOG_ENTRY_MAX_ARGC) ? argv.size() : SLOWLOG_ENTRY_MAX_ARGC; for (uint32_t idx = 0; idx < slargc; ++idx) { if (slargc != argv.size() && idx == slargc - 1) { char buffer[32]; - sprintf(buffer, "... (%lu more arguments)", argv.size() - slargc + 1); + snprintf(buffer, sizeof(buffer), "... (%lu more arguments)", argv.size() - slargc + 1); entry.argv.push_back(std::string(buffer)); } else { if (argv[idx].size() > SLOWLOG_ENTRY_MAX_STRING) { char buffer[32]; - sprintf(buffer, "... (%lu more bytes)", argv[idx].size() - SLOWLOG_ENTRY_MAX_STRING); + snprintf(buffer, sizeof(buffer), "... 
(%lu more bytes)", argv[idx].size() - SLOWLOG_ENTRY_MAX_STRING); std::string suffix(buffer); std::string brief = argv[idx].substr(0, SLOWLOG_ENTRY_MAX_STRING); entry.argv.push_back(brief + suffix); @@ -1162,191 +995,134 @@ void PikaServer::SlowlogPushEntry(const PikaCmdArgsType& argv, int32_t time, int } } - pthread_rwlock_wrlock(&slowlog_protector_); - entry.id = slowlog_entry_id_++; - entry.start_time = time; - entry.duration = duration; - slowlog_list_.push_front(entry); - pthread_rwlock_unlock(&slowlog_protector_); + { + std::lock_guard lock(slowlog_protector_); + entry.id = static_cast(slowlog_entry_id_++); + entry.start_time = time; + entry.duration = duration; + slowlog_list_.push_front(entry); + slowlog_counter_++; + } SlowlogTrim(); } -void PikaServer::ResetStat() { - statistic_data_.accumulative_connections.store(0); - statistic_data_.thread_querynum.store(0); - statistic_data_.last_thread_querynum.store(0); +uint64_t PikaServer::SlowlogCount() { + std::shared_lock l(slowlog_protector_); + return slowlog_counter_; } -uint64_t PikaServer::ServerQueryNum() { - return statistic_data_.thread_querynum.load(); +void PikaServer::ResetStat() { + statistic_.server_stat.accumulative_connections.store(0); + statistic_.server_stat.qps.querynum.store(0); + statistic_.server_stat.qps.last_querynum.store(0); } -uint64_t PikaServer::ServerCurrentQps() { - return statistic_data_.last_sec_thread_querynum.load(); -} +uint64_t PikaServer::ServerQueryNum() { return statistic_.server_stat.qps.querynum.load(); } -uint64_t PikaServer::accumulative_connections() { - return statistic_data_.accumulative_connections.load(); -} +uint64_t PikaServer::ServerCurrentQps() { return statistic_.server_stat.qps.last_sec_querynum.load(); } -void PikaServer::incr_accumulative_connections() { - ++statistic_data_.accumulative_connections; -} +uint64_t PikaServer::accumulative_connections() { return statistic_.server_stat.accumulative_connections.load(); } + +long long PikaServer::ServerKeyspaceHits() { return statistic_.server_stat.keyspace_hits.load(); } +long long PikaServer::ServerKeyspaceMisses() { return statistic_.server_stat.keyspace_misses.load(); } + +void PikaServer::incr_accumulative_connections() { ++(statistic_.server_stat.accumulative_connections); } +void PikaServer::incr_server_keyspace_hits() { ++(statistic_.server_stat.keyspace_hits); } +void PikaServer::incr_server_keyspace_misses() { ++(statistic_.server_stat.keyspace_misses); } // only one thread invoke this right now void PikaServer::ResetLastSecQuerynum() { - uint64_t last_query = statistic_data_.last_thread_querynum.load(); - uint64_t cur_query = statistic_data_.thread_querynum.load(); - uint64_t last_time_us = statistic_data_.last_time_us.load(); - if (cur_query < last_query) { - cur_query = last_query; - } - uint64_t delta_query = cur_query - last_query; - uint64_t cur_time_us = slash::NowMicros(); - if (cur_time_us <= last_time_us) { - cur_time_us = last_time_us + 1; - } - uint64_t delta_time_us = cur_time_us - last_time_us; - statistic_data_.last_sec_thread_querynum.store(delta_query - * 1000000 / (delta_time_us)); - statistic_data_.last_thread_querynum.store(cur_query); - statistic_data_.last_time_us.store(cur_time_us); + statistic_.server_stat.qps.ResetLastSecQuerynum(); + statistic_.ResetDBLastSecQuerynum(); } -void PikaServer::UpdateQueryNumAndExecCountTable(const std::string& command) { +void PikaServer::UpdateQueryNumAndExecCountDB(const std::string& db_name, const std::string& command, bool is_write) { std::string cmd(command); - 
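// ============================================================================
// [Illustrative sketch, not part of the patch] SlowlogPushEntry() above caps
// both the recorded argument count (SLOWLOG_ENTRY_MAX_ARGC) and the bytes
// kept per argument (SLOWLOG_ENTRY_MAX_STRING), substituting "... (N more
// ...)" markers, the same truncation scheme Redis uses for its slowlog. The
// logic in isolation, with the limits passed in as parameters:
#include <cstddef>
#include <string>
#include <vector>

std::vector<std::string> TruncateForSlowlog(const std::vector<std::string>& argv,
                                            std::size_t max_argc,
                                            std::size_t max_bytes) {
  std::vector<std::string> out;
  std::size_t keep = argv.size() < max_argc ? argv.size() : max_argc;
  for (std::size_t i = 0; i < keep; ++i) {
    if (keep != argv.size() && i == keep - 1) {
      // Last slot summarizes every argument that was dropped.
      out.push_back("... (" + std::to_string(argv.size() - keep + 1) +
                    " more arguments)");
    } else if (argv[i].size() > max_bytes) {
      out.push_back(argv[i].substr(0, max_bytes) + "... (" +
                    std::to_string(argv[i].size() - max_bytes) + " more bytes)");
    } else {
      out.push_back(argv[i]);
    }
  }
  return out;
}
// ============================================================================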
statistic_data_.thread_querynum++; - statistic_data_.exec_count_table[slash::StringToUpper(cmd)]++; + statistic_.server_stat.qps.querynum++; + statistic_.server_stat.exec_count_db[pstd::StringToUpper(cmd)]++; + statistic_.UpdateDBQps(db_name, command, is_write); } -std::unordered_map PikaServer::ServerExecCountTable() { - std::unordered_map res; - for (auto& cmd : statistic_data_.exec_count_table) { - res[cmd.first] = cmd.second.load(); - } - return res; +size_t PikaServer::NetInputBytes() { return g_network_statistic->NetInputBytes(); } + +size_t PikaServer::NetOutputBytes() { return g_network_statistic->NetOutputBytes(); } + +size_t PikaServer::NetReplInputBytes() { return g_network_statistic->NetReplInputBytes(); } + +size_t PikaServer::NetReplOutputBytes() { return g_network_statistic->NetReplOutputBytes(); } + +float PikaServer::InstantaneousInputKbps() { + return static_cast(g_pika_server->instant_->getInstantaneousMetric(STATS_METRIC_NET_INPUT)) / 1024.0f; } -int PikaServer::SendToPeer() { - return g_pika_rm->ConsumeWriteQueue(); +float PikaServer::InstantaneousOutputKbps() { + return static_cast(g_pika_server->instant_->getInstantaneousMetric(STATS_METRIC_NET_OUTPUT)) / 1024.0f; } -void PikaServer::SignalAuxiliary() { - pika_auxiliary_thread_->mu_.Lock(); - pika_auxiliary_thread_->cv_.Signal(); - pika_auxiliary_thread_->mu_.Unlock(); +float PikaServer::InstantaneousInputReplKbps() { + return static_cast(g_pika_server->instant_->getInstantaneousMetric(STATS_METRIC_NET_INPUT_REPLICATION)) / + 1024.0f; } -Status PikaServer::TriggerSendBinlogSync() { - return g_pika_rm->WakeUpBinlogSync(); +float PikaServer::InstantaneousOutputReplKbps() { + return static_cast(g_pika_server->instant_->getInstantaneousMetric(STATS_METRIC_NET_OUTPUT_REPLICATION)) / + 1024.0f; } -int PikaServer::PubSubNumPat() { - return pika_pubsub_thread_->PubSubNumPat(); +std::unordered_map PikaServer::ServerExecCountDB() { + std::unordered_map res; + for (auto& cmd : statistic_.server_stat.exec_count_db) { + res[cmd.first] = cmd.second.load(); + } + return res; } +std::unordered_map PikaServer::ServerAllDBStat() { return statistic_.AllDBStat(); } + +int PikaServer::SendToPeer() { return g_pika_rm->ConsumeWriteQueue(); } + +void PikaServer::SignalAuxiliary() { pika_auxiliary_thread_->cv_.notify_one(); } + +Status PikaServer::TriggerSendBinlogSync() { return g_pika_rm->WakeUpBinlogSync(); } + +int PikaServer::PubSubNumPat() { return pika_pubsub_thread_->PubSubNumPat(); } + int PikaServer::Publish(const std::string& channel, const std::string& msg) { int receivers = pika_pubsub_thread_->Publish(channel, msg); return receivers; } -int PikaServer::UnSubscribe(std::shared_ptr conn, - const std::vector& channels, - bool pattern, - std::vector>* result) { +void PikaServer::EnablePublish(int fd) { + pika_pubsub_thread_->UpdateConnReadyState(fd, net::PubSubThread::ReadyState::kReady); +} + +int PikaServer::UnSubscribe(const std::shared_ptr& conn, const std::vector& channels, + bool pattern, std::vector>* result) { int subscribed = pika_pubsub_thread_->UnSubscribe(conn, channels, pattern, result); return subscribed; } -void PikaServer::Subscribe(std::shared_ptr conn, - const std::vector& channels, - bool pattern, - std::vector>* result) { +void PikaServer::Subscribe(const std::shared_ptr& conn, const std::vector& channels, + bool pattern, std::vector>* result) { pika_pubsub_thread_->Subscribe(conn, channels, pattern, result); } -void PikaServer::PubSubChannels(const std::string& pattern, - std::vector* result) { +void 
PikaServer::PubSubChannels(const std::string& pattern, std::vector* result) { pika_pubsub_thread_->PubSubChannels(pattern, result); } void PikaServer::PubSubNumSub(const std::vector& channels, - std::vector>* result) { + std::vector>* result) { pika_pubsub_thread_->PubSubNumSub(channels, result); } -int PikaServer::SendRedisCommand(const std::string& command, const std::string& key) { - // Send command - size_t idx = std::hash()(key) % redis_senders_.size(); - redis_senders_[idx]->SendRedisCommand(command); - return 0; +int PikaServer::ClientPubSubChannelSize(const std::shared_ptr& conn) { + return pika_pubsub_thread_->ClientPubSubChannelSize(conn); } -void PikaServer::RetransmitData(const std::string& path) { - - blackwidow::BlackWidow *db = new blackwidow::BlackWidow(); - rocksdb::Status s = db->Open(g_pika_server->bw_options(), path); - - if (!s.ok()) { - LOG(FATAL) << "open received database error: " << s.ToString(); - return; - } - - // Init SenderThread - int thread_num = g_pika_conf->redis_sender_num(); - std::string target_host = g_pika_conf->target_redis_host(); - int target_port = g_pika_conf->target_redis_port(); - std::string target_pwd = g_pika_conf->target_redis_pwd(); - - LOG(INFO) << "open received database success, start retransmit data to redis(" - << target_host << ":" << target_port << ")"; - - std::vector pika_senders; - std::vector migrators; - - for (int i = 0; i < thread_num; i++) { - pika_senders.emplace_back(new PikaSender(target_host, target_port, target_pwd)); - } - migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kStrings, thread_num)); - migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kLists, thread_num)); - migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kHashes, thread_num)); - migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kSets, thread_num)); - migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kZSets, thread_num)); - - for (size_t i = 0; i < pika_senders.size(); i++) { - pika_senders[i]->StartThread(); - } - for (size_t i = 0; i < migrators.size(); i++) { - migrators[i]->StartThread(); - } - - for (size_t i = 0; i < migrators.size(); i++) { - migrators[i]->JoinThread(); - } - for (size_t i = 0; i < pika_senders.size(); i++) { - pika_senders[i]->Stop(); - } - for (size_t i = 0; i < pika_senders.size(); i++) { - pika_senders[i]->JoinThread(); - } - - int64_t replies = 0, records = 0; - for (size_t i = 0; i < migrators.size(); i++) { - records += migrators[i]->num(); - delete migrators[i]; - } - migrators.clear(); - for (size_t i = 0; i < pika_senders.size(); i++) { - replies += pika_senders[i]->elements(); - delete pika_senders[i]; - } - pika_senders.clear(); - - LOG(INFO) << "=============== Retransmit Finish ====================="; - LOG(INFO) << "Total records : " << records << " have been Scaned"; - LOG(INFO) << "Total replies : " << replies << " received from redis server"; - LOG(INFO) << "======================================================="; +int PikaServer::ClientPubSubChannelPatternSize(const std::shared_ptr& conn) { + return pika_pubsub_thread_->ClientPubSubChannelPatternSize(conn); } /******************************* PRIVATE *******************************/ @@ -1354,12 +1130,36 @@ void PikaServer::RetransmitData(const std::string& path) { void PikaServer::DoTimingTask() { // Maybe schedule compactrange AutoCompactRange(); - // Purge log - AutoPurge(); + // Purge serverlog + AutoServerlogPurge(); + // Purge binlog + 
AutoBinlogPurge(); // Delete expired dump AutoDeleteExpiredDump(); // Check Rsync Status - AutoKeepAliveRSync(); + // TODO: temporarily disable rsync + // AutoKeepAliveRSync(); + // Reset server qps + ResetLastSecQuerynum(); + // Auto update network instantaneous metric + AutoUpdateNetworkMetric(); + ProcessCronTask(); + UpdateCacheInfo(); + // Print the queue status periodically + PrintThreadPoolQueueStatus(); + StatDiskUsage(); +} + +void PikaServer::StatDiskUsage() { + thread_local uint64_t last_update_time = 0; + auto current_time = pstd::NowMicros(); + if (current_time - last_update_time < 60 * 1000 * 1000) { + return; + } + last_update_time = current_time; + + disk_statistic_.db_size_.store(pstd::Du(g_pika_conf->db_path())); + disk_statistic_.log_size_.store(pstd::Du(g_pika_conf->log_path())); } void PikaServer::AutoCompactRange() { @@ -1375,83 +1175,171 @@ void PikaServer::AutoCompactRange() { std::string ci = g_pika_conf->compact_interval(); std::string cc = g_pika_conf->compact_cron(); - if (ci != "") { - std::string::size_type slash = ci.find("/"); + if (!ci.empty()) { + std::string::size_type slash = ci.find('/'); int interval = std::atoi(ci.substr(0, slash).c_str()); - int usage = std::atoi(ci.substr(slash+1).c_str()); + int usage = std::atoi(ci.substr(slash + 1).c_str()); struct timeval now; - gettimeofday(&now, NULL); - if (last_check_compact_time_.tv_sec == 0 || - now.tv_sec - last_check_compact_time_.tv_sec >= interval * 3600) { - gettimeofday(&last_check_compact_time_, NULL); - if (((double)free_size / total_size) * 100 >= usage) { - Status s = DoSameThingSpecificTable(TaskType::kCompactAll); + gettimeofday(&now, nullptr); + if (last_check_compact_time_.tv_sec == 0 || now.tv_sec - last_check_compact_time_.tv_sec >= interval * 3600) { + gettimeofday(&last_check_compact_time_, nullptr); + if ((static_cast<double>(free_size) / static_cast<double>(total_size)) * 100 >= usage) { + std::set<std::string> dbs = g_pika_server->GetAllDBName(); + Status s = DoSameThingSpecificDB(dbs, {TaskType::kCompactAll}); if (s.ok()) { - LOG(INFO) << "[Interval]schedule compactRange, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB"; + LOG(INFO) << "[Interval]schedule compactRange, freesize: " << free_size / 1048576 + << "MB, disksize: " << total_size / 1048576 << "MB"; } else { - LOG(INFO) << "[Interval]schedule compactRange Failed, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576 - << "MB, error: " << s.ToString(); + LOG(INFO) << "[Interval]schedule compactRange Failed, freesize: " << free_size / 1048576 + << "MB, disksize: " << total_size / 1048576 << "MB, error: " << s.ToString(); } } else { LOG(WARNING) << "compact-interval failed, because there is not enough disk space left, freesize" - << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB"; + << free_size / 1048576 << "MB, disksize: " << total_size / 1048576 << "MB"; } } return; } - if (cc != "") { + if (!cc.empty()) { bool have_week = false; - std::string compact_cron, week_str; - int slash_num = count(cc.begin(), cc.end(), '/'); + std::string compact_cron; + std::string week_str; + int64_t slash_num = count(cc.begin(), cc.end(), '/'); if (slash_num == 2) { have_week = true; - std::string::size_type first_slash = cc.find("/"); + std::string::size_type first_slash = cc.find('/'); week_str = cc.substr(0, first_slash); compact_cron = cc.substr(first_slash + 1); } else { compact_cron = cc; } - std::string::size_type colon = compact_cron.find("-"); - std::string::size_type underline = 
compact_cron.find("/"); + std::string::size_type colon = compact_cron.find('-'); + std::string::size_type underline = compact_cron.find('/'); int week = have_week ? (std::atoi(week_str.c_str()) % 7) : 0; int start = std::atoi(compact_cron.substr(0, colon).c_str()); - int end = std::atoi(compact_cron.substr(colon+1, underline).c_str()); - int usage = std::atoi(compact_cron.substr(underline+1).c_str()); + int end = std::atoi(compact_cron.substr(colon + 1, underline).c_str()); + int usage = std::atoi(compact_cron.substr(underline + 1).c_str()); std::time_t t = std::time(nullptr); std::tm* t_m = std::localtime(&t); bool in_window = false; if (start < end && (t_m->tm_hour >= start && t_m->tm_hour < end)) { in_window = have_week ? (week == t_m->tm_wday) : true; - } else if (start > end && ((t_m->tm_hour >= start && t_m->tm_hour < 24) || - (t_m->tm_hour >= 0 && t_m->tm_hour < end))) { - in_window = have_week ? false : true; + } else if (start > end && + ((t_m->tm_hour >= start && t_m->tm_hour < 24) || (t_m->tm_hour >= 0 && t_m->tm_hour < end))) { + in_window = !have_week; } else { have_scheduled_crontask_ = false; } if (!have_scheduled_crontask_ && in_window) { - if (((double)free_size / total_size) * 100 >= usage) { - Status s = DoSameThingEveryPartition(TaskType::kCompactAll); + if ((static_cast<double>(free_size) / static_cast<double>(total_size)) * 100 >= usage) { + Status s = DoSameThingEveryDB(TaskType::kCompactAll); if (s.ok()) { - LOG(INFO) << "[Cron]schedule compactRange, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB"; + LOG(INFO) << "[Cron]schedule compactRange, freesize: " << free_size / 1048576 + << "MB, disksize: " << total_size / 1048576 << "MB"; } else { - LOG(INFO) << "[Cron]schedule compactRange Failed, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576 - << "MB, error: " << s.ToString(); + LOG(INFO) << "[Cron]schedule compactRange Failed, freesize: " << free_size / 1048576 + << "MB, disksize: " << total_size / 1048576 << "MB, error: " << s.ToString(); } have_scheduled_crontask_ = true; } else { LOG(WARNING) << "compact-cron failed, because there is not enough disk space left, freesize" - << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB"; + << free_size / 1048576 << "MB, disksize: " << total_size / 1048576 << "MB"; } } } + + if (g_pika_conf->compaction_strategy() == PikaConf::FullCompact) { + DoSameThingEveryDB(TaskType::kCompactAll); + } else if (g_pika_conf->compaction_strategy() == PikaConf::OldestOrBestDeleteRatioSstCompact) { + DoSameThingEveryDB(TaskType::kCompactOldestOrBestDeleteRatioSst); + } } -void PikaServer::AutoPurge() { - DoSameThingEveryPartition(TaskType::kPurgeLog); +void PikaServer::AutoBinlogPurge() { DoSameThingEveryDB(TaskType::kPurgeLog); } + +void PikaServer::AutoServerlogPurge() { + std::string log_path = g_pika_conf->log_path(); + int retention_time = g_pika_conf->log_retention_time(); + if (retention_time < 0) { + return; + } + std::vector<std::string> log_files; + + if (!pstd::FileExists(log_path)) { + return; + } + + if (pstd::GetChildren(log_path, log_files) != 0) { + return; + } + // Get the start of the current day (today at 00:00:00) + time_t t = time(nullptr); + struct tm* now_time = localtime(&t); + now_time->tm_hour = 0; + now_time->tm_min = 0; + now_time->tm_sec = 0; + time_t now_timestamp = mktime(now_time); + + std::map<std::string, std::vector<std::pair<std::string, time_t>>> log_files_by_level; + + // Server log file name format: pika.[hostname].[user name].log.[severity level].[date].[time].[pid] + for (const auto& file : log_files) { + std::vector<std::string> file_parts; + pstd::StringSplit(file, '.', file_parts); + if (file_parts.size() < 7) { + continue; + } + + std::string severity_level = file_parts[4]; + if (severity_level != "WARNING" && severity_level != "INFO" && severity_level != "ERROR") { + continue; + } + + int log_year, log_month, log_day; + if (sscanf(file_parts[5].c_str(), "%4d%2d%2d", &log_year, &log_month, &log_day) != 3) { + continue; + } + + // Get the time when the server log file was originally created + struct tm log_time; + log_time.tm_year = log_year - 1900; + log_time.tm_mon = log_month - 1; + log_time.tm_mday = log_day; + log_time.tm_hour = 0; + log_time.tm_min = 0; + log_time.tm_sec = 0; + log_time.tm_isdst = -1; + time_t log_timestamp = mktime(&log_time); + log_files_by_level[severity_level].push_back({file, log_timestamp}); + } + + // Process files for each log level + for (auto& [level, files] : log_files_by_level) { + // Sort by time in descending order + std::sort(files.begin(), files.end(), + [](const auto& a, const auto& b) { return a.second > b.second; }); + + // Always keep the newest file of each level, even when it is out of date + bool has_recent_file = false; + for (const auto& [file, log_timestamp] : files) { + double diff_seconds = difftime(now_timestamp, log_timestamp); + int64_t interval_days = static_cast<int64_t>(diff_seconds / 86400); + if (interval_days <= retention_time) { + has_recent_file = true; + continue; + } + if (!has_recent_file) { + has_recent_file = true; + continue; + } + std::string log_file = log_path + "/" + file; + LOG(INFO) << "Deleting out of date log file: " << log_file; + if (!pstd::DeleteFile(log_file)) { + LOG(ERROR) << "Failed to delete log file: " << log_file; + } + } + } } void PikaServer::AutoDeleteExpiredDump() { @@ -1466,22 +1354,22 @@ void PikaServer::AutoDeleteExpiredDump() { } // Dump is not exist - if (!slash::FileExists(db_sync_path)) { + if (!pstd::FileExists(db_sync_path)) { return; } // Directory traversal - if (slash::GetChildren(db_sync_path, dump_dir) != 0) { + if (pstd::GetChildren(db_sync_path, dump_dir) != 0) { return; } // Handle dump directory - for (size_t i = 0; i < dump_dir.size(); i++) { - if (dump_dir[i].substr(0, db_sync_prefix.size()) != db_sync_prefix || dump_dir[i].size() != (db_sync_prefix.size() + 8)) { + for (auto& i : dump_dir) { + if (i.substr(0, db_sync_prefix.size()) != db_sync_prefix || i.size() != (db_sync_prefix.size() + 8)) { continue; } - std::string str_date = dump_dir[i].substr(db_sync_prefix.size(), (dump_dir[i].size() - db_sync_prefix.size())); - char *end = NULL; + std::string str_date = i.substr(db_sync_prefix.size(), (i.size() - db_sync_prefix.size())); + char* end = nullptr; std::strtol(str_date.c_str(), &end, 10); if (*end != 0) { continue; @@ -1492,13 +1380,14 @@ void PikaServer::AutoDeleteExpiredDump() { int dump_month = std::atoi(str_date.substr(4, 2).c_str()); int dump_day = std::atoi(str_date.substr(6, 2).c_str()); - time_t t = time(NULL); - struct tm *now = localtime(&t); + time_t t = time(nullptr); + struct tm* now = localtime(&t); int now_year = now->tm_year + 1900; int now_month = now->tm_mon + 1; int now_day = now->tm_mday; - struct tm dump_time, now_time; + struct tm dump_time; + struct tm now_time; dump_time.tm_year = dump_year; dump_time.tm_mon = dump_month; @@ -1514,16 +1403,16 @@ void PikaServer::AutoDeleteExpiredDump() { now_time.tm_min = 0; now_time.tm_sec = 0; - long dump_timestamp = mktime(&dump_time); - long now_timestamp = mktime(&now_time); + int64_t dump_timestamp = mktime(&dump_time); + int64_t now_timestamp = mktime(&now_time); // How many days, 1 day = 86400s - int interval_days = (now_timestamp - 
dump_timestamp) / 86400; + int64_t interval_days = (now_timestamp - dump_timestamp) / 86400; if (interval_days >= expiry_days) { - std::string dump_file = db_sync_path + dump_dir[i]; + std::string dump_file = db_sync_path + i; if (CountSyncSlaves() == 0) { LOG(INFO) << "Not syncing, delete dump file: " << dump_file; - slash::DeleteDirIfExist(dump_file); + pstd::DeleteDirIfExist(dump_file); } else { LOG(INFO) << "Syncing, can not delete " << dump_file << " dump file"; } @@ -1531,68 +1420,605 @@ void PikaServer::AutoDeleteExpiredDump() { } } -void PikaServer::AutoKeepAliveRSync() { - if (!pika_rsync_service_->CheckRsyncAlive()) { - LOG(WARNING) << "The Rsync service is down, Try to restart"; - pika_rsync_service_->StartRsync(); +void PikaServer::AutoUpdateNetworkMetric() { + monotime current_time = getMonotonicUs(); + size_t factor = 5e6; // us, 5s + instant_->trackInstantaneousMetric(STATS_METRIC_NET_INPUT, + g_pika_server->NetInputBytes() + g_pika_server->NetReplInputBytes(), current_time, + factor); + instant_->trackInstantaneousMetric(STATS_METRIC_NET_OUTPUT, + g_pika_server->NetOutputBytes() + g_pika_server->NetReplOutputBytes(), + current_time, factor); + instant_->trackInstantaneousMetric(STATS_METRIC_NET_INPUT_REPLICATION, g_pika_server->NetReplInputBytes(), + current_time, factor); + instant_->trackInstantaneousMetric(STATS_METRIC_NET_OUTPUT_REPLICATION, g_pika_server->NetReplOutputBytes(), + current_time, factor); +} + +void PikaServer::PrintThreadPoolQueueStatus() { + // Print the current queue size if it exceeds QUEUE_SIZE_THRESHOLD_PERCENTAGE/100 of the maximum queue size. + size_t cur_size = ClientProcessorThreadPoolCurQueueSize(); + size_t max_size = ClientProcessorThreadPoolMaxQueueSize(); + size_t thread_hold = (max_size / 100) * QUEUE_SIZE_THRESHOLD_PERCENTAGE; + if (cur_size > thread_hold) { + LOG(INFO) << "The current queue size of the Pika Server's client thread processor thread pool: " << cur_size; } } -void PikaServer::InitBlackwidowOptions() { +void PikaServer::InitStorageOptions() { + std::lock_guard rwl(storage_options_rw_); // For rocksdb::Options - bw_options_.options.create_if_missing = true; - bw_options_.options.keep_log_file_num = 10; - bw_options_.options.max_manifest_file_size = 64 * 1024 * 1024; - bw_options_.options.max_log_file_size = 512 * 1024 * 1024; - - bw_options_.options.write_buffer_size = - g_pika_conf->write_buffer_size(); - bw_options_.options.write_buffer_manager.reset( - new rocksdb::WriteBufferManager(g_pika_conf->max_write_buffer_size())); - bw_options_.options.target_file_size_base = - g_pika_conf->target_file_size_base(); - bw_options_.options.max_background_flushes = - g_pika_conf->max_background_flushes(); - bw_options_.options.max_background_compactions = - g_pika_conf->max_background_compactions(); - bw_options_.options.max_open_files = - g_pika_conf->max_cache_files(); - bw_options_.options.max_bytes_for_level_multiplier = - g_pika_conf->max_bytes_for_level_multiplier(); - bw_options_.options.optimize_filters_for_hits = - g_pika_conf->optimize_filters_for_hits(); - bw_options_.options.level_compaction_dynamic_level_bytes = - g_pika_conf->level_compaction_dynamic_level_bytes(); - - - if (g_pika_conf->compression() == "none") { - bw_options_.options.compression = - rocksdb::CompressionType::kNoCompression; - } else if (g_pika_conf->compression() == "snappy") { - bw_options_.options.compression = - rocksdb::CompressionType::kSnappyCompression; - } else if (g_pika_conf->compression() == "zlib") { - bw_options_.options.compression = - 
rocksdb::CompressionType::kZlibCompression; - } - - // For rocksdb::BlockBasedTableOptions - bw_options_.table_options.block_size = g_pika_conf->block_size(); - bw_options_.table_options.cache_index_and_filter_blocks = - g_pika_conf->cache_index_and_filter_blocks(); - bw_options_.block_cache_size = g_pika_conf->block_cache(); - bw_options_.share_block_cache = g_pika_conf->share_block_cache(); - - if (bw_options_.block_cache_size == 0) { - bw_options_.table_options.no_block_cache = true; - } else if (bw_options_.share_block_cache) { - bw_options_.table_options.block_cache = - rocksdb::NewLRUCache(bw_options_.block_cache_size); - } - - // For Blackwidow small compaction - bw_options_.statistics_max_size = g_pika_conf->max_cache_statistic_keys(); - bw_options_.small_compaction_threshold = - g_pika_conf->small_compaction_threshold(); + storage_options_.options.create_if_missing = true; + storage_options_.options.keep_log_file_num = 10; + storage_options_.options.max_manifest_file_size = 64 * 1024 * 1024; + storage_options_.options.max_log_file_size = 512 * 1024 * 1024; + + storage_options_.options.write_buffer_size = g_pika_conf->write_buffer_size(); + storage_options_.options.arena_block_size = g_pika_conf->arena_block_size(); + storage_options_.options.write_buffer_manager = + std::make_shared(g_pika_conf->max_write_buffer_size()); + storage_options_.options.max_total_wal_size = g_pika_conf->MaxTotalWalSize(); + storage_options_.options.max_write_buffer_number = g_pika_conf->max_write_buffer_number(); + storage_options_.options.level0_file_num_compaction_trigger = g_pika_conf->level0_file_num_compaction_trigger(); + storage_options_.options.level0_stop_writes_trigger = g_pika_conf->level0_stop_writes_trigger(); + storage_options_.options.level0_slowdown_writes_trigger = g_pika_conf->level0_slowdown_writes_trigger(); + storage_options_.options.min_write_buffer_number_to_merge = g_pika_conf->min_write_buffer_number_to_merge(); + storage_options_.options.max_bytes_for_level_base = g_pika_conf->level0_file_num_compaction_trigger() * g_pika_conf->write_buffer_size(); + storage_options_.options.max_subcompactions = g_pika_conf->max_subcompactions(); + storage_options_.options.target_file_size_base = g_pika_conf->target_file_size_base(); + storage_options_.options.max_compaction_bytes = g_pika_conf->max_compaction_bytes(); + storage_options_.options.max_background_flushes = g_pika_conf->max_background_flushes(); + storage_options_.options.max_background_compactions = g_pika_conf->max_background_compactions(); + storage_options_.options.disable_auto_compactions = g_pika_conf->disable_auto_compactions(); + storage_options_.options.max_background_jobs = g_pika_conf->max_background_jobs(); + storage_options_.options.delayed_write_rate = g_pika_conf->delayed_write_rate(); + storage_options_.options.max_open_files = g_pika_conf->max_cache_files(); + storage_options_.options.max_bytes_for_level_multiplier = g_pika_conf->max_bytes_for_level_multiplier(); + storage_options_.options.optimize_filters_for_hits = g_pika_conf->optimize_filters_for_hits(); + storage_options_.options.level_compaction_dynamic_level_bytes = g_pika_conf->level_compaction_dynamic_level_bytes(); + + storage_options_.options.compression = PikaConf::GetCompression(g_pika_conf->compression()); + storage_options_.options.compression_per_level = g_pika_conf->compression_per_level(); + // avoid blocking io on scan + // see https://github.com/facebook/rocksdb/wiki/IO#avoid-blocking-io + storage_options_.options.avoid_unnecessary_blocking_io = 
true; + + // default l0 l1 noCompression l2 and more use `compression` option + if (storage_options_.options.compression_per_level.empty() && + storage_options_.options.compression != rocksdb::kNoCompression) { + storage_options_.options.compression_per_level.push_back(rocksdb::kNoCompression); + storage_options_.options.compression_per_level.push_back(rocksdb::kNoCompression); + storage_options_.options.compression_per_level.push_back(storage_options_.options.compression); + } + + // For rocksdb::BlockBasedDBOptions + storage_options_.table_options.block_size = g_pika_conf->block_size(); + storage_options_.table_options.cache_index_and_filter_blocks = g_pika_conf->cache_index_and_filter_blocks(); + storage_options_.block_cache_size = g_pika_conf->block_cache(); + storage_options_.share_block_cache = g_pika_conf->share_block_cache(); + + storage_options_.table_options.pin_l0_filter_and_index_blocks_in_cache = + g_pika_conf->pin_l0_filter_and_index_blocks_in_cache(); + + if (storage_options_.block_cache_size == 0) { + storage_options_.table_options.no_block_cache = true; + } else if (storage_options_.share_block_cache) { + storage_options_.table_options.block_cache = + rocksdb::NewLRUCache(storage_options_.block_cache_size, static_cast(g_pika_conf->num_shard_bits())); + } + storage_options_.options.rate_limiter = + std::shared_ptr( + rocksdb::NewGenericRateLimiter( + g_pika_conf->rate_limiter_bandwidth(), + g_pika_conf->rate_limiter_refill_period_us(), + static_cast(g_pika_conf->rate_limiter_fairness()), + static_cast(g_pika_conf->rate_limiter_mode()), + g_pika_conf->rate_limiter_auto_tuned() + )); + // For Storage small compaction + storage_options_.statistics_max_size = g_pika_conf->max_cache_statistic_keys(); + storage_options_.small_compaction_threshold = g_pika_conf->small_compaction_threshold(); + + // For Storage compaction + storage_options_.compact_param_.best_delete_min_ratio_ = g_pika_conf->best_delete_min_ratio(); + storage_options_.compact_param_.dont_compact_sst_created_in_seconds_ = g_pika_conf->dont_compact_sst_created_in_seconds(); + storage_options_.compact_param_.force_compact_file_age_seconds_ = g_pika_conf->force_compact_file_age_seconds(); + storage_options_.compact_param_.force_compact_min_delete_ratio_ = g_pika_conf->force_compact_min_delete_ratio(); + storage_options_.compact_param_.compact_every_num_of_files_ = g_pika_conf->compact_every_num_of_files(); + + // rocksdb blob + if (g_pika_conf->enable_blob_files()) { + storage_options_.options.enable_blob_files = g_pika_conf->enable_blob_files(); + storage_options_.options.min_blob_size = g_pika_conf->min_blob_size(); + storage_options_.options.blob_file_size = g_pika_conf->blob_file_size(); + storage_options_.options.blob_compression_type = PikaConf::GetCompression(g_pika_conf->blob_compression_type()); + storage_options_.options.enable_blob_garbage_collection = g_pika_conf->enable_blob_garbage_collection(); + storage_options_.options.blob_garbage_collection_age_cutoff = g_pika_conf->blob_garbage_collection_age_cutoff(); + storage_options_.options.blob_garbage_collection_force_threshold = + g_pika_conf->blob_garbage_collection_force_threshold(); + if (g_pika_conf->blob_cache() > 0) { // blob cache less than 0,not open cache + storage_options_.options.blob_cache = + rocksdb::NewLRUCache(g_pika_conf->blob_cache(), static_cast(g_pika_conf->blob_num_shard_bits())); + } + } + + // for column-family options + storage_options_.options.ttl = g_pika_conf->rocksdb_ttl_second(); + 
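The compression_per_level default installed above is easy to misread in flattened form: when the option is unset and a codec is configured, L0 and L1 stay uncompressed (they are rewritten most often by flushes and early compactions) and the configured codec applies from L2 down. A hedged sketch of the same rule against a plain rocksdb::Options (an illustrative helper, not part of the tool):

#include "rocksdb/options.h"

// Keep L0/L1 uncompressed and compress from L2 down with the configured codec.
void ApplyPerLevelCompressionDefault(rocksdb::Options& opts) {
  if (opts.compression_per_level.empty() && opts.compression != rocksdb::kNoCompression) {
    opts.compression_per_level.push_back(rocksdb::kNoCompression);  // L0
    opts.compression_per_level.push_back(rocksdb::kNoCompression);  // L1
    opts.compression_per_level.push_back(opts.compression);         // L2; RocksDB reuses the last entry for deeper levels
  }
}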
storage_options_.options.periodic_compaction_seconds = g_pika_conf->rocksdb_periodic_compaction_second(); + + // For Partitioned Index Filters + if (g_pika_conf->enable_partitioned_index_filters()) { + storage_options_.table_options.index_type = rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch; + storage_options_.table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false)); + storage_options_.table_options.partition_filters = true; + storage_options_.table_options.metadata_block_size = 4096; + storage_options_.table_options.cache_index_and_filter_blocks_with_high_priority = true; + storage_options_.table_options.pin_top_level_index_and_filter = true; + storage_options_.table_options.optimize_filters_for_memory = true; + } + // For statistics + storage_options_.enable_db_statistics = g_pika_conf->enable_db_statistics(); + storage_options_.db_statistics_level = g_pika_conf->db_statistics_level(); +} + +storage::Status PikaServer::RewriteStorageOptions(const storage::OptionType& option_type, + const std::unordered_map<std::string, std::string>& options_map) { + storage::Status s; + std::shared_lock db_rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + s = db_item.second->storage()->SetOptions(option_type, storage::ALL_DB, options_map); + if (!s.ok()) { + return s; + } + } + std::lock_guard rwl(storage_options_rw_); + s = storage_options_.ResetOptions(option_type, options_map); + return s; +} + +Status PikaServer::GetCmdRouting(std::vector<net::RedisCmdArgsType>& redis_cmds, std::vector<Node>* dst, + bool* all_local) { + UNUSED(redis_cmds); + UNUSED(dst); + *all_local = true; + return Status::OK(); +} + +int PikaServer::SendRedisCommand(const std::string& command, const std::string& key) { + // Send command + size_t idx = std::hash<std::string>()(key) % redis_senders_.size(); + redis_senders_[idx]->SendRedisCommand(command); + return 0; +} + +static bool isFirstRetransmit = true; +void PikaServer::RetransmitData(const std::string& path) { + if (isFirstRetransmit) { + isFirstRetransmit = false; + LOG(INFO) << "Retransmit data from " << path; + } else { + LOG(FATAL) << "full DB sync should only be called once"; + } + + std::shared_ptr<storage::Storage> storage_ = std::make_shared<storage::Storage>(); + rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), path); + + if (!s.ok()) { + LOG(FATAL) << "open received database error: " << s.ToString(); + return; + } + + // Init SenderThread + int thread_num = g_pika_conf->redis_sender_num(); + std::string target_host = g_pika_conf->target_redis_host(); + int target_port = g_pika_conf->target_redis_port(); + std::string target_pwd = g_pika_conf->target_redis_pwd(); + + LOG(INFO) << "open received database success, start retransmit data to redis(" + << target_host << ":" << target_port << ")"; + + std::vector<std::shared_ptr<PikaSender>> pika_senders; + std::vector<std::shared_ptr<MigratorThread>> migrators; + + for (int i = 0; i < thread_num; i++) { + pika_senders.emplace_back(std::make_shared<PikaSender>(target_host, target_port, target_pwd)); + } + migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kStrings), thread_num)); + migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kLists), thread_num)); + migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kHashes), thread_num)); + migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kSets), thread_num)); + migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kZSets), thread_num)); + migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, 
int(storage::DataType::kStreams), thread_num)); + + for (size_t i = 0; i < pika_senders.size(); i++) { + pika_senders[i]->StartThread(); + } + for (size_t i = 0; i < migrators.size(); i++) { + migrators[i]->StartThread(); + } + + for (size_t i = 0; i < migrators.size(); i++) { + migrators[i]->JoinThread(); + } + for (size_t i = 0; i < pika_senders.size(); i++) { + pika_senders[i]->Stop(); + } + for (size_t i = 0; i < pika_senders.size(); i++) { + pika_senders[i]->JoinThread(); + } + + int64_t replies = 0, records = 0; + for (size_t i = 0; i < migrators.size(); i++) { + records += migrators[i]->num(); + } + migrators.clear(); + for (size_t i = 0; i < pika_senders.size(); i++) { + replies += pika_senders[i]->elements(); + } + pika_senders.clear(); + + LOG(INFO) << "=============== Retransmit Finish ====================="; + LOG(INFO) << "Total records : " << records << " have been scanned"; + LOG(INFO) << "Total replies : " << replies << " received from redis server"; + LOG(INFO) << "======================================================="; +} + +void PikaServer::ServerStatus(std::string* info) { + std::stringstream tmp_stream; + size_t q_size = ClientProcessorThreadPoolCurQueueSize(); + tmp_stream << "Client Processor thread-pool queue size: " << q_size << "\r\n"; + info->append(tmp_stream.str()); +} + +bool PikaServer::SlotsMigrateBatch(const std::string& ip, int64_t port, int64_t time_out, int64_t slot_num, int64_t keys_num, const std::shared_ptr<DB>& db) { + return pika_migrate_thread_->ReqMigrateBatch(ip, port, time_out, slot_num, keys_num, db); +} + +void PikaServer::GetSlotsMgrtSenderStatus(std::string* ip, int64_t* port, int64_t* slot, bool* migrating, int64_t* moved, int64_t* remained) { + return pika_migrate_thread_->GetMigrateStatus(ip, port, slot, migrating, moved, remained); +} + +int PikaServer::SlotsMigrateOne(const std::string& key, const std::shared_ptr<DB>& db) { + return pika_migrate_thread_->ReqMigrateOne(key, db); +} + +bool PikaServer::SlotsMigrateAsyncCancel() { + pika_migrate_thread_->CancelMigrate(); + return true; +} + +void PikaServer::Bgslotsreload(const std::shared_ptr<DB>& db) { + // Only one thread can go through + { + std::lock_guard ml(bgslots_protector_); + if (bgslots_reload_.reloading || db->IsBgSaving()) { + return; + } + bgslots_reload_.reloading = true; + } + + bgslots_reload_.start_time = time(nullptr); + char s_time[32]; + size_t len = strftime(s_time, sizeof(s_time), "%Y%m%d%H%M%S", localtime(&bgslots_reload_.start_time)); + bgslots_reload_.s_start_time.assign(s_time, len); + bgslots_reload_.cursor = 0; + bgslots_reload_.pattern = "*"; + bgslots_reload_.count = 100; + bgslots_reload_.db = db; + + LOG(INFO) << "Start slot reloading"; + + // Start new thread if needed + bgsave_thread_.StartThread(); + bgsave_thread_.Schedule(&DoBgslotsreload, static_cast<void*>(this)); +} + +void DoBgslotsreload(void* arg) { + auto p = static_cast<PikaServer*>(arg); + PikaServer::BGSlotsReload reload = p->bgslots_reload(); + + // Do slotsreload + std::vector<std::string> keys; + int64_t cursor_ret = -1; + while (cursor_ret != 0 && p->GetSlotsreloading()) { + cursor_ret = reload.db->storage()->Scan(storage::DataType::kAll, reload.cursor, reload.pattern, reload.count, &keys); + + std::vector<std::string>::const_iterator iter; + for (iter = keys.begin(); iter != keys.end(); iter++) { + std::string key_type; + int ret = GetKeyType(*iter, key_type, reload.db); + // if the key is a slot bookkeeping key, don't add it to the slot key set again + if (ret > 0) { + if (key_type == "s" && ((*iter).find(SlotKeyPrefix) != std::string::npos || 
(*iter).find(SlotTagPrefix) != std::string::npos)) { + continue; + } + + AddSlotKey(key_type, *iter, reload.db); + } + } + + reload.cursor = cursor_ret; + p->SetSlotsreloadingCursor(cursor_ret); + keys.clear(); + } + p->SetSlotsreloading(false); + + if (cursor_ret == 0) { + LOG(INFO) << "Finish slot reloading"; + } else { + LOG(INFO) << "Stop slot reloading"; + } +} + +void PikaServer::Bgslotscleanup(std::vector cleanupSlots, const std::shared_ptr& db) { + // Only one thread can go through + { + std::lock_guard ml(bgslots_protector_); + if (bgslots_cleanup_.cleaningup || bgslots_reload_.reloading || db->IsBgSaving()) { + return; + } + bgslots_cleanup_.cleaningup = true; + } + + bgslots_cleanup_.start_time = time(nullptr); + char s_time[32]; + size_t len = strftime(s_time, sizeof(s_time), "%Y%m%d%H%M%S", localtime(&bgslots_cleanup_.start_time)); + bgslots_cleanup_.s_start_time.assign(s_time, len); + bgslots_cleanup_.cursor = 0; + bgslots_cleanup_.pattern = "*"; + bgslots_cleanup_.count = 100; + bgslots_cleanup_.db = db; + bgslots_cleanup_.cleanup_slots.swap(cleanupSlots); + + std::string slotsStr; + slotsStr.assign(cleanupSlots.begin(), cleanupSlots.end()); + LOG(INFO) << "Start slot cleanup, slots: " << slotsStr << std::endl; + + // Start new thread if needed + bgslots_cleanup_thread_.StartThread(); + bgslots_cleanup_thread_.Schedule(&DoBgslotscleanup, static_cast(this)); +} +int64_t PikaServer::GetLastSaveTime(const std::string& dir_path) { + std::vector dump_dir; + // Dump file is not exist + if (!pstd::FileExists(dir_path)) { + LOG(INFO) << "Dump file is not exist,path: " << dir_path; + return 0; + } + if (pstd::GetChildren(dir_path, dump_dir) != 0) { + return 0; + } + std::string dump_file = dir_path + dump_dir[0]; + struct stat fileStat; + if (stat(dump_file.c_str(), &fileStat) == 0) { + return static_cast(fileStat.st_mtime); + } + return 0; +} + +void PikaServer::AllClientUnAuth(const std::set& users) { + pika_dispatch_thread_->UnAuthUserAndKillClient(users, acl_->GetUserLock(Acl::DefaultUser)); +} + +void PikaServer::CheckPubsubClientKill(const std::string& userName, const std::vector& allChannel) { + pika_pubsub_thread_->ConnCanSubscribe(allChannel, [&](const std::shared_ptr& conn) -> bool { + auto pikaConn = std::dynamic_pointer_cast(conn); + if (pikaConn && pikaConn->UserName() == userName) { + return true; + } + return false; + }); +} + +void PikaServer::DisableCompact() { + /* disable auto compactions */ + std::unordered_map options_map{{"disable_auto_compactions", "true"}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + LOG(ERROR) << "-ERR Set storage::OptionType::kColumnFamily disable_auto_compactions error: " + s.ToString() + "\r\n"; + return; + } + g_pika_conf->SetDisableAutoCompaction("true"); + + /* cancel in-progress manual compactions */ + std::shared_lock rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + db_item.second->DBLock(); + db_item.second->SetCompactRangeOptions(true); + db_item.second->DBUnlock(); + } +} + +void DoBgslotscleanup(void* arg) { + auto p = static_cast(arg); + PikaServer::BGSlotsCleanup cleanup = p->bgslots_cleanup(); + + // Do slotscleanup + std::vector keys; + int64_t cursor_ret = -1; + std::vector cleanupSlots(cleanup.cleanup_slots); + while (cursor_ret != 0 && p->GetSlotscleaningup()) { + cursor_ret = g_pika_server->bgslots_cleanup_.db->storage()->Scan(storage::DataType::kAll, cleanup.cursor, cleanup.pattern, cleanup.count, &keys); + + std::string key_type; + 
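DoBgslotsreload() above and DoBgslotscleanup() here share one SCAN-style iteration pattern: loop on a cursor, write the cursor back into the server state after each batch so a cancel or restart can resume, and stop once the storage layer returns cursor 0. A minimal sketch of that loop skeleton (ScanAllKeys and its parameters are hypothetical stand-ins, assuming the Scan() signature used above):

#include <atomic>
#include <functional>
#include <memory>
#include <string>
#include <vector>

void ScanAllKeys(const std::shared_ptr<DB>& db,
                 const std::function<void(const std::string&)>& process,
                 const std::atomic<bool>& running) {
  int64_t cursor = 0;
  std::vector<std::string> keys;
  do {
    // Returns the next cursor; 0 means the full keyspace has been visited.
    cursor = db->storage()->Scan(storage::DataType::kAll, cursor, "*", 100, &keys);
    for (const auto& key : keys) {
      process(key);  // reload: AddSlotKey(...); cleanup: DeleteKey(...)
    }
    keys.clear();    // Scan() appends into `keys`, so clear between batches
  } while (cursor != 0 && running.load());
}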
std::vector::const_iterator iter; + for (iter = keys.begin(); iter != keys.end(); iter++) { + if ((*iter).find(SlotKeyPrefix) != std::string::npos || (*iter).find(SlotTagPrefix) != std::string::npos) { + continue; + } + if (std::find(cleanupSlots.begin(), cleanupSlots.end(), GetSlotID(g_pika_conf->default_slot_num(), *iter)) != cleanupSlots.end()) { + if (GetKeyType(*iter, key_type, g_pika_server->bgslots_cleanup_.db) <= 0) { + LOG(WARNING) << "slots clean get key type for slot " << GetSlotID(g_pika_conf->default_slot_num(), *iter) << " key " << *iter << " error"; + continue; + } + if (DeleteKey(*iter, key_type[0], g_pika_server->bgslots_cleanup_.db) <= 0) { + LOG(WARNING) << "slots clean del for slot " << GetSlotID(g_pika_conf->default_slot_num(), *iter) << " key "<< *iter << " error"; + } + } + } + + cleanup.cursor = cursor_ret; + p->SetSlotscleaningupCursor(cursor_ret); + keys.clear(); + } + + for (int cleanupSlot : cleanupSlots) { + WriteDelKeyToBinlog(GetSlotKey(cleanupSlot), g_pika_server->bgslots_cleanup_.db); + WriteDelKeyToBinlog(GetSlotsTagKey(cleanupSlot), g_pika_server->bgslots_cleanup_.db); + } + + p->SetSlotscleaningup(false); + std::vector empty; + p->SetCleanupSlots(empty); + + std::string slotsStr; + slotsStr.assign(cleanup.cleanup_slots.begin(), cleanup.cleanup_slots.end()); + LOG(INFO) << "Finish slots cleanup, slots " << slotsStr; +} + +void PikaServer::ResetCacheAsync(uint32_t cache_num, std::shared_ptr db, cache::CacheConfig *cache_cfg) { + if (PIKA_CACHE_STATUS_OK == db->cache()->CacheStatus() + || PIKA_CACHE_STATUS_NONE == db->cache()->CacheStatus()) { + common_bg_thread_.StartThread(); + BGCacheTaskArg *arg = new BGCacheTaskArg(); + arg->db = db; + arg->cache_num = cache_num; + if (cache_cfg == nullptr) { + arg->task_type = CACHE_BGTASK_RESET_NUM; + } else { + arg->task_type = CACHE_BGTASK_RESET_CFG; + arg->cache_cfg = *cache_cfg; + } + common_bg_thread_.Schedule(&DoCacheBGTask, static_cast(arg)); + } else { + LOG(WARNING) << "can not reset cache in status: " << db->cache()->CacheStatus(); + } +} + +void PikaServer::ClearCacheDbAsync(std::shared_ptr db) { + // disable cache temporarily, and restore it after cache cleared + g_pika_conf->SetCacheDisableFlag(); + if (PIKA_CACHE_STATUS_OK != db->cache()->CacheStatus()) { + LOG(WARNING) << "can not clear cache in status: " << db->cache()->CacheStatus(); + return; + } + common_bg_thread_.StartThread(); + BGCacheTaskArg *arg = new BGCacheTaskArg(); + arg->db = db; + arg->task_type = CACHE_BGTASK_CLEAR; + common_bg_thread_.Schedule(&DoCacheBGTask, static_cast(arg)); +} + +void PikaServer::DoCacheBGTask(void* arg) { + std::unique_ptr pCacheTaskArg(static_cast(arg)); + std::shared_ptr db = pCacheTaskArg->db; + + switch (pCacheTaskArg->task_type) { + case CACHE_BGTASK_CLEAR: + LOG(INFO) << "clear cache start..."; + db->cache()->SetCacheStatus(PIKA_CACHE_STATUS_CLEAR); + g_pika_server->ResetDisplayCacheInfo(PIKA_CACHE_STATUS_CLEAR, db); + db->cache()->FlushCache(); + LOG(INFO) << "clear cache finish"; + break; + case CACHE_BGTASK_RESET_NUM: + LOG(INFO) << "reset cache num start..."; + db->cache()->SetCacheStatus(PIKA_CACHE_STATUS_RESET); + g_pika_server->ResetDisplayCacheInfo(PIKA_CACHE_STATUS_RESET, db); + db->cache()->Reset(pCacheTaskArg->cache_num); + LOG(INFO) << "reset cache num finish"; + break; + case CACHE_BGTASK_RESET_CFG: + LOG(INFO) << "reset cache config start..."; + db->cache()->SetCacheStatus(PIKA_CACHE_STATUS_RESET); + g_pika_server->ResetDisplayCacheInfo(PIKA_CACHE_STATUS_RESET, db); + 
db->cache()->Reset(pCacheTaskArg->cache_num); + LOG(INFO) << "reset cache config finish"; + break; + default: + LOG(WARNING) << "invalid cache task type: " << pCacheTaskArg->task_type; + break; + } + + db->cache()->SetCacheStatus(PIKA_CACHE_STATUS_OK); + g_pika_conf->UnsetCacheDisableFlag(); +} + +void PikaServer::ResetCacheConfig(std::shared_ptr db) { + cache::CacheConfig cache_cfg; + cache_cfg.maxmemory = g_pika_conf->cache_maxmemory(); + cache_cfg.maxmemory_policy = g_pika_conf->cache_maxmemory_policy(); + cache_cfg.maxmemory_samples = g_pika_conf->cache_maxmemory_samples(); + cache_cfg.lfu_decay_time = g_pika_conf->cache_lfu_decay_time(); + cache_cfg.zset_cache_start_direction = g_pika_conf->zset_cache_start_direction(); + cache_cfg.zset_cache_field_num_per_key = g_pika_conf->zset_cache_field_num_per_key(); + db->cache()->ResetConfig(&cache_cfg); +} + +void PikaServer::ClearHitRatio(std::shared_ptr db) { + db->cache()->ClearHitRatio(); +} + +void PikaServer::OnCacheStartPosChanged(int zset_cache_start_direction, std::shared_ptr db) { + ResetCacheConfig(db); + ClearCacheDbAsyncV2(db); +} + +void PikaServer::ClearCacheDbAsyncV2(std::shared_ptr db) { + if (PIKA_CACHE_STATUS_OK != db->cache()->CacheStatus()) { + LOG(WARNING) << "can not clear cache in status: " << db->cache()->CacheStatus(); + return; + } + common_bg_thread_.StartThread(); + BGCacheTaskArg *arg = new BGCacheTaskArg(); + arg->db = db; + arg->task_type = CACHE_BGTASK_CLEAR; + arg->conf = std::move(g_pika_conf); + arg->reenable_cache = true; + common_bg_thread_.Schedule(&DoCacheBGTask, static_cast(arg)); +} + +void PikaServer::ProcessCronTask() { + for (auto& dbs : dbs_) { + auto cache = dbs.second->cache(); + cache->ProcessCronTask(); + } +} + +double PikaServer::HitRatio(void) { + std::unique_lock l(mu_); + int64_t hits = 0; + int64_t misses = 0; + cache::RedisCache::GetHitAndMissNum(&hits, &misses); + int64_t all_cmds = hits + misses; + if (0 >= all_cmds) { + return 0; + } + return hits / (all_cmds * 1.0); +} + +void PikaServer::UpdateCacheInfo(void) { + for (auto& dbs : dbs_) { + if (PIKA_CACHE_STATUS_OK != dbs.second->cache()->CacheStatus()) { + return; + } + // get cache info from redis cache + CacheInfo cache_info; + dbs.second->cache()->Info(cache_info); + dbs.second->UpdateCacheInfo(cache_info); + } +} + +void PikaServer::ResetDisplayCacheInfo(int status, std::shared_ptr db) { + db->ResetDisplayCacheInfo(status); +} + +void PikaServer::CacheConfigInit(cache::CacheConfig& cache_cfg) { + cache_cfg.maxmemory = g_pika_conf->cache_maxmemory(); + cache_cfg.maxmemory_policy = g_pika_conf->cache_maxmemory_policy(); + cache_cfg.maxmemory_samples = g_pika_conf->cache_maxmemory_samples(); + cache_cfg.lfu_decay_time = g_pika_conf->cache_lfu_decay_time(); } diff --git a/tools/pika_migrate/src/pika_set.cc b/tools/pika_migrate/src/pika_set.cc index c78784487a..66ca7f168e 100644 --- a/tools/pika_migrate/src/pika_set.cc +++ b/tools/pika_migrate/src/pika_set.cc @@ -4,8 +4,10 @@ // of patent rights can be found in the PATENTS file in the same directory. 
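The pika_set.cc diff below gives every set command the same read-through shape: ReadCache() serves the reply from the in-memory cache, DoThroughDB() falls back to RocksDB on a miss (clearing any partial reply first), and DoUpdateCache() schedules the key for asynchronous cache loading. A minimal sketch of that control flow with stub members (CmdSketch is illustrative, not the tool's actual class hierarchy):

#include <functional>

struct CmdSketch {
  std::function<bool()> read_cache;       // e.g. db_->cache()->SCard(...): true on hit
  std::function<void()> do_through_db;    // res_.clear(); Do(); rebuild the reply from RocksDB
  std::function<void()> do_update_cache;  // PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_SET, key_, db_)

  void Execute() {
    if (read_cache()) {
      return;           // cache hit: the reply was served from memory
    }
    do_through_db();    // cache miss: fall through to the storage engine
    do_update_cache();  // then warm the cache asynchronously for later reads
  }
};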
#include "include/pika_set.h" - -#include "slash/include/slash_string.h" +#include "include/pika_cache.h" +#include "include/pika_conf.h" +#include "pstd/include/pstd_string.h" +#include "include/pika_slot_command.h" void SAddCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -13,45 +15,102 @@ void SAddCmd::DoInitial() { return; } key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); - iter++; + auto iter = argv_.begin(); + iter++; iter++; members_.assign(iter, argv_.end()); - return; } -void SAddCmd::Do(std::shared_ptr partition) { +void SAddCmd::Do() { int32_t count = 0; - rocksdb::Status s = partition->db()->SAdd(key_, members_, &count); - if (!s.ok()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + s_ = db_->storage()->SAdd(key_, members_, &count); + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else if (!s_.ok()) { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); return; } + AddSlotKey("s", key_, db_); res_.AppendInteger(count); - return; +} + +void SAddCmd::DoThroughDB() { + Do(); +} + +void SAddCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->SAddIfKeyExist(key_, members_); + } } void SPopCmd::DoInitial() { - if (!CheckArg(argv_.size())) { + size_t argc = argv_.size(); + if (!CheckArg(argc)) { res_.SetRes(CmdRes::kWrongNum, kCmdNameSPop); return; } + count_ = 1; key_ = argv_[1]; - return; + if (argc > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSPop); + } else if (argc == 3) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &count_) == 0) { + res_.SetRes(CmdRes::kErrOther, kCmdNameSPop); + return; + } + if (count_ <= 0) { + res_.SetRes(CmdRes::kErrOther, kCmdNameSPop); + return; + } + } } -void SPopCmd::Do(std::shared_ptr partition) { - std::string member; - rocksdb::Status s = partition->db()->SPop(key_, &member); - if (s.ok()) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); - } else if (s.IsNotFound()) { +void SPopCmd::Do() { + s_ = db_->storage()->SPop(key_, &members_, count_); + if (s_.ok()) { + res_.AppendArrayLenUint64(members_.size()); + for (const auto& member : members_) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } + } else if (s_.IsNotFound()) { res_.AppendContent("$-1"); + res_.SetRes(CmdRes::kNoExists); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; +} + +void SPopCmd::DoThroughDB() { + Do(); +} + +void SPopCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->SRem(key_, members_); + } +} + +void SPopCmd::DoBinlog() { + if (!s_.ok()) { + return; + } + + PikaCmdArgsType srem_args; + srem_args.emplace_back("srem"); + srem_args.emplace_back(key_); + for (auto m = members_.begin(); m != members_.end(); ++m) { + srem_args.emplace_back(*m); + } + + srem_cmd_->Initial(srem_args, db_name_); + srem_cmd_->SetConn(GetConn()); + srem_cmd_->SetResp(resp_.lock()); + srem_cmd_->DoBinlog(); } void SCardCmd::DoInitial() { @@ -60,18 +119,44 @@ void SCardCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void SCardCmd::Do(std::shared_ptr partition) { +void SCardCmd::Do() { int32_t card = 0; - rocksdb::Status s = partition->db()->SCard(key_, &card); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->SCard(key_, &card); + if (s_.ok()) { + res_.AppendInteger(card); + } else if (s_.IsNotFound()) { + res_.AppendInteger(card); + res_.SetRes(CmdRes::kNoExists); + } else if (s_.IsInvalidArgument()) { + 
res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, "scard error"); + } +} + +void SCardCmd::ReadCache() { + uint64_t card = 0; + auto s = db_->cache()->SCard(key_, &card); + if (s.ok()) { res_.AppendInteger(card); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, "scard error"); } - return; +} + +void SCardCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void SCardCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_SET, key_, db_); + } } void SMembersCmd::DoInitial() { @@ -80,22 +165,52 @@ void SMembersCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void SMembersCmd::Do(std::shared_ptr partition) { +void SMembersCmd::Do() { std::vector members; - rocksdb::Status s = partition->db()->SMembers(key_, &members); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->SMembers(key_, &members); + if (s_.ok()) { + res_.AppendArrayLenUint64(members.size()); + for (const auto& member : members) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } + } else if (s_.IsNotFound()) { + res_.SetRes(CmdRes::kNoExists); + res_.AppendArrayLenUint64(members.size()); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SMembersCmd::ReadCache() { + std::vector members; + auto s = db_->cache()->SMembers(key_, &members); + if (s.ok()) { res_.AppendArrayLen(members.size()); for (const auto& member : members) { res_.AppendStringLen(member.size()); res_.AppendContent(member); } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; +} + +void SMembersCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void SMembersCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_SET, key_, db_); + } } void SScanCmd::DoInitial() { @@ -104,23 +219,23 @@ void SScanCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &cursor_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &cursor_) == 0) { res_.SetRes(CmdRes::kWrongNum, kCmdNameSScan); return; } - size_t argc = argv_.size(), index = 3; + size_t argc = argv_.size(); + size_t index = 3; while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_)) { + } else if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -134,29 +249,38 @@ void SScanCmd::DoInitial() { res_.SetRes(CmdRes::kSyntaxErr); return; } - return; } -void SScanCmd::Do(std::shared_ptr partition) { +void SScanCmd::Do() { int64_t next_cursor = 0; std::vector members; - rocksdb::Status s = partition->db()->SScan(key_, cursor_, pattern_, count_, &members, &next_cursor); + rocksdb::Status s = db_->storage()->SScan(key_, cursor_, pattern_, count_, &members, &next_cursor); - if (s.ok() || s.IsNotFound()) { + if (s.ok()) { res_.AppendContent("*2"); char buf[32]; - int64_t len = slash::ll2string(buf, 
sizeof(buf), next_cursor); + int64_t len = pstd::ll2string(buf, sizeof(buf), next_cursor); res_.AppendStringLen(len); res_.AppendContent(buf); - res_.AppendArrayLen(members.size()); + res_.AppendArrayLenUint64(members.size()); for (const auto& member : members) { res_.AppendString(member); } + } else if (s.IsNotFound()) { + res_.AppendContent("*2"); + char buf[32]; + int64_t len = pstd::ll2string(buf, sizeof(buf), next_cursor); + res_.AppendStringLen(len); + res_.AppendContent(buf); + + res_.AppendArrayLenUint64(members.size()); + res_.SetRes(CmdRes::kNoExists); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } void SRemCmd::DoInitial() { @@ -165,17 +289,33 @@ void SRemCmd::DoInitial() { return; } key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); + auto iter = argv_.begin(); iter++; members_.assign(++iter, argv_.end()); - return; } -void SRemCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->SRem(key_, members_, &count); - res_.AppendInteger(count); - return; +void SRemCmd::Do() { + s_ = db_->storage()->SRem(key_, members_, &deleted_); + if (s_.ok()) { + res_.AppendInteger(deleted_); + } else if (s_.IsNotFound()) { + res_.SetRes(CmdRes::kNoExists); + res_.AppendInteger(deleted_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SRemCmd::DoThroughDB() { + Do(); +} + +void SRemCmd::DoUpdateCache() { + if (s_.ok() && deleted_ > 0) { + db_->cache()->SRem(key_, members_); + } } void SUnionCmd::DoInitial() { @@ -183,20 +323,24 @@ void SUnionCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameSUnion); return; } - PikaCmdArgsType::iterator iter = argv_.begin(); + auto iter = argv_.begin(); keys_.assign(++iter, argv_.end()); - return; } -void SUnionCmd::Do(std::shared_ptr partition) { +void SUnionCmd::Do() { std::vector members; - partition->db()->SUnion(keys_, &members); - res_.AppendArrayLen(members.size()); - for (const auto& member : members) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); + s_ = db_->storage()->SUnion(keys_, &members); + if (s_.ok()) { + res_.AppendArrayLenUint64(members.size()); + for (const auto& member : members) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; } void SUnionstoreCmd::DoInitial() { @@ -205,21 +349,73 @@ void SUnionstoreCmd::DoInitial() { return; } dest_key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); + auto iter = argv_.begin(); iter++; keys_.assign(++iter, argv_.end()); - return; } -void SUnionstoreCmd::Do(std::shared_ptr partition) { +void SUnionstoreCmd::Do() { int32_t count = 0; - rocksdb::Status s = partition->db()->SUnionstore(dest_key_, keys_, &count); - if (s.ok()) { + s_ = db_->storage()->SUnionstore(dest_key_, keys_, value_to_dest_, &count); + if (s_.ok()) { res_.AppendInteger(count); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; +} + +void SUnionstoreCmd::DoThroughDB() { + Do(); +} + +void SUnionstoreCmd::DoUpdateCache() { + if (s_.ok()) { + std::vector v; + v.emplace_back(dest_key_); + db_->cache()->Del(v); + } +} + +void 
SetOperationCmd::DoBinlog() { + PikaCmdArgsType del_args; + del_args.emplace_back("del"); + del_args.emplace_back(dest_key_); + del_cmd_->Initial(del_args, db_name_); + del_cmd_->SetConn(GetConn()); + del_cmd_->SetResp(resp_.lock()); + del_cmd_->DoBinlog(); + + if (value_to_dest_.size() == 0) { + //The union/diff/inter operation got an empty set, just exec del to simulate overwrite an empty set to dest_key + return; + } + + PikaCmdArgsType initial_args; + initial_args.emplace_back("sadd");//use "sadd" to distinguish the binlog of SaddCmd which use "SADD" for binlog + initial_args.emplace_back(dest_key_); + initial_args.emplace_back(value_to_dest_[0]); + sadd_cmd_->Initial(initial_args, db_name_); + sadd_cmd_->SetConn(GetConn()); + sadd_cmd_->SetResp(resp_.lock()); + + auto& sadd_argv = sadd_cmd_->argv(); + size_t data_size = value_to_dest_[0].size(); + + for (size_t i = 1; i < value_to_dest_.size(); i++) { + if (data_size >= 131072) { + // If the binlog has reached the size of 128KB. (131,072 bytes = 128KB) + sadd_cmd_->DoBinlog(); + sadd_argv.clear(); + sadd_argv.emplace_back("sadd"); + sadd_argv.emplace_back(dest_key_); + data_size = 0; + } + sadd_argv.emplace_back(value_to_dest_[i]); + data_size += value_to_dest_[i].size(); + } + sadd_cmd_->DoBinlog(); } void SInterCmd::DoInitial() { @@ -227,20 +423,24 @@ void SInterCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameSInter); return; } - PikaCmdArgsType::iterator iter = argv_.begin(); + auto iter = argv_.begin(); keys_.assign(++iter, argv_.end()); - return; } -void SInterCmd::Do(std::shared_ptr partition) { +void SInterCmd::Do() { std::vector members; - partition->db()->SInter(keys_, &members); - res_.AppendArrayLen(members.size()); - for (const auto& member : members) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); + s_ = db_->storage()->SInter(keys_, &members); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(members.size()); + for (const auto& member : members) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; } void SInterstoreCmd::DoInitial() { @@ -249,21 +449,33 @@ void SInterstoreCmd::DoInitial() { return; } dest_key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); + auto iter = argv_.begin(); iter++; keys_.assign(++iter, argv_.end()); - return; } -void SInterstoreCmd::Do(std::shared_ptr partition) { +void SInterstoreCmd::Do() { int32_t count = 0; - rocksdb::Status s = partition->db()->SInterstore(dest_key_, keys_, &count); - if (s.ok()) { + s_ = db_->storage()->SInterstore(dest_key_, keys_, value_to_dest_, &count); + if (s_.ok()) { res_.AppendInteger(count); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SInterstoreCmd::DoThroughDB() { + Do(); +} + +void SInterstoreCmd::DoUpdateCache() { + if (s_.ok()) { + std::vector v; + v.emplace_back(dest_key_); + db_->cache()->Del(v); } - return; } void SIsmemberCmd::DoInitial() { @@ -273,17 +485,44 @@ void SIsmemberCmd::DoInitial() { } key_ = argv_[1]; member_ = argv_[2]; - return; } -void SIsmemberCmd::Do(std::shared_ptr partition) { +void SIsmemberCmd::Do() { int32_t is_member = 0; - partition->db()->SIsmember(key_, member_, &is_member); - if (is_member) { + s_ = db_->storage()->SIsmember(key_, member_, 
&is_member); + if (is_member != 0) { res_.AppendContent(":1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.AppendContent(":0"); } + if (s_.IsNotFound()) { + res_.SetRes(CmdRes::kNoExists); + } +} + +void SIsmemberCmd::ReadCache() { + auto s = db_->cache()->SIsmember(key_, member_); + if (s.ok()) { + res_.AppendContent(":1"); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + + +void SIsmemberCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void SIsmemberCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_SET, key_, db_); + } } void SDiffCmd::DoInitial() { @@ -291,20 +530,24 @@ void SDiffCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameSDiff); return; } - PikaCmdArgsType::iterator iter = argv_.begin(); + auto iter = argv_.begin(); keys_.assign(++iter, argv_.end()); - return; } -void SDiffCmd::Do(std::shared_ptr partition) { +void SDiffCmd::Do() { std::vector members; - partition->db()->SDiff(keys_, &members); - res_.AppendArrayLen(members.size()); - for (const auto& member : members) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); + s_ = db_->storage()->SDiff(keys_, &members); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(members.size()); + for (const auto& member : members) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther,s_.ToString()); } - return; } void SDiffstoreCmd::DoInitial() { @@ -313,19 +556,32 @@ void SDiffstoreCmd::DoInitial() { return; } dest_key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); + auto iter = argv_.begin(); iter++; keys_.assign(++iter, argv_.end()); - return; } -void SDiffstoreCmd::Do(std::shared_ptr partition) { +void SDiffstoreCmd::Do() { int32_t count = 0; - rocksdb::Status s = partition->db()->SDiffstore(dest_key_, keys_, &count); - if (s.ok()) { + s_ = db_->storage()->SDiffstore(dest_key_, keys_, value_to_dest_, &count); + if (s_.ok()) { res_.AppendInteger(count); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SDiffstoreCmd::DoThroughDB() { + Do(); +} + +void SDiffstoreCmd::DoUpdateCache() { + if (s_.ok()) { + std::vector v; + v.emplace_back(dest_key_); + db_->cache()->Del(v); } } @@ -337,18 +593,64 @@ void SMoveCmd::DoInitial() { src_key_ = argv_[1]; dest_key_ = argv_[2]; member_ = argv_[3]; - return; } -void SMoveCmd::Do(std::shared_ptr partition) { +void SMoveCmd::Do() { int32_t res = 0; - rocksdb::Status s = partition->db()->SMove(src_key_, dest_key_, member_, &res); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->SMove(src_key_, dest_key_, member_, &res); + if (s_.ok()) { res_.AppendInteger(res); + move_success_ = res; + } else if (s_.IsNotFound()) { + res_.AppendInteger(res); + move_success_ = res; + res_.SetRes(CmdRes::kNoExists); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SMoveCmd::DoThroughDB() { + Do(); +} + +void SMoveCmd::DoUpdateCache() { + if (s_.ok()) { + std::vector members; + members.emplace_back(member_); + db_->cache()->SRem(src_key_, members); + 
db_->cache()->SAddIfKeyExist(dest_key_, members); + } +} + +void SMoveCmd::DoBinlog() { + if (!move_success_) { + //the member is not in the source set, nothing changed + return; } - return; + PikaCmdArgsType srem_args; + //SremCmd use "SREM", SMove use "srem" + srem_args.emplace_back("srem"); + srem_args.emplace_back(src_key_); + srem_args.emplace_back(member_); + srem_cmd_->Initial(srem_args, db_name_); + + PikaCmdArgsType sadd_args; + //Saddcmd use "SADD", Smovecmd use "sadd" + sadd_args.emplace_back("sadd"); + sadd_args.emplace_back(dest_key_); + sadd_args.emplace_back(member_); + sadd_cmd_->Initial(sadd_args, db_name_); + + srem_cmd_->SetConn(GetConn()); + srem_cmd_->SetResp(resp_.lock()); + sadd_cmd_->SetConn(GetConn()); + sadd_cmd_->SetResp(resp_.lock()); + + srem_cmd_->DoBinlog(); + sadd_cmd_->DoBinlog(); } void SRandmemberCmd::DoInitial() { @@ -361,19 +663,42 @@ void SRandmemberCmd::DoInitial() { res_.SetRes(CmdRes::kWrongNum, kCmdNameSRandmember); return; } else if (argv_.size() == 3) { - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &count_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &count_) == 0) { res_.SetRes(CmdRes::kInvalidInt); } else { - reply_arr = true;; + reply_arr = true; + } + } +} + +void SRandmemberCmd::Do() { + std::vector members; + s_ = db_->storage()->SRandmember(key_, static_cast(count_), &members); + if (s_.ok()) { + if (!reply_arr && (static_cast(!members.empty()) != 0U)) { + res_.AppendStringLenUint64(members[0].size()); + res_.AppendContent(members[0]); + } else { + res_.AppendArrayLenUint64(members.size()); + for (const auto& member : members) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } } + } else if (s_.IsNotFound()) { + res_.SetRes(CmdRes::kNoExists); + res_.AppendArrayLenUint64(members.size()); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } - return; } -void SRandmemberCmd::Do(std::shared_ptr partition) { +void SRandmemberCmd::ReadCache() { std::vector members; - rocksdb::Status s = partition->db()->SRandmember(key_, count_, &members); - if (s.ok() || s.IsNotFound()) { + auto s = db_->cache()->SRandmember(key_, count_, &members); + if (s.ok()) { if (!reply_arr && members.size()) { res_.AppendStringLen(members[0].size()); res_.AppendContent(members[0]); @@ -384,8 +709,21 @@ void SRandmemberCmd::Do(std::shared_ptr partition) { res_.AppendContent(member); } } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } + +void SRandmemberCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void SRandmemberCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_SET, key_, db_); + } +} + diff --git a/tools/pika_migrate/src/pika_slave_node.cc b/tools/pika_migrate/src/pika_slave_node.cc new file mode 100644 index 0000000000..a9adbd89b8 --- /dev/null +++ b/tools/pika_migrate/src/pika_slave_node.cc @@ -0,0 +1,107 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
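The new pika_slave_node.cc below is mostly about SyncWindow, a sliding window of binlog items that have been sent to a slave but not yet acknowledged: Push appends items in send order, Update marks everything in the acked [start, end] range and then pops the contiguous acked prefix, and the offset of the last popped item becomes the slave's new acked_offset. A minimal sketch of the same idea, using plain uint64_t sequence numbers in place of pika's SyncWinItem/LogOffset types (the names here are hypothetical, not the project's API):

#include <cstddef>
#include <cstdint>
#include <deque>
#include <optional>

// Simplified model of a binlog ack window (illustrative only, not pika's implementation):
// items are appended in send order; when the peer acks a contiguous range,
// every item in [start, end] is marked, and the acked prefix is popped off.
class ToyAckWindow {
 public:
  void Push(uint64_t seq, size_t size) {
    win_.push_back({seq, size, false});
    total_size_ += size;
  }

  // Marks [start_seq, end_seq] acked; returns the sequence number that is now
  // safe to report as acked, i.e. the last item of the popped prefix.
  std::optional<uint64_t> Update(uint64_t start_seq, uint64_t end_seq) {
    for (auto& it : win_) {
      if (it.seq >= start_seq && it.seq <= end_seq && !it.acked) {
        it.acked = true;
        total_size_ -= it.size;
      }
    }
    std::optional<uint64_t> acked;
    while (!win_.empty() && win_.front().acked) {  // pop only the contiguous acked prefix
      acked = win_.front().seq;
      win_.pop_front();
    }
    return acked;  // empty when the front item is still unacked
  }

 private:
  struct Item { uint64_t seq; size_t size; bool acked; };
  std::deque<Item> win_;
  size_t total_size_ = 0;  // buffered bytes, used for flow control (cf. SyncWindow::Remaining)
};

The real SyncWindow below is stricter: it locates the start and end items by equality, logs and rejects ranges it cannot find in the window, and caps the window by g_pika_conf->sync_window_size().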
+ +#include "include/pika_slave_node.h" +#include "include/pika_conf.h" + +using pstd::Status; + +extern std::unique_ptr g_pika_conf; + +/* SyncWindow */ + +void SyncWindow::Push(const SyncWinItem& item) { + win_.push_back(item); + total_size_ += item.binlog_size_; +} + +bool SyncWindow::Update(const SyncWinItem& start_item, const SyncWinItem& end_item, LogOffset* acked_offset) { + size_t start_pos = win_.size(); + size_t end_pos = win_.size(); + for (size_t i = 0; i < win_.size(); ++i) { + if (win_[i] == start_item) { + start_pos = i; + } + if (win_[i] == end_item) { + end_pos = i; + break; + } + } + if (start_pos == win_.size() || end_pos == win_.size()) { + LOG(WARNING) << "Ack offset Start: " << start_item.ToString() << "End: " << end_item.ToString() + << " not found in binlog controller window." << std::endl + << "window status " << std::endl + << ToStringStatus(); + return false; + } + for (size_t i = start_pos; i <= end_pos; ++i) { + win_[i].acked_ = true; + total_size_ -= win_[i].binlog_size_; + } + while (!win_.empty()) { + if (win_[0].acked_) { + *acked_offset = win_[0].offset_; + win_.pop_front(); + } else { + break; + } + } + return true; +} + +int SyncWindow::Remaining() { + std::size_t remaining_size = g_pika_conf->sync_window_size() - win_.size(); + return static_cast(remaining_size > 0 ? remaining_size : 0); +} + +/* SlaveNode */ + +SlaveNode::SlaveNode(const std::string& ip, int port, const std::string& db_name, int session_id) + : RmNode(ip, port, db_name, session_id) + + {} + +SlaveNode::~SlaveNode() = default; + +Status SlaveNode::InitBinlogFileReader(const std::shared_ptr& binlog, const BinlogOffset& offset) { + binlog_reader = std::make_shared(); + int res = binlog_reader->Seek(binlog, offset.filenum, offset.offset); + if (res != 0) { + return Status::Corruption(ToString() + " binlog reader init failed"); + } + return Status::OK(); +} + +std::string SlaveNode::ToStringStatus() { + std::stringstream tmp_stream; + tmp_stream << " Slave_state: " << SlaveStateMsg[slave_state] << "\r\n"; + tmp_stream << " Binlog_sync_state: " << BinlogSyncStateMsg[b_state] << "\r\n"; + tmp_stream << " Sync_window: " + << "\r\n" + << sync_win.ToStringStatus(); + tmp_stream << " Sent_offset: " << sent_offset.ToString() << "\r\n"; + tmp_stream << " Acked_offset: " << acked_offset.ToString() << "\r\n"; + tmp_stream << " Binlog_reader activated: " << (binlog_reader != nullptr) << "\r\n"; + return tmp_stream.str(); +} + +Status SlaveNode::Update(const LogOffset& start, const LogOffset& end, LogOffset* updated_offset) { + if (slave_state != kSlaveBinlogSync) { + return Status::Corruption(ToString() + "state not BinlogSync"); + } + *updated_offset = LogOffset(); + bool res = sync_win.Update(SyncWinItem(start), SyncWinItem(end), updated_offset); + if (!res) { + return Status::Corruption("UpdateAckedInfo failed"); + } + if (*updated_offset == LogOffset()) { + // nothing to update return current acked_offset + *updated_offset = acked_offset; + return Status::OK(); + } + // update acked_offset + acked_offset = *updated_offset; + return Status::OK(); +} diff --git a/tools/pika_migrate/src/pika_slot.cc b/tools/pika_migrate/src/pika_slot.cc deleted file mode 100644 index adeecf8bb7..0000000000 --- a/tools/pika_migrate/src/pika_slot.cc +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_rm.h" -#include "include/pika_slot.h" -#include "include/pika_table.h" -#include "include/pika_server.h" -#include "include/pika_cmd_table_manager.h" - -extern PikaCmdTableManager* g_pika_cmd_table_manager; -extern PikaReplicaManager* g_pika_rm; -extern PikaServer* g_pika_server; -extern PikaConf* g_pika_conf; - -// SLOTSINFO -void SlotsInfoCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsInfo); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSINFO only support on sharding mode"); - return; - } - - return; -} - -void SlotsInfoCmd::Do(std::shared_ptr partition) { - std::shared_ptr
table_ptr = g_pika_server->GetTable(g_pika_conf->default_table()); - if (!table_ptr) { - res_.SetRes(CmdRes::kNotFound, kCmdNameSlotsInfo); - return; - } - table_ptr->KeyScan(); - // this get will get last time scan info - KeyScanInfo key_scan_info = table_ptr->GetKeyScanInfo(); - - std::map infos; - Status s = table_ptr->GetPartitionsKeyScanInfo(&infos); - if (!s.ok()) { - res_.SetRes(CmdRes::kInvalidParameter, kCmdNameSlotsInfo); - return; - } - res_.AppendArrayLen(infos.size()); - for (auto& key_info : infos) { - uint64_t total_key_size = 0; - for (size_t idx = 0; idx < key_info.second.key_infos.size(); ++idx) { - total_key_size += key_info.second.key_infos[idx].keys; - } - res_.AppendArrayLen(2); - res_.AppendInteger(key_info.first); - res_.AppendInteger(total_key_size); - } - return; -} - -// SLOTSHASHKEY key1 [key2 …] -void SlotsHashKeyCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsHashKey); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSHASHKEY only support on sharding mode"); - return; - } - - return; -} - -void SlotsHashKeyCmd::Do(std::shared_ptr partition) { - res_.AppendArrayLen(argv_.size() - 1); - std::shared_ptr
table_ptr = g_pika_server->GetTable(g_pika_conf->default_table()); - uint32_t partition_num = table_ptr->PartitionNum(); - if (!table_ptr) { - res_.SetRes(CmdRes::kInvalidParameter, kCmdNameSlotsHashKey); - } - // iter starts from real key, first item in argv_ is command name - std::vector::const_iterator iter = argv_.begin() + 1; - for (; iter != argv_.end(); iter++) { - res_.AppendInteger(g_pika_cmd_table_manager->DistributeKey(*iter, partition_num)); - } - return; -} - -// slotsmgrtslot-async host port timeout maxbulks maxbytes slot numkeys -void SlotsMgrtSlotAsyncCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtSlotAsync); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSMGRTTAGSLOT-ASYNC only support on sharding mode"); - return; - } - - return; -} - -void SlotsMgrtSlotAsyncCmd::Do(std::shared_ptr partition) { - int64_t moved = 0; - int64_t remained = 0; - res_.AppendArrayLen(2); - res_.AppendInteger(moved); - res_.AppendInteger(remained); -} - -// SLOTSMGRTTAGSLOT-ASYNC host port timeout maxbulks maxbytes slot numkeys -void SlotsMgrtTagSlotAsyncCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtTagSlotAsync); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSMGRTTAGSLOT-ASYNC only support on sharding mode"); - return; - } - - PikaCmdArgsType::const_iterator it = argv_.begin() + 1; //Remember the first args is the opt name - dest_ip_ = *it++; - slash::StringToLower(dest_ip_); - - std::string str_dest_port = *it++; - if (!slash::string2l(str_dest_port.data(), str_dest_port.size(), &dest_port_) || dest_port_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsMgrtTagSlotAsync); - return; - } - - if ((dest_ip_ == "127.0.0.1" || dest_ip_ == g_pika_server->host()) && dest_port_ == g_pika_server->port()) { - res_.SetRes(CmdRes::kErrOther, "destination address error"); - return; - } - - std::string str_timeout_ms = *it++; - - std::string str_max_bulks = *it++; - - std::string str_max_bytes_ = *it++; - - std::string str_slot_num = *it++; - if (!slash::string2l(str_slot_num.data(), str_slot_num.size(), &slot_num_) - || slot_num_ < 0 || slot_num_ >= g_pika_conf->default_slot_num()) { - res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsMgrtTagSlotAsync); - return; - } - - std::string str_keys_num = *it++; - return; -} - -void SlotsMgrtTagSlotAsyncCmd::Do(std::shared_ptr partition) { - int64_t moved = 0; - int64_t remained = 0; - // check if this slave node exist. - // if exist, dont mark migrate done - // cache coming request in codis proxy and keep retrying - // Until sync done, new node slaveof no one. 
- // mark this migrate done - // proxy retry cached request in new node - bool is_exist = g_pika_rm->CheckPartitionSlaveExist( - RmNode(dest_ip_, dest_port_, g_pika_conf->default_table(), slot_num_)); - if (is_exist) { - remained = 1; - } else { - remained = 0; - } - res_.AppendArrayLen(2); - res_.AppendInteger(moved); - res_.AppendInteger(remained); -} - -// SLOTSSCAN slotnum cursor [COUNT count] -void SlotsScanCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsScan); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSSCAN only support on sharding mode"); - return; - } - - int64_t slotnum; - if (!slash::string2l(argv_[1].data(), argv_[1].size(), &slotnum)) { - res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsScan); - return; - } - slotnum_ = static_cast(slotnum); - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &cursor_)) { - res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsScan); - return; - } - size_t argc = argv_.size(), index = 3; - - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_) || count_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - return; -} - -void SlotsScanCmd::Do(std::shared_ptr partition) { - std::shared_ptr
table_ptr = g_pika_server->GetTable(g_pika_conf->default_table()); - if (!table_ptr) { - res_.SetRes(CmdRes::kNotFound, kCmdNameSlotsScan); - return; - } - std::shared_ptr cur_partition = table_ptr->GetPartitionById(slotnum_); - if (!cur_partition) { - res_.SetRes(CmdRes::kNotFound, kCmdNameSlotsScan); - return; - } - std::vector keys; - int64_t cursor_ret = cur_partition->db()->Scan(blackwidow::DataType::kAll, - cursor_, pattern_, count_, &keys); - - res_.AppendArrayLen(2); - - char buf[32]; - int len = slash::ll2string(buf, sizeof(buf), cursor_ret); - res_.AppendStringLen(len); - res_.AppendContent(buf); - - res_.AppendArrayLen(keys.size()); - std::vector::iterator iter; - for (iter = keys.begin(); iter != keys.end(); iter++) { - res_.AppendStringLen(iter->size()); - res_.AppendContent(*iter); - } - return; -} - -// SLOTSDEL slot1 [slot2 …] -void SlotsDelCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsDel); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSDEL only support on sharding mode"); - return; - } - - // iter starts from real key, first item in argv_ is command name - std::vector::const_iterator iter = argv_.begin() + 1; - for (; iter != argv_.end(); iter++) { - int64_t slotnum; - if (!slash::string2l(iter->data(), iter->size(), &slotnum)) { - res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsDel); - return; - } - slots_.push_back(static_cast(slotnum)); - } - return; -} - -void SlotsDelCmd::Do(std::shared_ptr partition) { - std::shared_ptr
table_ptr = g_pika_server->GetTable(g_pika_conf->default_table()); - if (!table_ptr) { - res_.SetRes(CmdRes::kNotFound, kCmdNameSlotsDel); - return; - } - if (table_ptr->IsKeyScaning()) { - res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, Try again later"); - return; - } - std::vector successed_slots; - for (auto& slotnum : slots_) { - std::shared_ptr cur_partition = table_ptr->GetPartitionById(slotnum); - if (!cur_partition) { - continue; - } - cur_partition->FlushDB(); - successed_slots.push_back(slotnum); - } - res_.AppendArrayLen(successed_slots.size()); - for (auto& slotnum : successed_slots) { - res_.AppendArrayLen(2); - res_.AppendInteger(slotnum); - res_.AppendInteger(0); - } - return; -} - -// SLOTSMGRT-EXEC-WRAPPER $hashkey $command [$arg1 ...] -void SlotsMgrtExecWrapperCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtExecWrapper); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSMGRT-EXEC-WRAPPER only support on sharding mode"); - return; - } - - PikaCmdArgsType::const_iterator it = argv_.begin() + 1; - key_ = *it++; - //slash::StringToLower(key_); - return; -} - -void SlotsMgrtExecWrapperCmd::Do(std::shared_ptr partition) { - // return 0 means proxy will request to new slot server - // return 1 means proxy will keey trying - // return 2 means return this key directly - res_.AppendArrayLen(2); - res_.AppendInteger(1); - res_.AppendInteger(1); - return; -} - -// slotsmgrt-async-status -void SlotsMgrtAsyncStatusCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtAsyncStatus); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSMGRT-ASYNC-STATUS only support on sharding mode"); - return; - } - - return; -} - -void SlotsMgrtAsyncStatusCmd::Do(std::shared_ptr partition) { - std::string status; - std::string ip = "none"; - int64_t port = -1, slot = -1, moved = -1, remained = -1; - std::string mstatus = "no"; - res_.AppendArrayLen(5); - status = "dest server: " + ip + ":" + std::to_string(port); - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - status = "slot number: " + std::to_string(slot); - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - status = "migrating : " + mstatus; - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - status = "moved keys : " + std::to_string(moved); - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - status = "remain keys: " + std::to_string(remained); - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - return; -} - -// slotsmgrt-async-cancel -void SlotsMgrtAsyncCancelCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtAsyncCancel); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSMGRT-ASYNC-CANCEL only support on sharding mode"); - return; - } - - return; -} - -void SlotsMgrtAsyncCancelCmd::Do(std::shared_ptr partition) { - res_.SetRes(CmdRes::kOk); - return; -} - -// slotsmgrtslot host port timeout slot -void SlotsMgrtSlotCmd::DoInitial() { - res_.SetRes(CmdRes::kErrOther, kCmdNameSlotsMgrtSlot + " NOT supported"); - return; -} - -void SlotsMgrtSlotCmd::Do(std::shared_ptr partition) { - return; -} - -// slotsmgrttagslot host port timeout slot -void SlotsMgrtTagSlotCmd::DoInitial() { - res_.SetRes(CmdRes::kErrOther, kCmdNameSlotsMgrtTagSlot + " NOT supported"); - 
return; -} - -void SlotsMgrtTagSlotCmd::Do(std::shared_ptr partition) { - return; -} - -// slotsmgrtone host port timeout key -void SlotsMgrtOneCmd::DoInitial() { - res_.SetRes(CmdRes::kErrOther, kCmdNameSlotsMgrtOne + " NOT supported"); - return; -} - -void SlotsMgrtOneCmd::Do(std::shared_ptr partition) { - return; -} - -// slotsmgrttagone host port timeout key -void SlotsMgrtTagOneCmd::DoInitial() { - res_.SetRes(CmdRes::kErrOther, kCmdNameSlotsMgrtTagOne + " NOT supported"); - return; -} - -void SlotsMgrtTagOneCmd::Do(std::shared_ptr partition) { - return; -} diff --git a/tools/pika_migrate/src/pika_slot_command.cc b/tools/pika_migrate/src/pika_slot_command.cc new file mode 100644 index 0000000000..9340a6ebb2 --- /dev/null +++ b/tools/pika_migrate/src/pika_slot_command.cc @@ -0,0 +1,1530 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include +#include +#include + +#include "include/pika_admin.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" +#include "include/pika_conf.h" +#include "include/pika_data_distribution.h" +#include "include/pika_define.h" +#include "include/pika_migrate_thread.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "include/pika_slot_command.h" +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/pstd_status.h" +#include "pstd/include/pstd_string.h" +#include "src/redis_streams.h" +#include "storage/include/storage/storage.h" + +#define min(a, b) (((a) > (b)) ? (b) : (a)) +#define MAX_MEMBERS_NUM 512 + +extern std::unique_ptr g_pika_server; +extern std::unique_ptr g_pika_conf; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +PikaMigrate::PikaMigrate() { migrate_clients_.clear(); } + +PikaMigrate::~PikaMigrate() { + // close and release all clients + // get the mutex lock + std::lock_guard lm(mutex_); + KillAllMigrateClient(); +} + +net::NetCli *PikaMigrate::GetMigrateClient(const std::string &host, const int port, int timeout) { + std::string ip_port = host + ":" + std::to_string(port); + net::NetCli *migrate_cli; + pstd::Status s; + + auto migrate_clients_iter = migrate_clients_.find(ip_port); + if (migrate_clients_iter == migrate_clients_.end()) { + migrate_cli = net::NewRedisCli(); + s = migrate_cli->Connect(host, port, g_pika_server->host()); + if (!s.ok()) { + LOG(ERROR) << "GetMigrateClient: new migrate_cli[" << ip_port.c_str() << "] failed"; + + delete migrate_cli; + return nullptr; + } + + LOG(INFO) << "GetMigrateClient: new migrate_cli[" << ip_port.c_str() << "]"; + + // add a new migrate client to the map + migrate_clients_[ip_port] = migrate_cli; + } else { + migrate_cli = static_cast(migrate_clients_iter->second); + } + + // set the client connect timeout + migrate_cli->set_send_timeout(timeout); + migrate_cli->set_recv_timeout(timeout); + + // modify the client last time + gettimeofday(&migrate_cli->last_interaction_, nullptr); + + return migrate_cli; +} + +void PikaMigrate::KillMigrateClient(net::NetCli *migrate_cli) { + auto migrate_clients_iter = migrate_clients_.begin(); + while (migrate_clients_iter != migrate_clients_.end()) { + if (migrate_cli == static_cast(migrate_clients_iter->second)) { + LOG(INFO) << "KillMigrateClient: kill migrate_cli[" << 
migrate_clients_iter->first.c_str() << "]"; + + migrate_cli->Close(); + delete migrate_cli; + migrate_cli = nullptr; + + migrate_clients_.erase(migrate_clients_iter); + break; + } + + ++migrate_clients_iter; + } +} + +// clean and release timed-out clients +void PikaMigrate::CleanMigrateClient() { + struct timeval now; + + // nothing to clean when no clients are cached + if (migrate_clients_.empty()) { + return; + } + + gettimeofday(&now, nullptr); + auto migrate_clients_iter = migrate_clients_.begin(); + while (migrate_clients_iter != migrate_clients_.end()) { + auto migrate_cli = static_cast<net::NetCli *>(migrate_clients_iter->second); + // pika_server runs DoTimingTask every 10s, so try to close the migrate_cli before pika times out, at least 20s in + // advance + int timeout = (g_pika_conf->timeout() > 0) ? g_pika_conf->timeout() : 60; + if (now.tv_sec - migrate_cli->last_interaction_.tv_sec > timeout - 20) { + LOG(INFO) << "CleanMigrateClient: clean migrate_cli[" << migrate_clients_iter->first.c_str() << "]"; + migrate_cli->Close(); + delete migrate_cli; + + migrate_clients_iter = migrate_clients_.erase(migrate_clients_iter); + } else { + ++migrate_clients_iter; + } + } +} + +// close and release all clients +void PikaMigrate::KillAllMigrateClient() { + auto migrate_clients_iter = migrate_clients_.begin(); + while (migrate_clients_iter != migrate_clients_.end()) { + auto migrate_cli = static_cast<net::NetCli *>(migrate_clients_iter->second); + + LOG(INFO) << "KillAllMigrateClient: kill migrate_cli[" << migrate_clients_iter->first.c_str() << "]"; + + migrate_cli->Close(); + delete migrate_cli; + + migrate_clients_iter = migrate_clients_.erase(migrate_clients_iter); + } +} + +/* * + * do migrate a key-value for slotsmgrt/slotsmgrtone commands + * return value: + * -1 - an error occurred + * >=0 - # of successful migrations (0 or 1) + * */ +int PikaMigrate::MigrateKey(const std::string &host, const int port, int timeout, const std::string& key, + const char type, std::string &detail, const std::shared_ptr<DB>& db) { + int send_command_num = -1; + + net::NetCli *migrate_cli = GetMigrateClient(host, port, timeout); + if (!migrate_cli) { + detail = "IOERR error or timeout connecting to the client"; + return -1; + } + + send_command_num = MigrateSend(migrate_cli, key, type, detail, db); + if (send_command_num <= 0) { + return send_command_num; + } + + if (MigrateRecv(migrate_cli, send_command_num, detail)) { + return send_command_num; + } + + return -1; +} + +int PikaMigrate::MigrateSend(net::NetCli* migrate_cli, const std::string& key, const char type, std::string& detail, + const std::shared_ptr<DB>& db) { + std::string wbuf_str; + pstd::Status s; + int command_num = -1; + + // check that the client is alive + if (!migrate_cli) { + return -1; + } + + command_num = ParseKey(key, type, wbuf_str, db); + if (command_num < 0) { + detail = "ParseKey failed"; + return command_num; + } + + // nothing needs to be sent: the key does not exist + if (command_num == 0 || wbuf_str.empty()) { + return 0; + } + + s = migrate_cli->Send(&wbuf_str); + if (!s.ok()) { + LOG(ERROR) << "Connect slots target, Send error: " << s.ToString(); + detail = "Connect slots target, Send error: " + s.ToString(); + KillMigrateClient(migrate_cli); + return -1; + } + + return command_num; +} + +bool PikaMigrate::MigrateRecv(net::NetCli* migrate_cli, int need_receive, std::string& detail) { + pstd::Status s; + std::string reply; + int64_t ret; + + if (nullptr == migrate_cli || need_receive < 0) { + return false; + } + + net::RedisCmdArgsType argv; + while (need_receive) { + s = migrate_cli->Recv(&argv); + if (!s.ok()) { + LOG(ERROR) << "Connect slots target, Recv error: " << s.ToString(); + detail = "Connect slots target, Recv error: " + s.ToString(); + KillMigrateClient(migrate_cli); + return false; + } + + reply = argv[0]; + need_receive--; + + // set returns ok + // zadd returns a number + // hset returns 0 or 1 + // hmset returns ok + // sadd returns a number + // rpush returns the list length + // xadd returns a stream-id + if (argv.size() == 1 && + (kInnerReplOk == pstd::StringToLower(reply) || pstd::string2int(reply.data(), reply.size(), &ret))) { + // continue receiving responses + if (need_receive > 0) { + continue; + } + + // all responses have been received + break; + } + + // failed + detail = "something wrong with slots migrate, reply: " + reply; + LOG(ERROR) << "something wrong with slots migrate, reply:" << reply; + return false; + } + + return true; +} + +// return -1 on error; 0 when there is nothing to migrate; >0 the number of commands +int PikaMigrate::ParseKey(const std::string& key, const char type, std::string& wbuf_str, const std::shared_ptr<DB>& db) { + int command_num = -1; + int64_t ttl = 0; + rocksdb::Status s; + switch (type) { + case 'k': + command_num = ParseKKey(key, wbuf_str, db); + break; + case 'h': + command_num = ParseHKey(key, wbuf_str, db); + break; + case 'l': + command_num = ParseLKey(key, wbuf_str, db); + break; + case 'z': + command_num = ParseZKey(key, wbuf_str, db); + break; + case 's': + command_num = ParseSKey(key, wbuf_str, db); + break; + case 'm': + command_num = ParseMKey(key, wbuf_str, db); + break; + default: + LOG(INFO) << "ParseKey key[" << key << "], the type[" << type << "] is not supported."; + return -1; + break; + } + + // error, or the key does not exist + if (command_num <= 0) { + LOG(INFO) << "ParseKey key[" << key << "], parse return " << command_num + << ", the key may not exist or may have expired."; + return command_num; + } + + // skip kv and stream, because the kv command already carries the ttl: SET key value ttl + if (type == 'k' || type == 'm') { + return command_num; + } + + ttl = TTLByType(type, key, db); + + // -1 indicates the key is valid forever + if (ttl == -1) { + return command_num; + } + + // the key is expired or does not exist; don't migrate + if (ttl == 0 || ttl == -2) { + wbuf_str.clear(); + return 0; + } + + // not kv: append an EXPIRE command (the kv SET carries its ttl itself) + if (SetTTL(key, wbuf_str, ttl)) { + command_num += 1; + } + + return command_num; +} + +bool PikaMigrate::SetTTL(const std::string& key, std::string& wbuf_str, int64_t ttl) { + // -1 indicates the key is valid forever + if (ttl == -1) { + return false; + } + + // ttl == -2 indicates the key does not exist + if (ttl < 0) { + LOG(INFO) << "SetTTL key[" << key << "], ttl is " << ttl; + ttl = 0; + } + + net::RedisCmdArgsType argv; + std::string cmd; + + argv.emplace_back("EXPIRE"); + argv.emplace_back(key); + argv.emplace_back(std::to_string(ttl)); + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + + return true; +} + +// return -1 on error; 0 when there is nothing to migrate; >0 the number of commands +int PikaMigrate::ParseKKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr<DB>& db) { + net::RedisCmdArgsType argv; + std::string cmd; + std::string value; + int64_t ttl = 0; + rocksdb::Status s; + + s = db->storage()->Get(key, &value); + + // if the key does not exist, don't migrate + if (s.IsNotFound()) { + return 0; + } + + if (!s.ok()) { + return -1; + } + + argv.emplace_back("SET"); + argv.emplace_back(key); + argv.emplace_back(value); + + ttl = TTLByType('k', key, db); + + // ttl == -1 indicates the key is valid forever; no EXPIRE is needed + // the key is expired or does not exist; don't migrate + // TODO: check ttl + if (ttl == 0 || ttl == -2) { + wbuf_str.clear(); + return 0; + } + + if (ttl > 0) { + argv.emplace_back("EX"); + argv.emplace_back(std::to_string(ttl)); + } + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + return 1; +} + +int64_t PikaMigrate::TTLByType(const char key_type, const std::string& key, const std::shared_ptr<DB>& db) { + return db->storage()->TTL(key); +} + +int PikaMigrate::ParseZKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr<DB>& db) { + int command_num = 0; + + int64_t next_cursor = 0; + std::vector<storage::ScoreMember> score_members; + do { + score_members.clear(); + rocksdb::Status s = db->storage()->ZScan(key, next_cursor, "*", MAX_MEMBERS_NUM, &score_members, &next_cursor); + if (s.ok()) { + if (score_members.empty()) { + break; + } + + net::RedisCmdArgsType argv; + std::string cmd; + argv.emplace_back("ZADD"); + argv.emplace_back(key); + + for (const auto &score_member : score_members) { + argv.emplace_back(std::to_string(score_member.score)); + argv.emplace_back(score_member.member); + } + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + } while (next_cursor > 0); + + return command_num; +} + +// return -1 on error; 0 when there is nothing to migrate; >0 the number of commands +int PikaMigrate::ParseHKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr<DB>& db) { + int64_t next_cursor = 0; + int command_num = 0; + std::vector<storage::FieldValue> field_values; + do { + field_values.clear(); + rocksdb::Status s = db->storage()->HScan(key, next_cursor, "*", MAX_MEMBERS_NUM, &field_values, &next_cursor); + if (s.ok()) { + if (field_values.empty()) { + break; + } + + net::RedisCmdArgsType argv; + std::string cmd; + argv.emplace_back("HMSET"); + argv.emplace_back(key); + + for (const auto &field_value : field_values) { + argv.emplace_back(field_value.field); + argv.emplace_back(field_value.value); + } + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + } while (next_cursor > 0); + + return command_num; +} + +// return -1 on error; 0 when there is nothing to migrate; >0 the number of commands +int PikaMigrate::ParseSKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr<DB>& db) { + int command_num = 0; + int64_t next_cursor = 0; + std::vector<std::string> members; + + do { + members.clear(); + rocksdb::Status s = db->storage()->SScan(key, next_cursor, "*", MAX_MEMBERS_NUM, &members, &next_cursor); + + if (s.ok()) { + if (members.empty()) { + break; + } + + net::RedisCmdArgsType argv; + std::string cmd; + argv.emplace_back("SADD"); + argv.emplace_back(key); + + for (const auto &member : members) { + argv.emplace_back(member); + } + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + } while (next_cursor > 0); + + return command_num; +} + +int PikaMigrate::ParseMKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr<DB>& db) { + int command_num = 0; + std::vector<storage::IdMessage> id_messages; + storage::StreamScanArgs arg; + storage::StreamUtils::StreamParseIntervalId("-", arg.start_sid, &arg.start_ex, 0); + storage::StreamUtils::StreamParseIntervalId("+", arg.end_sid, &arg.end_ex, 
UINT64_MAX); + auto s = db->storage()->XRange(key, arg, id_messages); + + if (s.ok()) { + net::RedisCmdArgsType argv; + std::string cmd; + argv.emplace_back("XADD"); + argv.emplace_back(key); + for (auto &fv : id_messages) { + std::vector message; + storage::StreamUtils::DeserializeMessage(fv.value, message); + storage::streamID sid; + sid.DeserializeFrom(fv.field); + argv.emplace_back(sid.ToString()); + for (auto &m : message) { + argv.emplace_back(m); + } + } + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + return command_num; +} + +// return -1 is error; 0 don't migrate; >0 the number of commond +int PikaMigrate::ParseLKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db) { + int64_t left = 0; + int command_num = 0; + std::vector values; + + net::RedisCmdArgsType argv; + std::string cmd; + + // del old key, before migrate list; prevent redo when failed + argv.emplace_back("DEL"); + argv.emplace_back(key); + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + + do { + values.clear(); + rocksdb::Status s = db->storage()->LRange(key, left, left + (MAX_MEMBERS_NUM - 1), &values); + if (s.ok()) { + if (values.empty()) { + break; + } + + net::RedisCmdArgsType argv; + std::string cmd; + + argv.emplace_back("RPUSH"); + argv.emplace_back(key); + + for (const auto &value : values) { + argv.emplace_back(value); + } + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + + left += MAX_MEMBERS_NUM; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + } while (!values.empty()); + + if (command_num == 1) { + wbuf_str.clear(); + command_num = 0; + } + + return command_num; +} + +/* * + * do migrate a key-value for slotsmgrt/slotsmgrtone commands + * return value: + * -1 - error happens + * >=0 - # of success migration (0 or 1) + * */ +static int SlotsMgrtOne(const std::string &host, const int port, int timeout, const std::string& key, const char type, + std::string& detail, const std::shared_ptr& db) { + int send_command_num = 0; + rocksdb::Status s; + std::map type_status; + + send_command_num = g_pika_server->pika_migrate_->MigrateKey(host, port, timeout, key, type, detail, db); + + // the key is migrated to target, delete key and slotsinfo + if (send_command_num >= 1) { + std::vector keys; + keys.emplace_back(key); + int64_t count = db->storage()->Del(keys); + if (count > 0) { + WriteDelKeyToBinlog(key, db); + } + + // del slots info + RemSlotKeyByType(std::string(1, type), key, db); + return 1; + } + + // key is not existed, only del slotsinfo + if (send_command_num == 0) { + // del slots info + RemSlotKeyByType(std::string(1, type), key, db); + return 0; + } + return -1; +} + +void RemSlotKeyByType(const std::string& type, const std::string& key, const std::shared_ptr& db) { + uint32_t crc; + int hastag; + uint32_t slotNum = GetSlotsID(g_pika_conf->default_slot_num(), key, &crc, &hastag); + + std::string slot_key = GetSlotKey(slotNum); + int32_t res = 0; + + std::vector members; + members.emplace_back(type + key); + rocksdb::Status s = db->storage()->SRem(slot_key, members, &res); + if (!s.ok()) { + LOG(ERROR) << "srem key[" << key << "] from slotKey[" << slot_key << "] failed, error: " << s.ToString(); + return; + } + + if (hastag) { + std::string tag_key = GetSlotsTagKey(crc); + s = 
db->storage()->SRem(tag_key, members, &res); + if (!s.ok()) { + LOG(ERROR) << "srem key[" << key << "] from tagKey[" << tag_key << "] failed, error: " << s.ToString(); + return; + } + } +} + +/* * + * do migrate mutli key-value(s) for {slotsmgrt/slotsmgrtone}with tag commands + * return value: + * -1 - error happens + * >=0 - # of success migration + * */ +static int SlotsMgrtTag(const std::string& host, const int port, int timeout, const std::string& key, const char type, + std::string& detail, const std::shared_ptr& db) { + int count = 0; + uint32_t crc; + int hastag; + GetSlotsID(g_pika_conf->default_slot_num(), key, &crc, &hastag); + if (!hastag) { + if (type == 0) { + return 0; + } + return SlotsMgrtOne(host, port, timeout, key, type, detail, db); + } + + std::string tag_key = GetSlotsTagKey(crc); + std::vector members; + + // get all keys that have the same crc + rocksdb::Status s = db->storage()->SMembers(tag_key, &members); + if (!s.ok()) { + return -1; + } + + auto iter = members.begin(); + for (; iter != members.end(); iter++) { + std::string key = *iter; + char type = key.at(0); + key.erase(key.begin()); + int ret = SlotsMgrtOne(host, port, timeout, key, type, detail, db); + + // the key is migrated to target + if (ret == 1) { + count++; + continue; + } + + if (ret == 0) { + LOG(WARNING) << "slots migrate tag failed, key: " << key << ", detail: " << detail; + continue; + } + + return -1; + } + + return count; +} + +std::string GetSlotKey(uint32_t slot) { + return SlotKeyPrefix + std::to_string(slot); +} + +// add key to slotkey +void AddSlotKey(const std::string& type, const std::string& key, const std::shared_ptr& db) { + if (g_pika_conf->slotmigrate() != true) { + return; + } + + rocksdb::Status s; + int32_t res = -1; + uint32_t crc; + int hastag; + uint32_t slotID = GetSlotsID(g_pika_conf->default_slot_num(), key, &crc, &hastag); + std::string slot_key = GetSlotKey(slotID); + std::vector members; + members.emplace_back(type + key); + s = db->storage()->SAdd(slot_key, members, &res); + if (!s.ok()) { + LOG(ERROR) << "sadd key[" << key << "] to slotKey[" << slot_key << "] failed, error: " << s.ToString(); + return; + } + + // if res == 0, indicate the key is existed; may return, + // prevent write slot_key success, but write tag_key failed, so always write tag_key + if (hastag) { + std::string tag_key = GetSlotsTagKey(crc); + s = db->storage()->SAdd(tag_key, members, &res); + if (!s.ok()) { + LOG(ERROR) << "sadd key[" << key << "] to tagKey[" << tag_key << "] failed, error: " << s.ToString(); + return; + } + } +} + +// del key from slotkey +void RemSlotKey(const std::string& key, const std::shared_ptr& db) { + if (g_pika_conf->slotmigrate() != true) { + return; + } + std::string type; + if (GetKeyType(key, type, db) < 0) { + LOG(WARNING) << "SRem key: " << key << " from slotKey error"; + return; + } + std::string slotKey = GetSlotKey(GetSlotID(g_pika_conf->default_slot_num(), key)); + int32_t count = 0; + std::vector members(1, type + key); + rocksdb::Status s = db->storage()->SRem(slotKey, members, &count); + if (!s.ok()) { + LOG(WARNING) << "SRem key: " << key << " from slotKey, error: " << s.ToString(); + return; + } +} + +int GetKeyType(const std::string& key, std::string& key_type, const std::shared_ptr& db) { + enum storage::DataType type; + rocksdb::Status s = db->storage()->GetType(key, type); + if (!s.ok()) { + LOG(WARNING) << "Get key type error: " << key << " " << s.ToString(); + key_type = ""; + return -1; + } + auto key_type_char = storage::DataTypeToTag(type); + 
if (key_type_char == DataTypeToTag(storage::DataType::kNones)) { + LOG(WARNING) << "Get key type error: " << key; + key_type = ""; + return -1; + } + key_type = key_type_char; + return 1; +} + +// get slotstagkey by key +std::string GetSlotsTagKey(uint32_t crc) { + return SlotTagPrefix + std::to_string(crc); +} + +// delete key from db && cache +int DeleteKey(const std::string& key, const char key_type, const std::shared_ptr& db) { + int32_t res = 0; + std::string slotKey = GetSlotKey(GetSlotID(g_pika_conf->default_slot_num(), key)); + + // delete slotkey + std::vector members; + members.emplace_back(key_type + key); + rocksdb::Status s = db->storage()->SRem(slotKey, members, &res); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(INFO) << "Del key Srem key " << key << " not found"; + return 0; + } else { + LOG(WARNING) << "Del key Srem key: " << key << " from slotKey, error: " << strerror(errno); + return -1; + } + } + + // delete from cache + if (PIKA_CACHE_NONE != g_pika_conf->cache_mode() + && PIKA_CACHE_STATUS_OK == db->cache()->CacheStatus()) { + db->cache()->Del(members); + } + + // delete key from db + members.clear(); + members.emplace_back(key); + std::map type_status; + int64_t del_nums = db->storage()->Del(members); + if (0 > del_nums) { + LOG(WARNING) << "Del key: " << key << " at slot " << GetSlotID(g_pika_conf->default_slot_num(), key) << " error"; + return -1; + } + + return 1; +} + +void SlotsMgrtTagSlotCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtTagSlot); + return; + } + // Remember the first args is the opt name + auto it = argv_.begin() + 1; + dest_ip_ = *it++; + pstd::StringToLower(dest_ip_); + + std::string str_dest_port = *it++; + if (!pstd::string2int(str_dest_port.data(), str_dest_port.size(), &dest_port_)) { + std::string detail = "invalid port number " + std::to_string(dest_port_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + if (dest_port_ < 0 || dest_port_ > 65535) { + std::string detail = "invalid port number " + std::to_string(dest_port_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + + if ((dest_ip_ == "127.0.0.1" || dest_ip_ == g_pika_server->host()) && dest_port_ == g_pika_server->port()) { + res_.SetRes(CmdRes::kErrOther, "destination address error"); + return; + } + + std::string str_timeout_ms = *it++; + if (!pstd::string2int(str_timeout_ms.data(), str_timeout_ms.size(), &timeout_ms_)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + if (timeout_ms_ < 0) { + std::string detail = "invalid timeout number " + std::to_string(timeout_ms_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + if (timeout_ms_ == 0) { + timeout_ms_ = 100; + } + + std::string str_slot_num = *it++; + if (!pstd::string2int(str_slot_num.data(), str_slot_num.size(), &slot_id_)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + if (slot_id_ < 0 || slot_id_ >= g_pika_conf->default_slot_num()) { + std::string detail = "invalid slot number " + std::to_string(slot_id_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } +} + +void SlotsMgrtTagSlotCmd::Do() { + if (g_pika_conf->slotmigrate() != true) { + LOG(WARNING) << "Not in slotmigrate mode"; + res_.SetRes(CmdRes::kErrOther, "not set slotmigrate"); + return; + } + + int32_t len = 0; + int ret = 0; + std::string detail; + std::string slot_key = GetSlotKey(static_cast(slot_id_)); + + // first, get the count of slot_key, prevent to sscan key very slowly when the key is not found + rocksdb::Status s = db_->storage()->SCard(slot_key, &len); 
+ if (len < 0) { + detail = "failed to get the length of the slot key"; + } + // mutex between SlotsMgrtTagSlotCmd, SlotsMgrtTagOneCmd and the migrator thread + if (len > 0 && g_pika_server->pika_migrate_->Trylock()) { + g_pika_server->pika_migrate_->CleanMigrateClient(); + int64_t next_cursor = 0; + std::vector<std::string> members; + rocksdb::Status s = db_->storage()->SScan(slot_key, 0, "*", 1, &members, &next_cursor); + if (s.ok()) { + for (const auto &member : members) { + std::string key = member; + char type = key.at(0); + key.erase(key.begin()); + ret = SlotsMgrtTag(dest_ip_, static_cast<int>(dest_port_), static_cast<int>(timeout_ms_), key, type, detail, db_); + } + } + // unlock + g_pika_server->pika_migrate_->Unlock(); + } else { + LOG(WARNING) << "pika migrate is running, try again later, slot_id_: " << slot_id_; + } + if (ret == 0) { + LOG(WARNING) << "slots migrate without tag failed, slot_id_: " << slot_id_ << ", detail: " << detail; + } + if (len >= 0 && ret >= 0) { + res_.AppendArrayLen(2); + // the number of keys migrated + res_.AppendInteger(ret); + // the number of keys remaining + res_.AppendInteger(len - ret); + } else { + res_.SetRes(CmdRes::kErrOther, detail); + } + + return; +} + +// check the key type +int SlotsMgrtTagOneCmd::KeyTypeCheck(const std::shared_ptr<DB>& db) { + enum storage::DataType type; + std::string key_type; + rocksdb::Status s = db->storage()->GetType(key_, type); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(WARNING) << "Migrate slot key " << key_ << " not found"; + res_.AppendInteger(0); + } else { + LOG(WARNING) << "Migrate slot key: " << key_ << " error: " << s.ToString(); + res_.SetRes(CmdRes::kErrOther, "migrate slot error"); + } + return -1; + } + key_type_ = storage::DataTypeToTag(type); + if (type == storage::DataType::kNones) { + LOG(WARNING) << "Migrate slot key: " << key_ << " not found"; + res_.AppendInteger(0); + return -1; + } + return 0; +} + +void SlotsMgrtTagOneCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtTagOne); + return; + } + // remember that the first arg is the command name + auto it = argv_.begin() + 1; + dest_ip_ = *it++; + pstd::StringToLower(dest_ip_); + + std::string str_dest_port = *it++; + if (!pstd::string2int(str_dest_port.data(), str_dest_port.size(), &dest_port_)) { + std::string detail = "invalid port number " + std::to_string(dest_port_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + if (dest_port_ < 0 || dest_port_ > 65535) { + std::string detail = "invalid port number " + std::to_string(dest_port_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + + if ((dest_ip_ == "127.0.0.1" || dest_ip_ == g_pika_server->host()) && dest_port_ == g_pika_server->port()) { + res_.SetRes(CmdRes::kErrOther, "destination address error"); + return; + } + + std::string str_timeout_ms = *it++; + if (!pstd::string2int(str_timeout_ms.data(), str_timeout_ms.size(), &timeout_ms_)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + if (timeout_ms_ < 0) { + std::string detail = "invalid timeout number " + std::to_string(timeout_ms_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + if (timeout_ms_ == 0) { + timeout_ms_ = 100; + } + + key_ = *it++; +} + +void SlotsMgrtTagOneCmd::Do() { + if (!g_pika_conf->slotmigrate()) { + LOG(WARNING) << "Not in slotmigrate mode"; + res_.SetRes(CmdRes::kErrOther, "not set slotmigrate"); + return; + } + + int64_t ret = 0; + int32_t len = 0; + int hastag = 0; + uint32_t crc = 0; + std::string detail; + rocksdb::Status s; + std::map<storage::DataType, rocksdb::Status> type_status; + + // when migrating a single key, return early if it does not exist + GetSlotsID(g_pika_conf->default_slot_num(), key_, &crc, &hastag); + if (!hastag) { + std::vector<std::string> keys; + keys.emplace_back(key_); + + // check whether the key exists + ret = db_->storage()->Exists(keys); + + // ret == 0 means the key does not exist + if (ret == -1) { + res_.SetRes(CmdRes::kErrOther, "exists internal error"); + return; + } + + if (ret == 0) { + res_.AppendInteger(0); + return; + } + + // otherwise the key needs to be migrated + } else { + // the key carries a tag; check how many keys share that tag + std::string tag_key = GetSlotsTagKey(crc); + s = db_->storage()->SCard(tag_key, &len); + if (s.IsNotFound()) { + res_.AppendInteger(0); + return; + } + if (!s.ok() || len == -1) { + res_.SetRes(CmdRes::kErrOther, "can't get the number of tag_key"); + return; + } + + if (len == 0) { + res_.AppendInteger(0); + return; + } + + // otherwise the keys need to be migrated + } + + // lock out batch migration: don't run slotsmgrttagslot while slotsmgrttagone is in progress + // the pika_server thread exiting (~PikaMigrate) and the dispatch thread running CronHandle also need the lock + g_pika_server->pika_migrate_->Lock(); + + // return if the key no longer exists + if (!hastag) { + std::vector<std::string> keys; + keys.emplace_back(key_); + // the key may have been deleted by another thread + std::map<storage::DataType, rocksdb::Status> type_status; + ret = db_->storage()->Exists(keys); + + // ret == 0 means the key does not exist + if (ret == -1) { + detail = s.ToString(); + } else if (KeyTypeCheck(db_) != 0) { + detail = "can't get the key type."; + ret = -1; + } else { + ret = SlotsMgrtTag(dest_ip_, static_cast<int>(dest_port_), static_cast<int>(timeout_ms_), key_, key_type_, detail, db_); + } + } else { + // the key may not exist; it is a tag key, so migrate every key with the same tag + ret = SlotsMgrtTag(dest_ip_, static_cast<int>(dest_port_), static_cast<int>(timeout_ms_), key_, 0, detail, db_); + } + + // release the lock + g_pika_server->pika_migrate_->Unlock(); + + if (ret >= 0) { + res_.AppendInteger(ret); + } else { + if (detail.empty()) { + detail = "Unknown Error"; + } + res_.SetRes(CmdRes::kErrOther, detail); + } + + return; +} + +/* * + * slotsinfo [start] [count] + * */ +void SlotsInfoCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsInfo); + return; + } + + if (argv_.size() >= 2) { + if (!pstd::string2int(argv_[1].data(), argv_[1].size(), &begin_)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + if (begin_ < 0 || begin_ >= end_) { + std::string detail = "invalid slot begin = " + argv_[1]; + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + } + + if (argv_.size() >= 3) { + int64_t count = 0; + if (!pstd::string2int(argv_[2].data(), argv_[2].size(), &count)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + if (count < 0) { + std::string detail = "invalid slot count = " + argv_[2]; + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + + if (begin_ + count < end_) { + end_ = begin_ + count; + } + } + + if (argv_.size() >= 4) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsInfo); + return; + } +} + +void SlotsInfoCmd::Do() { + int slotNum = g_pika_conf->default_slot_num(); + int slots_slot[slotNum]; + int slots_size[slotNum]; + memset(slots_slot, 0, slotNum * sizeof(int)); + memset(slots_size, 0, slotNum * sizeof(int)); + int n = 0; + int32_t len = 0; + std::string slot_key; + + for (auto i = static_cast<int>(begin_); i < end_; i++) { + slot_key = GetSlotKey(i); + len = 0; + rocksdb::Status s = db_->storage()->SCard(slot_key, &len); + if (!s.ok() || len == 0) { + continue; + } + + slots_slot[n] = i; + slots_size[n] = len; + n++; + } + + res_.AppendArrayLen(n); 
+ for (int i = 0; i < n; i++) { + res_.AppendArrayLen(2); + res_.AppendInteger(slots_slot[i]); + res_.AppendInteger(slots_size[i]); + } + + return; +} + +void SlotsMgrtTagSlotAsyncCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtTagSlotAsync); + } + // Remember the first args is the opt name + auto it = argv_.begin() + 1; + dest_ip_ = *it++; + pstd::StringToLower(dest_ip_); + + std::string str_dest_port = *it++; + if (!pstd::string2int(str_dest_port.data(), str_dest_port.size(), &dest_port_) || dest_port_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + if ((dest_ip_ == "127.0.0.1" || dest_ip_ == g_pika_server->host()) && dest_port_ == g_pika_server->port()) { + res_.SetRes(CmdRes::kErrOther, "destination address error"); + return; + } + + std::string str_timeout_ms = *it++; + if (!pstd::string2int(str_timeout_ms.data(), str_timeout_ms.size(), &timeout_ms_) || timeout_ms_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + std::string str_max_bulks = *it++; + if (!pstd::string2int(str_max_bulks.data(), str_max_bulks.size(), &max_bulks_) || max_bulks_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + std::string str_max_bytes_ = *it++; + if (!pstd::string2int(str_max_bytes_.data(), str_max_bytes_.size(), &max_bytes_) || max_bytes_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + std::string str_slot_num = *it++; + if (!pstd::string2int(str_slot_num.data(), str_slot_num.size(), &slot_id_) || slot_id_ < 0 || + slot_id_ >= g_pika_conf->default_slot_num()) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + std::string str_keys_num = *it++; + if (!pstd::string2int(str_keys_num.data(), str_keys_num.size(), &keys_num_) || keys_num_ < 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + return; +} + +void SlotsMgrtTagSlotAsyncCmd::Do() { + // check whether open slotmigrate + if (!g_pika_conf->slotmigrate()) { + res_.SetRes(CmdRes::kErrOther, "please open slotmigrate and reload slot"); + return; + } + + int32_t remained = 0; + std::string slotKey = GetSlotKey(static_cast(slot_id_)); + storage::Status status = db_->storage()->SCard(slotKey, &remained); + if (status.IsNotFound()) { + LOG(INFO) << "find no record in slot " << slot_id_; + res_.AppendArrayLen(2); + res_.AppendInteger(0); + res_.AppendInteger(remained); + return; + } + if (!status.ok()) { + LOG(WARNING) << "Slot batch migrate keys get result error"; + res_.SetRes(CmdRes::kErrOther, "Slot batch migrating keys get result error"); + return; + } + + bool ret = g_pika_server->SlotsMigrateBatch(dest_ip_, dest_port_, timeout_ms_, slot_id_, keys_num_, db_); + if (!ret) { + LOG(WARNING) << "Slot batch migrate keys error"; + res_.SetRes(CmdRes::kErrOther, "Slot batch migrating keys error, may be currently migrating"); + return; + } + + res_.AppendArrayLen(2); + res_.AppendInteger(0); + res_.AppendInteger(remained); + return; +} + +void SlotsMgrtAsyncStatusCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtAsyncStatus); + } + return; +} + +void SlotsMgrtAsyncStatusCmd::Do() { + std::string status; + std::string ip; + int64_t port = -1, slots = -1, moved = -1, remained = -1; + bool migrating = false; + g_pika_server->GetSlotsMgrtSenderStatus(&ip, &port, &slots, &migrating, &moved, &remained); + std::string mstatus = migrating ? 
"yes" : "no"; + res_.AppendArrayLen(5); + status = "dest server: " + ip + ":" + std::to_string(port); + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + status = "slot number: " + std::to_string(slots); + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + status = "migrating : " + mstatus; + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + status = "moved keys : " + std::to_string(moved); + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + status = "remain keys: " + std::to_string(remained); + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + + return; +} + +void SlotsMgrtAsyncCancelCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtAsyncCancel); + } + return; +} + +void SlotsMgrtAsyncCancelCmd::Do() { + bool ret = g_pika_server->SlotsMigrateAsyncCancel(); + if (!ret) { + res_.SetRes(CmdRes::kErrOther, "slotsmgrt-async-cancel error"); + } + res_.SetRes(CmdRes::kOk); + return; +} + +void SlotsDelCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsDel); + } + slots_.assign(argv_.begin(), argv_.end()); + return; +} + +void SlotsDelCmd::Do() { + std::vector keys; + std::vector::const_iterator iter; + for (iter = slots_.begin(); iter != slots_.end(); iter++) { + keys.emplace_back(SlotKeyPrefix + *iter); + } + std::map type_status; + int64_t count = db_->storage()->Del(keys); + if (count >= 0) { + res_.AppendInteger(count); + } else { + res_.SetRes(CmdRes::kErrOther, "SlotsDel error"); + } + return; +} + +/* * + * slotshashkey [key1 key2...] + * */ +void SlotsHashKeyCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsHashKey); + return; + } + + auto iter = argv_.begin(); + keys_.assign(++iter, argv_.end()); + return; +} + +void SlotsHashKeyCmd::Do() { + std::vector::const_iterator keys_it; + + res_.AppendArrayLenUint64(keys_.size()); + for (keys_it = keys_.begin(); keys_it != keys_.end(); ++keys_it) { + res_.AppendInteger(GetSlotsID(g_pika_conf->default_slot_num(), *keys_it, nullptr, nullptr)); + } + + return; +} + +void SlotsScanCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsScan); + return; + } + key_ = SlotKeyPrefix + argv_[1]; + if (std::stoll(argv_[1].data()) < 0 || std::stoll(argv_[1].data()) >= g_pika_conf->default_slot_num()) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsScan); + return; + } + if (!pstd::string2int(argv_[2].data(), argv_[2].size(), &cursor_)) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsScan); + return; + } + size_t argc = argv_.size(), index = 3; + while (index < argc) { + std::string opt = argv_[index]; + if (!strcasecmp(opt.data(), "match") || !strcasecmp(opt.data(), "count")) { + index++; + if (index >= argc) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + if (!strcasecmp(opt.data(), "match")) { + pattern_ = argv_[index]; + } else if (!pstd::string2int(argv_[index].data(), argv_[index].size(), &count_)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } + if (count_ < 0) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + return; +} + +void SlotsScanCmd::Do() { + std::vector members; + rocksdb::Status s = db_->storage()->SScan(key_, cursor_, pattern_, count_, &members, &cursor_); + + if (members.size() <= 0) { + cursor_ = 0; + } + res_.AppendContent("*2"); + + char 
+  int64_t len = pstd::ll2string(buf, sizeof(buf), cursor_);
+  res_.AppendStringLen(len);
+  res_.AppendContent(buf);
+
+  res_.AppendArrayLenUint64(members.size());
+  auto iter_member = members.begin();
+  for (; iter_member != members.end(); iter_member++) {
+    res_.AppendStringLenUint64(iter_member->size());
+    res_.AppendContent(*iter_member);
+  }
+  return;
+}
+
+void SlotsMgrtExecWrapperCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtExecWrapper);
+    return;
+  }
+  auto it = argv_.begin() + 1;
+  key_ = *it++;
+  pstd::StringToLower(key_);
+  return;
+}
+
+// return 0 means the key doesn't exist, or the key is not migrating
+// return 1 means the key is migrating
+// return -1 means something went wrong
+void SlotsMgrtExecWrapperCmd::Do() {
+  res_.AppendArrayLen(2);
+  int ret = g_pika_server->SlotsMigrateOne(key_, db_);
+  switch (ret) {
+    case 0:
+      res_.AppendInteger(0);
+      res_.AppendInteger(0);
+      return;
+    case 1:
+      res_.AppendInteger(1);
+      res_.AppendInteger(1);
+      return;
+    default:
+      res_.AppendInteger(-1);
+      res_.AppendInteger(-1);
+      return;
+  }
+  return;
+}
+
+void SlotsReloadCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsReload);
+  }
+  return;
+}
+
+void SlotsReloadCmd::Do() {
+  g_pika_server->Bgslotsreload(db_);
+  const PikaServer::BGSlotsReload &info = g_pika_server->bgslots_reload();
+  char buf[256];
+  snprintf(buf, sizeof(buf), "+%s : %lld", info.s_start_time.c_str(), g_pika_server->GetSlotsreloadingCursor());
+  res_.AppendContent(buf);
+  return;
+}
+
+void SlotsReloadOffCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsReloadOff);
+  }
+  return;
+}
+
+void SlotsReloadOffCmd::Do() {
+  g_pika_server->SetSlotsreloading(false);
+  res_.SetRes(CmdRes::kOk);
+  return;
+}
+
+void SlotsCleanupCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsCleanup);
+    return;
+  }
+
+  auto iter = argv_.begin() + 1;
+  std::string slot;
+  long slotLong = 0;
+  std::vector<int> slots;
+  for (; iter != argv_.end(); iter++) {
+    slot = *iter;
+    if (!pstd::string2int(slot.data(), slot.size(), &slotLong) || slotLong < 0) {
+      res_.SetRes(CmdRes::kInvalidInt);
+      return;
+    }
+    slots.emplace_back(static_cast<int>(slotLong));
+  }
+  cleanup_slots_.swap(slots);
+  return;
+}
+
+void SlotsCleanupCmd::Do() {
+  g_pika_server->Bgslotscleanup(cleanup_slots_, db_);
+  std::vector<int> cleanup_slots(g_pika_server->GetCleanupSlots());
+  res_.AppendArrayLenUint64(cleanup_slots.size());
+  auto iter = cleanup_slots.begin();
+  for (; iter != cleanup_slots.end(); iter++) {
+    res_.AppendInteger(*iter);
+  }
+  return;
+}
+
+void SlotsCleanupOffCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsCleanupOff);
+  }
+  return;
+}
+
+void SlotsCleanupOffCmd::Do() {
+  g_pika_server->StopBgslotscleanup();
+  res_.SetRes(CmdRes::kOk);
+  return;
+}
diff --git a/tools/pika_migrate/src/pika_stable_log.cc b/tools/pika_migrate/src/pika_stable_log.cc
new file mode 100644
index 0000000000..ba51d9171c
--- /dev/null
+++ b/tools/pika_migrate/src/pika_stable_log.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
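// Orientation for the new file below: StableLog owns one DB's on-disk binlog
// directory. The constructor scans existing binlog files and records the first
// usable offset; PurgeStableLogs() hands DoPurgeStableLogs() to a background
// worker, with the atomic purging_ flag ensuring only one purge runs at a time;
// TruncateTo()/PurgeFileAfter() roll the log back during replication resets.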
+
+#include
+#include
+
+#include
+
+#include "include/pika_rm.h"
+#include "include/pika_server.h"
+#include "include/pika_stable_log.h"
+#include "pstd/include/env.h"
+#include "include/pika_conf.h"
+
+using pstd::Status;
+
+extern PikaServer* g_pika_server;
+extern std::unique_ptr<PikaReplicaManager> g_pika_rm;
+
+StableLog::StableLog(std::string db_name, std::string log_path)
+    : purging_(false), db_name_(std::move(db_name)), log_path_(std::move(log_path)) {
+  stable_logger_ = std::make_shared<Binlog>(log_path_, g_pika_conf->binlog_file_size());
+  std::map<uint32_t, std::string> binlogs;
+  if (!GetBinlogFiles(&binlogs)) {
+    LOG(FATAL) << log_path_ << " Could not get binlog files!";
+  }
+  if (!binlogs.empty()) {
+    UpdateFirstOffset(binlogs.begin()->first);
+  }
+}
+
+StableLog::~StableLog() = default;
+
+void StableLog::Leave() {
+  Close();
+  RemoveStableLogDir();
+}
+
+void StableLog::Close() { stable_logger_->Close(); }
+
+void StableLog::RemoveStableLogDir() {
+  std::string logpath = log_path_;
+  if (logpath[logpath.length() - 1] == '/') {
+    logpath.erase(logpath.length() - 1);
+  }
+  logpath.append("_deleting/");
+  if (pstd::RenameFile(log_path_, logpath) != 0) {
+    LOG(WARNING) << "Failed to move log to trash, error: " << strerror(errno);
+    return;
+  }
+  g_pika_server->PurgeDir(logpath);
+
+  LOG(WARNING) << "DB StableLog: " << db_name_ << " move to trash success";
+}
+
+bool StableLog::PurgeStableLogs(uint32_t to, bool manual) {
+  // Only one thread can go through
+  bool expect = false;
+  if (!purging_.compare_exchange_strong(expect, true)) {
+    LOG(WARNING) << "purge process already exist";
+    return false;
+  }
+  auto arg = new PurgeStableLogArg();
+  arg->to = to;
+  arg->manual = manual;
+  arg->logger = shared_from_this();
+  g_pika_server->PurgelogsTaskSchedule(&DoPurgeStableLogs, static_cast<void*>(arg));
+  return true;
+}
+
+void StableLog::ClearPurge() { purging_ = false; }
+
+void StableLog::DoPurgeStableLogs(void* arg) {
+  std::unique_ptr<PurgeStableLogArg> purge_arg(static_cast<PurgeStableLogArg*>(arg));
+  purge_arg->logger->PurgeFiles(purge_arg->to, purge_arg->manual);
+  purge_arg->logger->ClearPurge();
+}
+
+bool StableLog::PurgeFiles(uint32_t to, bool manual) {
+  std::map<uint32_t, std::string> binlogs;
+  if (!GetBinlogFiles(&binlogs)) {
+    LOG(WARNING) << log_path_ << " Could not get binlog files!";
+    return false;
+  }
+
+  int delete_num = 0;
+  struct stat file_stat;
+  auto remain_expire_num = static_cast<int32_t>(binlogs.size() - g_pika_conf->expire_logs_nums());
+  std::shared_ptr<SyncMasterDB> master_db = nullptr;
+  std::map<uint32_t, std::string>::iterator it;
+  for (it = binlogs.begin(); it != binlogs.end(); ++it) {
+    if ((manual && it->first <= to)  // Manual purgelogsto
+        || (remain_expire_num > 0)   // Expire num trigger
+        || (binlogs.size() - delete_num > 10  // At least 10 files must remain
+            && stat(((log_path_ + it->second)).c_str(), &file_stat) == 0 &&
+            file_stat.st_mtime < time(nullptr) - g_pika_conf->expire_logs_days() * 24 * 3600)) {  // Expire time trigger
+      // We check this every time to avoid lock when we do file deletion
+      master_db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_));
+      if (!master_db) {
+        LOG(WARNING) << "DB: " << db_name_ << " Not Found";
+        return false;
+      }
+
+      if (!master_db->BinlogCloudPurge(it->first)) {
+        LOG(WARNING) << log_path_ << " Could not purge " << (it->first) << ", since it is still in use";
+        return false;
+      }
+
+      // Do delete
+      if (pstd::DeleteFile(log_path_ + it->second)) {
+        ++delete_num;
+        --remain_expire_num;
+      } else {
+        LOG(WARNING) << log_path_ << " Purge log file : " << (it->second) << " failed! error: delete file failed";
+      }
+    } else {
+      // Break at the first file that satisfies no trigger,
+      // since the binlogs are ordered by file index
+      break;
+    }
+  }
+  if (delete_num != 0) {
+    std::map<uint32_t, std::string> binlogs;
+    if (!GetBinlogFiles(&binlogs)) {
+      LOG(WARNING) << log_path_ << " Could not get binlog files!";
+      return false;
+    }
+    auto it = binlogs.begin();
+    if (it != binlogs.end()) {
+      UpdateFirstOffset(it->first);
+    }
+  }
+  if (delete_num != 0) {
+    LOG(INFO) << log_path_ << " Successfully purged " << delete_num << " binlog file(s)";
+  }
+  return true;
+}
+
+bool StableLog::GetBinlogFiles(std::map<uint32_t, std::string>* binlogs) {
+  std::vector<std::string> children;
+  int ret = pstd::GetChildren(log_path_, children);
+  if (ret) {
+    LOG(WARNING) << log_path_ << " Get all files in log path failed! error:" << ret;
+    return false;
+  }
+
+  int64_t index = 0;
+  std::string sindex;
+  std::vector<std::string>::iterator it;
+  for (it = children.begin(); it != children.end(); ++it) {
+    if ((*it).compare(0, kBinlogPrefixLen, kBinlogPrefix) != 0) {
+      continue;
+    }
+    sindex = (*it).substr(kBinlogPrefixLen);
+    if (pstd::string2int(sindex.c_str(), sindex.size(), &index) == 1) {
+      binlogs->insert(std::pair<uint32_t, std::string>(static_cast<uint32_t>(index), *it));
+    }
+  }
+  return true;
+}
+
+void StableLog::UpdateFirstOffset(uint32_t filenum) {
+  PikaBinlogReader binlog_reader;
+  int res = binlog_reader.Seek(stable_logger_, filenum, 0);
+  if (res != 0) {
+    LOG(WARNING) << "Binlog reader init failed";
+    return;
+  }
+
+  BinlogItem item;
+  BinlogOffset offset;
+  while (true) {
+    std::string binlog;
+    Status s = binlog_reader.Get(&binlog, &(offset.filenum), &(offset.offset));
+    if (s.IsEndFile()) {
+      return;
+    }
+    if (!s.ok()) {
+      LOG(WARNING) << "Binlog reader get failed";
+      return;
+    }
+    if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog, &item)) {
+      LOG(WARNING) << "Binlog item decode failed";
+      return;
+    }
+    // exec_time == 0, could be padding binlog
+    if (item.exec_time() != 0) {
+      break;
+    }
+  }
+
+  std::lock_guard l(offset_rwlock_);
+  first_offset_.b_offset = offset;
+  first_offset_.l_offset.term = item.term_id();
+  first_offset_.l_offset.index = item.logic_id();
+}
+
+Status StableLog::PurgeFileAfter(uint32_t filenum) {
+  std::map<uint32_t, std::string> binlogs;
+  bool res = GetBinlogFiles(&binlogs);
+  if (!res) {
+    return Status::Corruption("GetBinlogFiles failed");
+  }
+  for (auto& it : binlogs) {
+    if (it.first > filenum) {
+      // Do delete
+      auto filename = log_path_ + it.second;
+      if (!pstd::DeleteFile(filename)) {
+        return Status::IOError("pstd::DeleteFile failed, filename = " + filename);
+      }
+      LOG(WARNING) << "Delete file " << filename;
+    }
+  }
+  return Status::OK();
+}
+
+Status StableLog::TruncateTo(const LogOffset& offset) {
+  Status s = PurgeFileAfter(offset.b_offset.filenum);
+  if (!s.ok()) {
+    return s;
+  }
+  return stable_logger_->Truncate(offset.b_offset.filenum, offset.b_offset.offset, offset.l_offset.index);
+}
diff --git a/tools/pika_migrate/src/pika_statistic.cc b/tools/pika_migrate/src/pika_statistic.cc
new file mode 100644
index 0000000000..b7ab7a8c53
--- /dev/null
+++ b/tools/pika_migrate/src/pika_statistic.cc
@@ -0,0 +1,111 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
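The QpsStatistic code in the file below derives per-second rates from
monotonically increasing counters sampled by a periodic timer. A self-contained
sketch of the same arithmetic (the numbers are illustrative, not from any run):

#include <cstdint>
#include <iostream>

int main() {
  // counters captured at the previous tick and now; 1.2 s elapsed
  uint64_t last_querynum = 10000, querynum = 11500;
  uint64_t last_time_us = 0, now_us = 1200000;
  // same formula as ResetLastSecQuerynum: scale the delta to one second
  uint64_t qps = (querynum - last_querynum) * 1000000 / (now_us - last_time_us);
  std::cout << qps << std::endl;  // prints 1250: 1500 queries over 1.2 s
}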
+ +#include "include/pika_statistic.h" + +#include "pstd/include/env.h" + +#include "include/pika_command.h" + +/* QpsStatistic */ + +QpsStatistic::QpsStatistic() + : querynum(0), + write_querynum(0), + last_querynum(0), + last_write_querynum(0), + last_sec_querynum(0), + last_sec_write_querynum(0), + last_time_us(0) {} + +QpsStatistic::QpsStatistic(const QpsStatistic& other) { + querynum = other.querynum.load(); + write_querynum = other.write_querynum.load(); + last_querynum = other.last_querynum.load(); + last_write_querynum = other.last_write_querynum.load(); + last_sec_querynum = other.last_sec_querynum.load(); + last_sec_write_querynum = other.last_sec_write_querynum.load(); + last_time_us = other.last_time_us.load(); +} + +void QpsStatistic::IncreaseQueryNum(bool is_write) { + querynum++; + if (is_write) { + write_querynum++; + } +} + +void QpsStatistic::ResetLastSecQuerynum() { + uint64_t last_query = last_querynum.load(); + uint64_t last_write_query = last_write_querynum.load(); + uint64_t cur_query = querynum.load(); + uint64_t cur_write_query = write_querynum.load(); + uint64_t last_time = last_time_us.load(); + if (cur_write_query < last_write_query) { + cur_write_query = last_write_query; + } + if (cur_query < last_query) { + cur_query = last_query; + } + uint64_t delta_query = cur_query - last_query; + uint64_t delta_write_query = cur_write_query - last_write_query; + uint64_t cur_time_us = pstd::NowMicros(); + if (cur_time_us <= last_time) { + cur_time_us = last_time + 1; + } + uint64_t delta_time_us = cur_time_us - last_time; + last_sec_querynum.store(delta_query * 1000000 / (delta_time_us)); + last_sec_write_querynum.store(delta_write_query * 1000000 / (delta_time_us)); + last_querynum.store(cur_query); + last_write_querynum.store(cur_write_query); + + last_time_us.store(cur_time_us); +} + +/* Statistic */ + +Statistic::Statistic() { + pthread_rwlockattr_t db_stat_rw_attr; + pthread_rwlockattr_init(&db_stat_rw_attr); +} + +QpsStatistic Statistic::DBStat(const std::string& db_name) { + std::shared_lock l(db_stat_rw); + return db_stat[db_name]; +} + +std::unordered_map Statistic::AllDBStat() { + std::shared_lock l(db_stat_rw); + return db_stat; +} + +void Statistic::UpdateDBQps(const std::string& db_name, const std::string& command, bool is_write) { + bool db_exist = true; + std::unordered_map::iterator iter; + { + std::shared_lock l(db_stat_rw); + auto search = db_stat.find(db_name); + if (search == db_stat.end()) { + db_exist = false; + } else { + iter = search; + } + } + if (db_exist) { + iter->second.IncreaseQueryNum(is_write); + } else { + { + std::lock_guard l(db_stat_rw); + db_stat[db_name].IncreaseQueryNum(is_write); + } + } +} + +void Statistic::ResetDBLastSecQuerynum() { + std::shared_lock l(db_stat_rw); + for (auto& stat : db_stat) { + stat.second.ResetLastSecQuerynum(); + } +} diff --git a/tools/pika_migrate/src/pika_stream.cc b/tools/pika_migrate/src/pika_stream.cc new file mode 100644 index 0000000000..3bddf8c564 --- /dev/null +++ b/tools/pika_migrate/src/pika_stream.cc @@ -0,0 +1,540 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
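For orientation, the parsers in the stream file below accept the following
shapes (the XREAD/XREADGROUP lines are taken from the comment in the file; the
XADD/XTRIM lines are condensed from the parser's case comments — '~' and '='
are both accepted but trimming is always exact, and LIMIT is rejected):

    XADD key [NOMKSTREAM] [MAXLEN|MINID [=|~] threshold] <*|id> field value [field value ...]
    XTRIM key MAXLEN|MINID [=|~] threshold
    XREAD [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] id [id ...]
    XREADGROUP GROUP group consumer [COUNT count] [BLOCK milliseconds] [NOACK] STREAMS key [key ...] id [id ...]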
+
+#include "include/pika_stream.h"
+#include
+#include
+#include
+#include
+
+#include "glog/logging.h"
+#include "include/pika_command.h"
+#include "include/pika_db.h"
+#include "include/pika_slot_command.h"
+#include "include/pika_define.h"
+#include "storage/storage.h"
+
+// s : rocksdb::Status
+// res : CmdRes
+#define TRY_CATCH_ERROR(s, res)                    \
+  do {                                             \
+    if (!s.ok()) {                                 \
+      LOG(ERROR) << s.ToString();                  \
+      res.SetRes(CmdRes::kErrOther, s.ToString()); \
+      return;                                      \
+    }                                              \
+  } while (0)
+
+void ParseAddOrTrimArgsOrReply(CmdRes &res, const PikaCmdArgsType &argv, storage::StreamAddTrimArgs &args, int *idpos,
+                               bool is_xadd) {
+  int i = 2;
+  bool limit_given = false;
+  for (; i < argv.size(); ++i) {
+    size_t moreargs = argv.size() - 1 - i;
+    const std::string &opt = argv[i];
+
+    if (is_xadd && strcasecmp(opt.c_str(), "*") == 0 && opt.size() == 1) {
+      // case: XADD mystream * field value [field value ...]
+      break;
+
+    } else if (strcasecmp(opt.c_str(), "maxlen") == 0 && moreargs) {
+      // case: XADD mystream ... MAXLEN [= | ~] threshold ...
+      if (args.trim_strategy != storage::StreamTrimStrategy::TRIM_STRATEGY_NONE) {
+        res.SetRes(CmdRes::kSyntaxErr, "syntax error, MAXLEN and MINID options at the same time are not compatible");
+        return;
+      }
+      const auto &next = argv[i + 1];
+      if (moreargs >= 2 && (next == "~" || next == "=")) {
+        // we always do exact trimming, so we ignore the ~ and =
+        i++;
+      }
+      // parse threshold as uint64
+      if (!storage::StreamUtils::string2uint64(argv[i + 1].c_str(), args.maxlen)) {
+        res.SetRes(CmdRes::kInvalidParameter, "Invalid MAXLEN argument");
+        return;
+      }
+      i++;
+      args.trim_strategy = storage::StreamTrimStrategy::TRIM_STRATEGY_MAXLEN;
+      args.trim_strategy_arg_idx = i;
+
+    } else if (strcasecmp(opt.c_str(), "minid") == 0 && moreargs) {
+      // case: XADD mystream ... MINID [= | ~] threshold ...
+      if (args.trim_strategy != storage::StreamTrimStrategy::TRIM_STRATEGY_NONE) {
+        res.SetRes(CmdRes::kSyntaxErr, "syntax error, MAXLEN and MINID options at the same time are not compatible");
+        return;
+      }
+      const auto &next = argv[i + 1];
+      if (moreargs >= 2 && (next == "~" || next == "=") && next.size() == 1) {
+        // we always do exact trimming, so we ignore the ~ and =
+        i++;
+      }
+      // parse threshold as StreamID
+      if (!storage::StreamUtils::StreamParseID(argv[i + 1], args.minid, 0)) {
+        res.SetRes(CmdRes::kInvalidParameter, "Invalid stream ID specified as stream ");
+        return;
+      }
+      i++;
+      args.trim_strategy = storage::StreamTrimStrategy::TRIM_STRATEGY_MINID;
+      args.trim_strategy_arg_idx = i;
+
+    } else if (strcasecmp(opt.c_str(), "limit") == 0 && moreargs) {
+      // case: XADD mystream ... ~ threshold LIMIT count ...
+      // we never do approximate trimming, so we do not support the LIMIT option
+      res.SetRes(CmdRes::kSyntaxErr, "syntax error, Pika do not support LIMIT option");
+      return;
+
+    } else if (is_xadd && strcasecmp(opt.c_str(), "nomkstream") == 0) {
+      // case: XADD mystream ... NOMKSTREAM ...
+      args.no_mkstream = true;
+
+    } else if (is_xadd) {
+      // case: XADD mystream ... ID ...
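      // e.g. an explicit ID such as "1526919030474-55" (ms-seq); seq_given below
      // records whether the sequence part was supplied, so XADD can fill it in later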
+      if (!storage::StreamUtils::StreamParseStrictID(argv[i], args.id, 0, &args.seq_given)) {
+        res.SetRes(CmdRes::kInvalidParameter, "Invalid stream ID specified as stream ");
+        return;
+      }
+      args.id_given = true;
+      break;
+    } else {
+      res.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+  }  // end for
+
+  if (idpos) {
+    *idpos = i;
+  } else if (is_xadd) {
+    res.SetRes(CmdRes::kErrOther, "idpos is null, xadd command must parse idpos");
+  }
+}
+
+/* XREADGROUP GROUP group consumer [COUNT count] [BLOCK milliseconds]
+ * [NOACK] STREAMS key [key ...] id [id ...]
+ * XREAD [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] id
+ * [id ...] */
+void ParseReadOrReadGroupArgsOrReply(CmdRes &res, const PikaCmdArgsType &argv, storage::StreamReadGroupReadArgs &args,
+                                     bool is_xreadgroup) {
+  int streams_arg_idx{0};  // the index of the STREAMS keys arg
+  size_t streams_cnt{0};   // the count of stream keys
+
+  for (int i = 1; i < argv.size(); ++i) {
+    size_t moreargs = argv.size() - i - 1;
+    const std::string &o = argv[i];
+    if (strcasecmp(o.c_str(), "BLOCK") == 0 && moreargs) {
+      i++;
+      if (!storage::StreamUtils::string2uint64(argv[i].c_str(), args.block)) {
+        res.SetRes(CmdRes::kInvalidParameter, "Invalid BLOCK argument");
+        return;
+      }
+    } else if (strcasecmp(o.c_str(), "COUNT") == 0 && moreargs) {
+      i++;
+      if (!storage::StreamUtils::string2int32(argv[i].c_str(), args.count)) {
+        res.SetRes(CmdRes::kInvalidParameter, "Invalid COUNT argument");
+        return;
+      }
+      if (args.count < 0) args.count = 0;
+    } else if (strcasecmp(o.c_str(), "STREAMS") == 0 && moreargs) {
+      streams_arg_idx = i + 1;
+      streams_cnt = argv.size() - streams_arg_idx;
+      if (streams_cnt % 2 != 0) {
+        res.SetRes(CmdRes::kSyntaxErr, "Unbalanced list of streams: for each stream key an ID must be specified");
+        return;
+      }
+      streams_cnt /= 2;
+      break;
+    } else if (strcasecmp(o.c_str(), "GROUP") == 0 && moreargs >= 2) {
+      if (!is_xreadgroup) {
+        res.SetRes(CmdRes::kSyntaxErr, "The GROUP option is only supported by XREADGROUP. You called XREAD instead.");
+        return;
+      }
+      args.group_name = argv[i + 1];
+      args.consumer_name = argv[i + 2];
+      i += 2;
+    } else if (strcasecmp(o.c_str(), "NOACK") == 0) {
+      if (!is_xreadgroup) {
+        res.SetRes(CmdRes::kSyntaxErr, "The NOACK option is only supported by XREADGROUP. You called XREAD instead.");
+        return;
+      }
+      args.noack_ = true;
+    } else {
+      res.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+  }
+
+  if (streams_arg_idx == 0) {
+    res.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+
+  if (is_xreadgroup && args.group_name.empty()) {
+    res.SetRes(CmdRes::kSyntaxErr, "Missing GROUP option for XREADGROUP");
+    return;
+  }
+
+  // collect keys and ids
+  for (auto i = streams_arg_idx + streams_cnt; i < argv.size(); ++i) {
+    auto key_idx = i - streams_cnt;
+    args.keys.push_back(argv[key_idx]);
+    args.unparsed_ids.push_back(argv[i]);
+  }
+}
+
+void AppendMessagesToRes(CmdRes &res, std::vector<storage::FieldValue> &id_messages, const DB* db) {
+  assert(db);
+  res.AppendArrayLenUint64(id_messages.size());
+  for (auto &fv : id_messages) {
+    std::vector<std::string> message;
+    if (!storage::StreamUtils::DeserializeMessage(fv.value, message)) {
+      LOG(ERROR) << "Deserialize message failed";
+      res.SetRes(CmdRes::kErrOther, "Deserialize message failed");
+      return;
+    }
+
+    assert(message.size() % 2 == 0);
+    res.AppendArrayLen(2);
+    storage::streamID sid;
+    sid.DeserializeFrom(fv.field);
+    res.AppendString(sid.ToString());  // field here is the stream id
+    res.AppendArrayLenUint64(message.size());
+    for (auto &m : message) {
+      res.AppendString(m);
+    }
+  }
+}
+
+void XAddCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXAdd);
+    return;
+  }
+  key_ = argv_[1];
+
+  int idpos{-1};
+  ParseAddOrTrimArgsOrReply(res_, argv_, args_, &idpos, true);
+  if (res_.ret() != CmdRes::kNone) {
+    return;
+  } else if (idpos < 0) {
+    LOG(ERROR) << "Invalid idpos: " << idpos;
+    res_.SetRes(CmdRes::kErrOther);
+    return;
+  }
+
+  field_pos_ = idpos + 1;
+  if ((argv_.size() - field_pos_) % 2 == 1 || (argv_.size() - field_pos_) < 2) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXAdd);
+    return;
+  }
+}
+
+void XAddCmd::Do() {
+  std::string message;
+  if (!storage::StreamUtils::SerializeMessage(argv_, message, field_pos_)) {
+    res_.SetRes(CmdRes::kErrOther, "Serialize message failed");
+    return;
+  }
+
+  auto s = db_->storage()->XAdd(key_, message, args_);
+  if (s.IsInvalidArgument()) {  // check the status just returned, not a stale member
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  // reset the command's id in argv if it was not given
+  if (!args_.id_given || !args_.seq_given) {
+    assert(field_pos_ > 0);
+    argv_[field_pos_ - 1] = args_.id.ToString();
+  }
+
+  res_.AppendString(args_.id.ToString());
+  AddSlotKey("m", key_, db_);
+}
+
+void XRangeCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXRange);
+    return;
+  }
+  key_ = argv_[1];
+  if (!storage::StreamUtils::StreamParseIntervalId(argv_[2], args_.start_sid, &args_.start_ex, 0) ||
+      !storage::StreamUtils::StreamParseIntervalId(argv_[3], args_.end_sid, &args_.end_ex, UINT64_MAX)) {
+    res_.SetRes(CmdRes::kInvalidParameter, "Invalid stream ID specified as stream ");
+    return;
+  }
+  if (args_.start_ex && args_.start_sid.ms == UINT64_MAX && args_.start_sid.seq == UINT64_MAX) {
+    res_.SetRes(CmdRes::kInvalidParameter, "invalid start id");
+    return;
+  }
+  if (args_.end_ex && args_.end_sid.ms == 0 && args_.end_sid.seq == 0) {
+    res_.SetRes(CmdRes::kInvalidParameter, "invalid end id");
+    return;
+  }
+  if (argv_.size() == 6) {
+    if (!storage::StreamUtils::string2int32(argv_[5].c_str(), args_.limit)) {
+      res_.SetRes(CmdRes::kInvalidParameter, "COUNT should be an integer greater than 0 and not bigger than INT32_MAX");
+      return;
+    }
+  }
+}
+
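XRangeCmd::Do below only scans when start <= end. A minimal model of the ID
ordering involved (a hypothetical stand-in for storage::streamID, which this
file otherwise gets from the storage layer): a stream ID is a (milliseconds,
sequence) pair compared lexicographically.

#include <cstdint>
#include <iostream>
#include <tuple>

// Hypothetical stand-in for storage::streamID: ordering is lexicographic on
// (ms, seq), which is what makes the "start <= end" guard meaningful.
struct StreamID {
  uint64_t ms{0};
  uint64_t seq{0};
};
bool operator<=(const StreamID& a, const StreamID& b) {
  return std::tie(a.ms, a.seq) <= std::tie(b.ms, b.seq);
}

int main() {
  StreamID a{5, 10}, b{6, 0};
  std::cout << std::boolalpha << (a <= b) << "\n";  // true: 5-10 precedes 6-0
}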
+void XRangeCmd::Do() {
+  std::vector<storage::FieldValue> id_messages;
+
+  if (args_.start_sid <= args_.end_sid) {
+    auto s = db_->storage()->XRange(key_, args_, id_messages);
+    if (s.IsInvalidArgument()) {
+      res_.SetRes(CmdRes::kMultiKey);
+      return;
+    } else if (!s.ok() && !s.IsNotFound()) {
+      res_.SetRes(CmdRes::kErrOther, s.ToString());
+      return;
+    }
+  }
+  AppendMessagesToRes(res_, id_messages, db_.get());
+}
+
+void XRevrangeCmd::Do() {
+  std::vector<storage::FieldValue> id_messages;
+
+  if (args_.start_sid >= args_.end_sid) {
+    auto s = db_->storage()->XRevrange(key_, args_, id_messages);
+    if (s.IsInvalidArgument()) {
+      res_.SetRes(CmdRes::kMultiKey);
+      return;
+    } else if (!s.ok() && !s.IsNotFound()) {
+      res_.SetRes(CmdRes::kErrOther, s.ToString());
+      return;
+    }
+  }
+
+  AppendMessagesToRes(res_, id_messages, db_.get());
+}
+
+void XDelCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXDel);
+    return;
+  }
+
+  key_ = argv_[1];
+  for (int i = 2; i < argv_.size(); i++) {
+    storage::streamID id;
+    if (!storage::StreamUtils::StreamParseStrictID(argv_[i], id, 0, nullptr)) {
+      res_.SetRes(CmdRes::kInvalidParameter, "Invalid stream ID specified as stream ");
+      return;
+    }
+    if (res_.ret() != CmdRes::kNone) {
+      return;
+    }
+    ids_.emplace_back(id);
+  }
+}
+
+void XDelCmd::Do() {
+  int32_t count{0};
+  auto s = db_->storage()->XDel(key_, ids_, count);
+  if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok() && !s.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  if (count > INT_MAX) {
+    return res_.SetRes(CmdRes::kErrOther, "count is larger than INT_MAX");
+  }
+
+  res_.AppendInteger(count);
+}
+
+void XLenCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXLen);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void XLenCmd::Do() {
+  int32_t len{0};
+  auto s = db_->storage()->XLen(key_, len);
+  if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kNotFound);
+    return;
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  if (len > INT_MAX) {
+    return res_.SetRes(CmdRes::kErrOther, "stream's length is larger than INT_MAX");
+  }
+
+  res_.AppendInteger(len);
+  return;
+}
+
+void XReadCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXRead);
+    return;
+  }
+
+  ParseReadOrReadGroupArgsOrReply(res_, argv_, args_, false);
+}
+
+void XReadCmd::Do() {
+  std::vector<std::vector<storage::FieldValue>> results;
+  // A wrong key does not trigger an error, it is just ignored;
+  // we need to save the right keys and return them to the client.
+  std::vector<std::string> reserved_keys;
+  auto s = db_->storage()->XRead(args_, results, reserved_keys);
+
+  if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok() && s.ToString() ==
+                            "The > ID can be specified only when calling "
+                            "XREADGROUP using the GROUP <group> "
+                            "<consumer> option.") {
+    res_.SetRes(CmdRes::kSyntaxErr, s.ToString());
+    return;
+  } else if (!s.ok()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  if (results.empty()) {
+    res_.AppendArrayLen(-1);
+    return;
+  }
+
+  assert(results.size() == reserved_keys.size());
+
+  // append one [key, messages] pair per stream that had entries
+  res_.AppendArrayLenUint64(results.size());
+  for (size_t i = 0; i < results.size(); ++i) {
+    res_.AppendArrayLen(2);
+    res_.AppendString(reserved_keys[i]);
+    AppendMessagesToRes(res_, results[i], db_.get());
+  }
+}
+
+void XTrimCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXTrim);
+    return;
+  }
+
+  key_ = argv_[1];
+  ParseAddOrTrimArgsOrReply(res_, argv_, args_, nullptr, false);
+  if (res_.ret() != CmdRes::kNone) {
+    return;
+  }
+}
+
+void XTrimCmd::Do() {
+  int32_t count{0};
+  auto s = db_->storage()->XTrim(key_, args_, count);
+  if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok() && !s.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  if (count > INT_MAX) {
+    return res_.SetRes(CmdRes::kErrOther, "count is larger than INT_MAX");
+  }
+
+  res_.AppendInteger(count);
+  return;
+}
+
+void XInfoCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXInfo);
+    return;
+  }
+
+  subcmd_ = argv_[1];
+  key_ = argv_[2];
+  if (!strcasecmp(subcmd_.c_str(), "STREAM")) {
+    // optional trailing arguments: FULL [COUNT count]
+    if (argv_.size() > 3 && strcasecmp(argv_[3].c_str(), "FULL") == 0) {
+      is_full_ = true;
+      if (argv_.size() > 4 && !storage::StreamUtils::string2uint64(argv_[4].c_str(), count_)) {
+        res_.SetRes(CmdRes::kInvalidParameter, "invalid count");
+        return;
+      }
+    } else if (argv_.size() > 3) {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+
+  } else if (!strcasecmp(subcmd_.c_str(), "GROUPS")) {
+    // XINFO GROUPS <key> takes no further arguments
+    if (argv_.size() != 3) {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+
+  } else if (!strcasecmp(subcmd_.c_str(), "CONSUMERS")) {
+    // XINFO CONSUMERS <key> <groupname>
+    if (argv_.size() != 4) {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    cgroupname_ = argv_[3];
+  } else {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+}
+
+void XInfoCmd::Do() {
+  if (!strcasecmp(subcmd_.c_str(), "STREAM")) {
+    this->StreamInfo(db_);
+  } else if (!strcasecmp(subcmd_.c_str(), "GROUPS")) {
+    // Korpse: TODO:
+    // this->GroupsInfo(slot);
+  } else if (!strcasecmp(subcmd_.c_str(), "CONSUMERS")) {
+    // Korpse: TODO:
+    // this->ConsumersInfo(slot);
+  } else {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+}
+
+void XInfoCmd::StreamInfo(std::shared_ptr<DB>& db) {
+  storage::StreamInfoResult info;
+  auto s = db_->storage()->XInfo(key_, info);
+  if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kNotFound);
+    return;
+  } else if (!s.ok()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  // append the stream info as five field/value pairs
+  res_.AppendArrayLen(10);
+  res_.AppendString("length");
+  res_.AppendInteger(static_cast<int64_t>(info.length));
+  res_.AppendString("last-generated-id");
+  res_.AppendString(info.last_id_str);
+  res_.AppendString("max-deleted-entry-id");
+  res_.AppendString(info.max_deleted_entry_id_str);
res_.AppendString("entries-added"); + res_.AppendInteger(static_cast(info.entries_added)); + res_.AppendString("recorded-first-entry-id"); + res_.AppendString(info.first_id_str); +} diff --git a/tools/pika_migrate/src/pika_table.cc b/tools/pika_migrate/src/pika_table.cc deleted file mode 100644 index 20d2c9be4f..0000000000 --- a/tools/pika_migrate/src/pika_table.cc +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_table.h" - -#include "include/pika_server.h" -#include "include/pika_cmd_table_manager.h" - -extern PikaServer* g_pika_server; -extern PikaCmdTableManager* g_pika_cmd_table_manager; - -std::string TablePath(const std::string& path, - const std::string& table_name) { - char buf[100]; - snprintf(buf, sizeof(buf), "%s/", table_name.data()); - return path + buf; -} - -Table::Table(const std::string& table_name, - uint32_t partition_num, - const std::string& db_path, - const std::string& log_path) : - table_name_(table_name), - partition_num_(partition_num) { - - db_path_ = TablePath(db_path, table_name_); - log_path_ = TablePath(log_path, "log_" + table_name_); - - slash::CreatePath(db_path_); - slash::CreatePath(log_path_); - - pthread_rwlock_init(&partitions_rw_, NULL); -} - -Table::~Table() { - StopKeyScan(); - pthread_rwlock_destroy(&partitions_rw_); - partitions_.clear(); -} - -std::string Table::GetTableName() { - return table_name_; -} - -void Table::BgSaveTable() { - slash::RWLock l(&partitions_rw_, false); - for (const auto& item : partitions_) { - item.second->BgSavePartition(); - } -} - -void Table::CompactTable(const blackwidow::DataType& type) { - slash::RWLock l(&partitions_rw_, false); - for (const auto& item : partitions_) { - item.second->Compact(type); - } -} - -bool Table::FlushPartitionDB() { - slash::RWLock rwl(&partitions_rw_, false); - slash::MutexLock ml(&key_scan_protector_); - if (key_scan_info_.key_scaning_) { - return false; - } - for (const auto& item : partitions_) { - item.second->FlushDB(); - } - return true; -} - -bool Table::FlushPartitionSubDB(const std::string& db_name) { - slash::RWLock rwl(&partitions_rw_, false); - slash::MutexLock ml(&key_scan_protector_); - if (key_scan_info_.key_scaning_) { - return false; - } - for (const auto& item : partitions_) { - item.second->FlushSubDB(db_name); - } - return true; -} - -bool Table::IsBinlogIoError() { - slash::RWLock l(&partitions_rw_, false); - for (const auto& item : partitions_) { - if (item.second->IsBinlogIoError()) { - return true; - } - } - return false; -} - -uint32_t Table::PartitionNum() { - return partition_num_; -} - -Status Table::AddPartitions(const std::set& partition_ids) { - slash::RWLock l(&partitions_rw_, true); - for (const uint32_t& id : partition_ids) { - if (id >= partition_num_) { - return Status::Corruption("partition index out of range[0, " - + std::to_string(partition_num_ - 1) + "]"); - } else if (partitions_.find(id) != partitions_.end()) { - return Status::Corruption("partition " - + std::to_string(id) + " already exist"); - } - } - - for (const uint32_t& id : partition_ids) { - partitions_.emplace(id, std::make_shared( - table_name_, id, db_path_, log_path_)); - } - return Status::OK(); -} - -Status Table::RemovePartitions(const std::set& partition_ids) { - slash::RWLock 
l(&partitions_rw_, true); - for (const uint32_t& id : partition_ids) { - if (partitions_.find(id) == partitions_.end()) { - return Status::Corruption("partition " + std::to_string(id) + " not found"); - } - } - - for (const uint32_t& id : partition_ids) { - partitions_[id]->Leave(); - partitions_.erase(id); - } - return Status::OK(); -} - -void Table::KeyScan() { - slash::MutexLock ml(&key_scan_protector_); - if (key_scan_info_.key_scaning_) { - return; - } - - key_scan_info_.key_scaning_ = true; - key_scan_info_.duration = -2; // duration -2 mean the task in waiting status, - // has not been scheduled for exec - BgTaskArg* bg_task_arg = new BgTaskArg(); - bg_task_arg->table = shared_from_this(); - g_pika_server->KeyScanTaskSchedule(&DoKeyScan, reinterpret_cast(bg_task_arg)); -} - -bool Table::IsKeyScaning() { - slash::MutexLock ml(&key_scan_protector_); - return key_scan_info_.key_scaning_; -} - -void Table::RunKeyScan() { - Status s; - std::vector new_key_infos(5); - - InitKeyScan(); - slash::RWLock rwl(&partitions_rw_, false); - for (const auto& item : partitions_) { - std::vector tmp_key_infos; - s = item.second->GetKeyNum(&tmp_key_infos); - if (s.ok()) { - for (size_t idx = 0; idx < tmp_key_infos.size(); ++idx) { - new_key_infos[idx].keys += tmp_key_infos[idx].keys; - new_key_infos[idx].expires += tmp_key_infos[idx].expires; - new_key_infos[idx].avg_ttl += tmp_key_infos[idx].avg_ttl; - new_key_infos[idx].invaild_keys += tmp_key_infos[idx].invaild_keys; - } - } else { - break; - } - } - key_scan_info_.duration = time(NULL) - key_scan_info_.start_time; - - slash::MutexLock lm(&key_scan_protector_); - if (s.ok()) { - key_scan_info_.key_infos = new_key_infos; - } - key_scan_info_.key_scaning_ = false; -} - -void Table::StopKeyScan() { - slash::RWLock rwl(&partitions_rw_, false); - slash::MutexLock ml(&key_scan_protector_); - for (const auto& item : partitions_) { - item.second->db()->StopScanKeyNum(); - } - key_scan_info_.key_scaning_ = false; -} - -void Table::ScanDatabase(const blackwidow::DataType& type) { - slash::RWLock rwl(&partitions_rw_, false); - for (const auto& item : partitions_) { - printf("\n\npartition name : %s\n", item.second->GetPartitionName().c_str()); - item.second->db()->ScanDatabase(type); - } -} - -Status Table::GetPartitionsKeyScanInfo(std::map* infos) { - slash::RWLock rwl(&partitions_rw_, false); - for (const auto& item : partitions_) { - (*infos)[item.first] = item.second->GetKeyScanInfo(); - } - return Status::OK(); -} - -KeyScanInfo Table::GetKeyScanInfo() { - slash::MutexLock lm(&key_scan_protector_); - return key_scan_info_; -} - -void Table::Compact(const blackwidow::DataType& type) { - slash::RWLock rwl(&partitions_rw_, true); - for (const auto& item : partitions_) { - item.second->Compact(type); - } -} - -void Table::DoKeyScan(void *arg) { - BgTaskArg* bg_task_arg = reinterpret_cast(arg); - bg_task_arg->table->RunKeyScan(); - delete bg_task_arg; -} - -void Table::InitKeyScan() { - key_scan_info_.start_time = time(NULL); - char s_time[32]; - int len = strftime(s_time, sizeof(s_time), "%Y-%m-%d %H:%M:%S", localtime(&key_scan_info_.start_time)); - key_scan_info_.s_start_time.assign(s_time, len); - key_scan_info_.duration = -1; // duration -1 mean the task in processing -} - -void Table::LeaveAllPartition() { - slash::RWLock rwl(&partitions_rw_, true); - for (const auto& item : partitions_) { - item.second->Leave(); - } - partitions_.clear(); -} - -std::set Table::GetPartitionIds() { - std::set ids; - slash::RWLock l(&partitions_rw_, false); - for (const 
auto& item : partitions_) { - ids.insert(item.first); - } - return ids; -} - -std::shared_ptr Table::GetPartitionById(uint32_t partition_id) { - slash::RWLock rwl(&partitions_rw_, false); - auto iter = partitions_.find(partition_id); - return (iter == partitions_.end()) ? NULL : iter->second; -} - -std::shared_ptr Table::GetPartitionByKey(const std::string& key) { - assert(partition_num_ != 0); - uint32_t index = g_pika_cmd_table_manager->DistributeKey(key, partition_num_); - slash::RWLock rwl(&partitions_rw_, false); - auto iter = partitions_.find(index); - return (iter == partitions_.end()) ? NULL : iter->second; -} diff --git a/tools/pika_migrate/src/pika_transaction.cc b/tools/pika_migrate/src/pika_transaction.cc new file mode 100644 index 0000000000..85381dcf8d --- /dev/null +++ b/tools/pika_migrate/src/pika_transaction.cc @@ -0,0 +1,313 @@ +// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include "include/pika_transaction.h" +#include "include/pika_admin.h" +#include "include/pika_client_conn.h" +#include "include/pika_define.h" +#include "include/pika_list.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "src/pstd/include/scope_record_lock.h" + +extern std::unique_ptr g_pika_server; +extern std::unique_ptr g_pika_rm; + +void MultiCmd::Do() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (conn == nullptr || client_conn == nullptr) { + res_.SetRes(CmdRes::kErrOther, name()); + return; + } + if (client_conn->IsInTxn()) { + res_.SetRes(CmdRes::kErrOther, "MULTI calls can not be nested"); + return; + } + client_conn->SetTxnStartState(true); + res_.SetRes(CmdRes::kOk); +} + +void MultiCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, name()); + return; + } +} + +void ExecCmd::Do() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + std::vector res_vec = {}; + std::vector> resp_strs; + for (size_t i = 0; i < cmds_.size(); ++i) { + resp_strs.emplace_back(std::make_shared()); + } + auto resp_strs_iter = resp_strs.begin(); + std::for_each(cmds_.begin(), cmds_.end(), [&client_conn, &res_vec, &resp_strs_iter](CmdInfo& each_cmd_info) { + each_cmd_info.cmd_->SetResp(*resp_strs_iter++); + auto& cmd = each_cmd_info.cmd_; + auto& db = each_cmd_info.db_; + auto sync_db = each_cmd_info.sync_db_; + cmd->res() = {}; + if (cmd->name() == kCmdNameFlushall) { + auto flushall = std::dynamic_pointer_cast(cmd); + flushall->FlushAllWithoutLock(); + client_conn->SetTxnFailedIfKeyExists(); + } else if (cmd->name() == kCmdNameFlushdb) { + auto flushdb = std::dynamic_pointer_cast(cmd); + flushdb->DoWithoutLock(); + if (cmd->res().ok()) { + cmd->res().SetRes(CmdRes::kOk); + } + client_conn->SetTxnFailedIfKeyExists(each_cmd_info.db_->GetDBName()); + } else { + cmd->Do(); + if (cmd->res().ok() && cmd->is_write()) { + cmd->DoBinlog(); + auto db_keys = cmd->current_key(); + for (auto& item : db_keys) { + item = cmd->db_name().append(item); + } + if (cmd->IsNeedUpdateCache()) { + cmd->DoUpdateCache(); + } + client_conn->SetTxnFailedFromKeys(db_keys); + } + } + res_vec.emplace_back(cmd->res()); + }); + + res_.AppendArrayLen(res_vec.size()); + for (auto& r : res_vec) { + res_.AppendStringRaw(r.message()); + } +} + +void 
ExecCmd::Execute() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (client_conn == nullptr) { + res_.SetRes(CmdRes::kErrOther, name()); + return; + } + if (!client_conn->IsInTxn()) { + res_.SetRes(CmdRes::kErrOther, "EXEC without MULTI"); + return; + } + if (IsTxnFailedAndSetState()) { + client_conn->ExitTxn(); + return; + } + SetCmdsVec(); + Lock(); + Do(); + + Unlock(); + ServeToBLrPopWithKeys(); + list_cmd_.clear(); + client_conn->ExitTxn(); +} + +void ExecCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, name()); + return; + } + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (client_conn == nullptr) { + res_.SetRes(CmdRes::kErrOther, name()); + return; + } +} + +bool ExecCmd::IsTxnFailedAndSetState() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (client_conn->IsTxnInitFailed()) { + res_.SetRes(CmdRes::kTxnAbort, "Transaction discarded because of previous errors."); + return true; + } + if (client_conn->IsTxnWatchFailed()) { + res_.AppendStringLen(-1); + return true; + } + return false; +} + +void ExecCmd::Lock() { + g_pika_server->DBLockShared(); + std::for_each(lock_db_.begin(), lock_db_.end(), [](auto& need_lock_db) { + need_lock_db->DBLock(); + }); + if (is_lock_rm_dbs_) { + g_pika_rm->DBLock(); + } + + std::for_each(r_lock_dbs_.begin(), r_lock_dbs_.end(), [this](auto& need_lock_db) { + if (lock_db_keys_.count(need_lock_db) != 0) { + pstd::lock::MultiRecordLock record_lock(need_lock_db->LockMgr()); + record_lock.Lock(lock_db_keys_[need_lock_db]); + } + need_lock_db->DBLockShared(); + }); +} + +void ExecCmd::Unlock() { + std::for_each(r_lock_dbs_.begin(), r_lock_dbs_.end(), [this](auto& need_lock_db) { + if (lock_db_keys_.count(need_lock_db) != 0) { + pstd::lock::MultiRecordLock record_lock(need_lock_db->LockMgr()); + record_lock.Unlock(lock_db_keys_[need_lock_db]); + } + need_lock_db->DBUnlockShared(); + }); + if (is_lock_rm_dbs_) { + g_pika_rm->DBUnlock(); + } + std::for_each(lock_db_.begin(), lock_db_.end(), [](auto& need_lock_db) { + need_lock_db->DBUnlock(); + }); + g_pika_server->DBUnlockShared(); +} + +void ExecCmd::SetCmdsVec() { + auto client_conn = std::dynamic_pointer_cast(GetConn()); + auto cmd_que = client_conn->GetTxnCmdQue(); + + while (!cmd_que.empty()) { + auto cmd = cmd_que.front(); + auto cmd_db = client_conn->GetCurrentTable(); + auto db = g_pika_server->GetDB(cmd_db); + auto sync_db = g_pika_rm->GetSyncMasterDBByName(DBInfo(cmd->db_name())); + cmds_.emplace_back(cmd, db, sync_db); + if (cmd->name() == kCmdNameSelect) { + cmd->Do(); + } else if (cmd->name() == kCmdNameFlushdb) { + is_lock_rm_dbs_ = true; + lock_db_.emplace(g_pika_server->GetDB(cmd_db)); + } else if (cmd->name() == kCmdNameFlushall) { + is_lock_rm_dbs_ = true; + for (const auto& db_item : g_pika_server->GetDB()) { + lock_db_.emplace(db_item.second); + } + } else { + r_lock_dbs_.emplace(db); + if (lock_db_keys_.count(db) == 0) { + lock_db_keys_.emplace(db, std::vector{}); + } + auto cmd_keys = cmd->current_key(); + lock_db_keys_[db].insert(lock_db_keys_[db].end(), cmd_keys.begin(), cmd_keys.end()); + if (cmd->name() == kCmdNameLPush || cmd->name() == kCmdNameRPush) { + list_cmd_.insert(list_cmd_.end(), cmds_.back()); + } + } + cmd_que.pop(); + } +} + +void ExecCmd::ServeToBLrPopWithKeys() { + for (auto each_list_cmd : list_cmd_) { + auto push_keys = each_list_cmd.cmd_->current_key(); + //PS: currently, except for blpop/brpop, there are three cmds 
inherited from BlockingBaseCmd: lpush, rpush, rpoplpush.
+    // For rpoplpush, which has 2 keys (source and receiver), push_keys[0] fetches the
+    // receiver and push_keys[1] fetches the source (see RpopLpushCmd::current_key()).
+    auto push_key = push_keys[0];
+    if (auto push_list_cmd = std::dynamic_pointer_cast<BlockingBaseCmd>(each_list_cmd.cmd_);
+        push_list_cmd != nullptr) {
+      push_list_cmd->TryToServeBLrPopWithThisKey(push_key, each_list_cmd.db_);
+    }
+  }
+}
+
+void WatchCmd::Execute() {
+  Do();
+}
+
+void WatchCmd::Do() {
+  auto mp = std::map<storage::DataType, rocksdb::Status>{};
+  for (const auto& key : keys_) {
+    auto type_count = db_->storage()->IsExist(key, &mp);
+    if (type_count > 1) {
+      res_.SetRes(CmdRes::CmdRet::kErrOther, "EXEC WATCH watch key must be unique");
+      return;
+    }
+    mp.clear();
+  }
+
+  auto conn = GetConn();
+  auto client_conn = std::dynamic_pointer_cast<PikaClientConn>(conn);
+  if (client_conn == nullptr) {
+    res_.SetRes(CmdRes::kErrOther, name());
+    return;
+  }
+  if (client_conn->IsInTxn()) {
+    res_.SetRes(CmdRes::CmdRet::kErrOther, "WATCH inside MULTI is not allowed");
+    return;
+  }
+  client_conn->AddKeysToWatch(db_keys_);
+  res_.SetRes(CmdRes::kOk);
+}
+
+void WatchCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, name());
+    return;
+  }
+  size_t pos = 1;
+  while (pos < argv_.size()) {
+    keys_.emplace_back(argv_[pos]);
+    db_keys_.push_back(db_name() + "_" + argv_[pos++]);
+  }
+}
+
+void UnwatchCmd::Do() {
+  auto conn = GetConn();
+  auto client_conn = std::dynamic_pointer_cast<PikaClientConn>(conn);
+  if (client_conn == nullptr) {
+    res_.SetRes(CmdRes::kErrOther, name());
+    return;
+  }
+  if (client_conn->IsTxnExecing()) {
+    res_.SetRes(CmdRes::CmdRet::kOk);
+    return;
+  }
+  client_conn->RemoveWatchedKeys();
+  if (client_conn->IsTxnWatchFailed()) {
+    client_conn->SetTxnWatchFailState(false);
+  }
+  res_.SetRes(CmdRes::CmdRet::kOk);
+}
+
+void UnwatchCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, name());
+    return;
+  }
+}
+
+void DiscardCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, name());
+    return;
+  }
+}
+
+void DiscardCmd::Do() {
+  auto conn = GetConn();
+  auto client_conn = std::dynamic_pointer_cast<PikaClientConn>(conn);
+  if (client_conn == nullptr) {
+    res_.SetRes(CmdRes::kErrOther, name());
+    return;
+  }
+  if (!client_conn->IsInTxn()) {
+    res_.SetRes(CmdRes::kErrOther, "DISCARD without MULTI");
+    return;
+  }
+  client_conn->ExitTxn();
+  res_.SetRes(CmdRes::CmdRet::kOk);
+}
diff --git a/tools/pika_migrate/src/pika_zset.cc b/tools/pika_migrate/src/pika_zset.cc
index 23e144567e..6b62dbf93b 100644
--- a/tools/pika_migrate/src/pika_zset.cc
+++ b/tools/pika_migrate/src/pika_zset.cc
@@ -4,8 +4,12 @@ // of patent rights can be found in the PATENTS file in the same directory.
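The pika_zset.cc hunks below are mostly mechanical migrations: slash:: string
helpers become their pstd:: equivalents, Do(std::shared_ptr<Partition>) becomes
Do() against db_->storage(), and cache hooks (ReadCache/DoThroughDB/DoUpdateCache)
are added. One detail worth noting: these conversion helpers return nonzero on
success, so the old and new failure checks are equivalent (pstd is assumed here
to keep slash's return convention, which is what the converted call sites rely on):

    // equivalent failure checks, assuming the shared convention:
    // if (!slash::string2d(s.data(), s.size(), &v))     { /* parse failed */ }
    // if (pstd::string2d(s.data(), s.size(), &v) == 0)  { /* parse failed */ }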
#include "include/pika_zset.h" +#include "include/pika_slot_command.h" -#include "slash/include/slash_string.h" +#include + +#include "pstd/include/pstd_string.h" +#include "include/pika_cache.h" void ZAddCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -22,24 +26,35 @@ void ZAddCmd::DoInitial() { double score; size_t index = 2; for (; index < argc; index += 2) { - if (!slash::string2d(argv_[index].data(), argv_[index].size(), &score)) { + if (pstd::string2d(argv_[index].data(), argv_[index].size(), &score) == 0) { res_.SetRes(CmdRes::kInvalidFloat); return; } score_members.push_back({score, argv_[index + 1]}); } - return; } -void ZAddCmd::Do(std::shared_ptr partition) { +void ZAddCmd::Do() { int32_t count = 0; - rocksdb::Status s = partition->db()->ZAdd(key_, score_members, &count); - if (s.ok()) { + s_ = db_->storage()->ZAdd(key_, score_members, &count); + if (s_.ok()) { res_.AppendInteger(count); + AddSlotKey("z", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZAddCmd::DoThroughDB() { + Do(); +} + +void ZAddCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->ZAddIfKeyExist(key_, score_members); } - return; } void ZCardCmd::DoInitial() { @@ -48,17 +63,30 @@ void ZCardCmd::DoInitial() { return; } key_ = argv_[1]; - return; } -void ZCardCmd::Do(std::shared_ptr partition) { +void ZCardCmd::Do() { int32_t card = 0; - rocksdb::Status s = partition->db()->ZCard(key_, &card); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->ZCard(key_, &card); + if (s_.ok() || s_.IsNotFound()) { res_.AppendInteger(card); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, "zcard error"); } +} + +void ZCardCmd::ReadCache() { + res_.SetRes(CmdRes::kCacheMiss); +} + +void ZCardCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZCardCmd::DoUpdateCache() { return; } @@ -68,23 +96,23 @@ void ZScanCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &cursor_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &cursor_) == 0) { res_.SetRes(CmdRes::kWrongNum, kCmdNameZScan); return; } - size_t argc = argv_.size(), index = 3; + size_t argc = argv_.size(); + size_t index = 3; while (index < argc) { std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) { index++; if (index >= argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(opt.data(), "match")) { + if (strcasecmp(opt.data(), "match") == 0) { pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_)) { + } else if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -98,32 +126,32 @@ void ZScanCmd::DoInitial() { res_.SetRes(CmdRes::kSyntaxErr); return; } - return; } -void ZScanCmd::Do(std::shared_ptr partition) { +void ZScanCmd::Do() { int64_t next_cursor = 0; - std::vector score_members; - rocksdb::Status s = partition->db()->ZScan(key_, cursor_, pattern_, count_, &score_members, &next_cursor); + std::vector score_members; + rocksdb::Status s = db_->storage()->ZScan(key_, cursor_, pattern_, count_, &score_members, &next_cursor); if (s.ok() || s.IsNotFound()) { res_.AppendContent("*2"); char 
buf[32]; - int64_t len = slash::ll2string(buf, sizeof(buf), next_cursor); + int64_t len = pstd::ll2string(buf, sizeof(buf), next_cursor); res_.AppendStringLen(len); res_.AppendContent(buf); - res_.AppendArrayLen(score_members.size() * 2); + res_.AppendArrayLenUint64(score_members.size() * 2); for (const auto& score_member : score_members) { res_.AppendString(score_member.member); - len = slash::d2string(buf, sizeof(buf), score_member.score); + len = pstd::d2string(buf, sizeof(buf), score_member.score); res_.AppendStringLen(len); res_.AppendContent(buf); } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; } void ZIncrbyCmd::DoInitial() { @@ -132,45 +160,56 @@ void ZIncrbyCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2d(argv_[2].data(), argv_[2].size(), &by_)) { + if (pstd::string2d(argv_[2].data(), argv_[2].size(), &by_) == 0) { res_.SetRes(CmdRes::kInvalidFloat); return; } member_ = argv_[3]; - return; } -void ZIncrbyCmd::Do(std::shared_ptr partition) { - double score = 0; - rocksdb::Status s = partition->db()->ZIncrby(key_, member_, by_, &score); +void ZIncrbyCmd::Do() { + double score = 0.0; + rocksdb::Status s = db_->storage()->ZIncrby(key_, member_, by_, &score); if (s.ok()) { + score_ = score; char buf[32]; - int64_t len = slash::d2string(buf, sizeof(buf), score); + int64_t len = pstd::d2string(buf, sizeof(buf), score); res_.AppendStringLen(len); res_.AppendContent(buf); + AddSlotKey("z", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; +} + +void ZIncrbyCmd::DoThroughDB() { + Do(); +} + +void ZIncrbyCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->ZIncrbyIfKeyExist(key_, member_, by_, this, db_); + } } void ZsetRangeParentCmd::DoInitial() { - if (argv_.size() == 5 && !strcasecmp(argv_[4].data(), "withscores")) { + if (argv_.size() == 5 && (strcasecmp(argv_[4].data(), "withscores") == 0)) { is_ws_ = true; } else if (argv_.size() != 4) { res_.SetRes(CmdRes::kSyntaxErr); return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &start_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &start_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &stop_)) { + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &stop_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - return; } void ZRangeCmd::DoInitial() { @@ -181,10 +220,39 @@ void ZRangeCmd::DoInitial() { ZsetRangeParentCmd::DoInitial(); } -void ZRangeCmd::Do(std::shared_ptr partition) { - std::vector score_members; - rocksdb::Status s = partition->db()->ZRange(key_, start_, stop_, &score_members); - if (s.ok() || s.IsNotFound()) { +void ZRangeCmd::Do() { + std::vector score_members; + s_ = db_->storage()->ZRange(key_, static_cast(start_), static_cast(stop_), &score_members); + if (s_.ok() || s_.IsNotFound()) { + if (is_ws_) { + char buf[32]; + int64_t len = 0; + res_.AppendArrayLenUint64(score_members.size() * 2); + for (const auto& sm : score_members) { + res_.AppendStringLenUint64(sm.member.size()); + res_.AppendContent(sm.member); + len = pstd::d2string(buf, sizeof(buf), sm.score); + res_.AppendStringLen(len); + res_.AppendContent(buf); + } + } else { + res_.AppendArrayLenUint64(score_members.size()); + for (const auto& sm : score_members) { + res_.AppendStringLenUint64(sm.member.size()); + 
res_.AppendContent(sm.member); + } + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZRangeCmd::ReadCache() { + std::vector score_members; + auto s = db_->cache()->ZRange(key_, start_, stop_, &score_members, db_); + if (s.ok()) { if (is_ws_) { char buf[32]; int64_t len; @@ -192,7 +260,7 @@ void ZRangeCmd::Do(std::shared_ptr partition) { for (const auto& sm : score_members) { res_.AppendStringLen(sm.member.size()); res_.AppendContent(sm.member); - len = slash::d2string(buf, sizeof(buf), sm.score); + len = pstd::d2string(buf, sizeof(buf), sm.score); res_.AppendStringLen(len); res_.AppendContent(buf); } @@ -203,12 +271,25 @@ void ZRangeCmd::Do(std::shared_ptr partition) { res_.AppendContent(sm.member); } } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } return; } +void ZRangeCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZRangeCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } +} + void ZRevrangeCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrange); @@ -217,10 +298,40 @@ void ZRevrangeCmd::DoInitial() { ZsetRangeParentCmd::DoInitial(); } -void ZRevrangeCmd::Do(std::shared_ptr partition) { - std::vector score_members; - rocksdb::Status s = partition->db()->ZRevrange(key_, start_, stop_, &score_members); - if (s.ok() || s.IsNotFound()) { +void ZRevrangeCmd::Do() { + std::vector score_members; + s_ = db_->storage()->ZRevrange(key_, static_cast(start_), static_cast(stop_), &score_members); + if (s_.ok() || s_.IsNotFound()) { + if (is_ws_) { + char buf[32]; + int64_t len = 0; + res_.AppendArrayLenUint64(score_members.size() * 2); + for (const auto& sm : score_members) { + res_.AppendStringLenUint64(sm.member.size()); + res_.AppendContent(sm.member); + len = pstd::d2string(buf, sizeof(buf), sm.score); + res_.AppendStringLen(len); + res_.AppendContent(buf); + } + } else { + res_.AppendArrayLenUint64(score_members.size()); + for (const auto& sm : score_members) { + res_.AppendStringLenUint64(sm.member.size()); + res_.AppendContent(sm.member); + } + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZRevrangeCmd::ReadCache() { + std::vector score_members; + auto s = db_->cache()->ZRevrange(key_, start_, stop_, &score_members, db_); + + if (s.ok()) { if (is_ws_) { char buf[32]; int64_t len; @@ -228,7 +339,7 @@ void ZRevrangeCmd::Do(std::shared_ptr partition) { for (const auto& sm : score_members) { res_.AppendStringLen(sm.member.size()); res_.AppendContent(sm.member); - len = slash::d2string(buf, sizeof(buf), sm.score); + len = pstd::d2string(buf, sizeof(buf), sm.score); res_.AppendStringLen(len); res_.AppendContent(buf); } @@ -239,40 +350,54 @@ void ZRevrangeCmd::Do(std::shared_ptr partition) { res_.AppendContent(sm.member); } } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); } return; } -int32_t DoScoreStrRange(std::string begin_score, std::string end_score, bool *left_close, bool *right_close, double *min_score, double *max_score) { - if (begin_score.size() > 0 && begin_score.at(0) == '(') { +void ZRevrangeCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void 
ZRevrangeCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } +} + +int32_t DoScoreStrRange(std::string begin_score, std::string end_score, bool* left_close, bool* right_close, + double* min_score, double* max_score) { + if (!begin_score.empty() && begin_score.at(0) == '(') { *left_close = false; begin_score.erase(begin_score.begin()); } if (begin_score == "-inf") { - *min_score = blackwidow::ZSET_SCORE_MIN; + *min_score = storage::ZSET_SCORE_MIN; } else if (begin_score == "inf" || begin_score == "+inf") { - *min_score = blackwidow::ZSET_SCORE_MAX; - } else if (!slash::string2d(begin_score.data(), begin_score.size(), min_score)) { + *min_score = storage::ZSET_SCORE_MAX; + } else if (pstd::string2d(begin_score.data(), begin_score.size(), min_score) == 0) { return -1; - } - - if (end_score.size() > 0 && end_score.at(0) == '(') { + } + + if (!end_score.empty() && end_score.at(0) == '(') { *right_close = false; end_score.erase(end_score.begin()); } if (end_score == "+inf" || end_score == "inf") { - *max_score = blackwidow::ZSET_SCORE_MAX; + *max_score = storage::ZSET_SCORE_MAX; } else if (end_score == "-inf") { - *max_score = blackwidow::ZSET_SCORE_MIN; - } else if (!slash::string2d(end_score.data(), end_score.size(), max_score)) { + *max_score = storage::ZSET_SCORE_MIN; + } else if (pstd::string2d(end_score.data(), end_score.size(), max_score) == 0) { return -1; } return 0; } -static void FitLimit(int64_t &count, int64_t &offset, const int64_t size) { +static void FitLimit(int64_t& count, int64_t& offset, const int64_t size) { count = count >= 0 ? count : size; offset = (offset >= 0 && offset < size) ? offset : size; count = (offset + count < size) ? count : size - offset; @@ -280,6 +405,8 @@ static void FitLimit(int64_t &count, int64_t &offset, const int64_t size) { void ZsetRangebyscoreParentCmd::DoInitial() { key_ = argv_[1]; + min_ = argv_[2]; + max_ = argv_[3]; int32_t ret = DoScoreStrRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_score_, &max_score_); if (ret == -1) { res_.SetRes(CmdRes::kErrOther, "min or max is not a float"); @@ -291,23 +418,23 @@ void ZsetRangebyscoreParentCmd::DoInitial() { } size_t index = 4; while (index < argc) { - if (!strcasecmp(argv_[index].data(), "withscores")) { + if (strcasecmp(argv_[index].data(), "withscores") == 0) { with_scores_ = true; - } else if (!strcasecmp(argv_[index].data(), "limit")) { + } else if (strcasecmp(argv_[index].data(), "limit") == 0) { if (index + 3 > argc) { res_.SetRes(CmdRes::kSyntaxErr); return; } index++; - if (!slash::string2l(argv_[index].data(), argv_[index].size(), &offset_)) { + if (pstd::string2int(argv_[index].data(), argv_[index].size(), &offset_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } index++; - if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_)) { + if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; - } + } } else { res_.SetRes(CmdRes::kSyntaxErr); return; @@ -324,38 +451,89 @@ void ZRangebyscoreCmd::DoInitial() { ZsetRangebyscoreParentCmd::DoInitial(); } -void ZRangebyscoreCmd::Do(std::shared_ptr partition) { - if (min_score_ == blackwidow::ZSET_SCORE_MAX || max_score_ == blackwidow::ZSET_SCORE_MIN) { +void ZRangebyscoreCmd::Do() { + if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) { res_.AppendContent("*0"); return; } - std::vector score_members; - rocksdb::Status s = partition->db()->ZRangebyscore(key_, 
min_score_, max_score_, left_close_, right_close_, &score_members); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + std::vector score_members; + s_ = db_->storage()->ZRangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &score_members); + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else if (!s_.ok() && !s_.IsNotFound()) { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); return; } - FitLimit(count_, offset_, score_members.size()); - size_t index = offset_, end = offset_ + count_; + FitLimit(count_, offset_, static_cast(score_members.size())); + size_t index = offset_; + size_t end = offset_ + count_; if (with_scores_) { char buf[32]; int64_t len; res_.AppendArrayLen(count_ * 2); for (; index < end; index++) { - res_.AppendStringLen(score_members[index].member.size()); + res_.AppendStringLenUint64(score_members[index].member.size()); res_.AppendContent(score_members[index].member); - len = slash::d2string(buf, sizeof(buf), score_members[index].score); + len = pstd::d2string(buf, sizeof(buf), score_members[index].score); res_.AppendStringLen(len); res_.AppendContent(buf); } } else { res_.AppendArrayLen(count_); for (; index < end; index++) { - res_.AppendStringLen(score_members[index].member.size()); + res_.AppendStringLenUint64(score_members[index].member.size()); res_.AppendContent(score_members[index].member); } } - return; +} + +void ZRangebyscoreCmd::ReadCache() { + if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) { + res_.AppendContent("*0"); + return; + } + + std::vector score_members; + min_ = std::to_string(min_score_); + max_ = std::to_string(max_score_); + auto s = db_->cache()->ZRangebyscore(key_, min_, max_, &score_members, this); + if (s.ok()) { + auto sm_count = score_members.size(); + if (with_scores_) { + char buf[32]; + int64_t len; + res_.AppendArrayLen(sm_count * 2); + for (auto& item : score_members) { + res_.AppendStringLen(item.member.size()); + res_.AppendContent(item.member); + len = pstd::d2string(buf, sizeof(buf), item.score); + res_.AppendStringLen(len); + res_.AppendContent(buf); + } + } else { + res_.AppendArrayLen(sm_count); + for (auto& item : score_members) { + res_.AppendStringLen(item.member.size()); + res_.AppendContent(item.member); + } + } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void ZRangebyscoreCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZRangebyscoreCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } } void ZRevrangebyscoreCmd::DoInitial() { @@ -364,49 +542,98 @@ void ZRevrangebyscoreCmd::DoInitial() { return; } ZsetRangebyscoreParentCmd::DoInitial(); - double tmp_score; + double tmp_score = 0.0; tmp_score = min_score_; min_score_ = max_score_; max_score_ = tmp_score; - bool tmp_close; + bool tmp_close = false; tmp_close = left_close_; left_close_ = right_close_; right_close_ = tmp_close; } -void ZRevrangebyscoreCmd::Do(std::shared_ptr partition) { - if (min_score_ == blackwidow::ZSET_SCORE_MAX || max_score_ == blackwidow::ZSET_SCORE_MIN) { +void ZRevrangebyscoreCmd::Do() { + if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) { res_.AppendContent("*0"); return; } - std::vector score_members; - rocksdb::Status s = partition->db()->ZRevrangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &score_members); - if 
(!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + std::vector score_members; + s_ = db_->storage()->ZRevrangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &score_members); + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else if (!s_.ok() && !s_.IsNotFound()) { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); return; } - FitLimit(count_, offset_, score_members.size()); - int64_t index = offset_, end = offset_ + count_; + FitLimit(count_, offset_, static_cast(score_members.size())); + int64_t index = offset_; + int64_t end = offset_ + count_; if (with_scores_) { char buf[32]; - int64_t len; + int64_t len = 0; res_.AppendArrayLen(count_ * 2); for (; index < end; index++) { - res_.AppendStringLen(score_members[index].member.size()); + res_.AppendStringLenUint64(score_members[index].member.size()); res_.AppendContent(score_members[index].member); - len = slash::d2string(buf, sizeof(buf), score_members[index].score); + len = pstd::d2string(buf, sizeof(buf), score_members[index].score); res_.AppendStringLen(len); res_.AppendContent(buf); } } else { res_.AppendArrayLen(count_); for (; index < end; index++) { - res_.AppendStringLen(score_members[index].member.size()); + res_.AppendStringLenUint64(score_members[index].member.size()); res_.AppendContent(score_members[index].member); } } - return; +} + +void ZRevrangebyscoreCmd::ReadCache() { + if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN + || max_score_ < min_score_) { + res_.AppendContent("*0"); + return; + } + std::vector score_members; + auto s = db_->cache()->ZRevrangebyscore(key_, min_, max_, &score_members, this, db_); + if (s.ok()) { + auto sm_count = score_members.size(); + if (with_scores_) { + char buf[32]; + int64_t len; + res_.AppendArrayLen(sm_count * 2); + for (auto& item : score_members) { + res_.AppendStringLen(item.member.size()); + res_.AppendContent(item.member); + len = pstd::d2string(buf, sizeof(buf), item.score); + res_.AppendStringLen(len); + res_.AppendContent(buf); + } + } else { + res_.AppendArrayLen(sm_count); + for (auto& item : score_members) { + res_.AppendStringLen(item.member.size()); + res_.AppendContent(item.member); + } + } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void ZRevrangebyscoreCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZRevrangebyscoreCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } } void ZCountCmd::DoInitial() { @@ -415,28 +642,57 @@ void ZCountCmd::DoInitial() { return; } key_ = argv_[1]; + min_ = argv_[2]; + max_ = argv_[3]; int32_t ret = DoScoreStrRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_score_, &max_score_); if (ret == -1) { res_.SetRes(CmdRes::kErrOther, "min or max is not a float"); return; } - return; } -void ZCountCmd::Do(std::shared_ptr partition) { - if (min_score_ == blackwidow::ZSET_SCORE_MAX || max_score_ == blackwidow::ZSET_SCORE_MIN) { +void ZCountCmd::Do() { + if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) { res_.AppendContent("*0"); return; } int32_t count = 0; - rocksdb::Status s = partition->db()->ZCount(key_, min_score_, max_score_, left_close_, right_close_, &count); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->ZCount(key_, min_score_, max_score_, left_close_, right_close_, &count); + if (s_.ok() || s_.IsNotFound()) { + 
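// A missing key is not an error for ZCOUNT: the status is NotFound, count stays 0, and an integer reply is still returned.
+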
res_.AppendInteger(count); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZCountCmd::ReadCache() { + if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) { + res_.AppendContent("*0"); + return; + } + uint64_t count = 0; + auto s = db_->cache()->ZCount(key_, min_, max_, &count, this); + if (s.ok()) { res_.AppendInteger(count); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } - return; +} + +void ZCountCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZCountCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } } void ZRemCmd::DoInitial() { @@ -445,25 +701,34 @@ void ZRemCmd::DoInitial() { return; } key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin() + 2; + auto iter = argv_.begin() + 2; members_.assign(iter, argv_.end()); - return; } -void ZRemCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->ZRem(key_, members_, &count); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(count); +void ZRemCmd::Do() { + s_ = db_->storage()->ZRem(key_, members_, &deleted_); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(deleted_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZRemCmd::DoThroughDB() { + Do(); +} + +void ZRemCmd::DoUpdateCache() { + if (s_.ok() && deleted_ > 0) { + db_->cache()->ZRem(key_, members_, db_); } - return; } void ZsetUIstoreParentCmd::DoInitial() { dest_key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &num_keys_)) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &num_keys_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -471,42 +736,42 @@ void ZsetUIstoreParentCmd::DoInitial() { res_.SetRes(CmdRes::kErrOther, "at least 1 input key is needed for ZUNIONSTORE/ZINTERSTORE"); return; } - int argc = argv_.size(); + auto argc = argv_.size(); if (argc < num_keys_ + 3) { res_.SetRes(CmdRes::kSyntaxErr); return; } keys_.assign(argv_.begin() + 3, argv_.begin() + 3 + num_keys_); weights_.assign(num_keys_, 1); - int index = num_keys_ + 3; + auto index = num_keys_ + 3; while (index < argc) { - if (!strcasecmp(argv_[index].data(), "weights")) { + if (strcasecmp(argv_[index].data(), "weights") == 0) { index++; if (argc < index + num_keys_) { res_.SetRes(CmdRes::kSyntaxErr); return; } double weight; - int base = index; + auto base = index; for (; index < base + num_keys_; index++) { - if (!slash::string2d(argv_[index].data(), argv_[index].size(), &weight)) { + if (pstd::string2d(argv_[index].data(), argv_[index].size(), &weight) == 0) { res_.SetRes(CmdRes::kErrOther, "weight value is not a float"); return; } - weights_[index-base] = weight; + weights_[index - base] = weight; } - } else if (!strcasecmp(argv_[index].data(), "aggregate")) { + } else if (strcasecmp(argv_[index].data(), "aggregate") == 0) { index++; if (argc < index + 1) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!strcasecmp(argv_[index].data(), "sum")) { - aggregate_ = blackwidow::SUM; - } else if (!strcasecmp(argv_[index].data(), "min")) { - aggregate_ = blackwidow::MIN; - } else if (!strcasecmp(argv_[index].data(), "max")) { - aggregate_ = blackwidow::MAX; + if (strcasecmp(argv_[index].data(), 
"sum") == 0) { + aggregate_ = storage::SUM; + } else if (strcasecmp(argv_[index].data(), "min") == 0) { + aggregate_ = storage::MIN; + } else if (strcasecmp(argv_[index].data(), "max") == 0) { + aggregate_ = storage::MAX; } else { res_.SetRes(CmdRes::kSyntaxErr); return; @@ -517,7 +782,6 @@ void ZsetUIstoreParentCmd::DoInitial() { return; } } - return; } void ZUnionstoreCmd::DoInitial() { @@ -528,15 +792,77 @@ void ZUnionstoreCmd::DoInitial() { ZsetUIstoreParentCmd::DoInitial(); } -void ZUnionstoreCmd::Do(std::shared_ptr partition) { +void ZUnionstoreCmd::Do() { int32_t count = 0; - rocksdb::Status s = partition->db()->ZUnionstore(dest_key_, keys_, weights_, aggregate_, &count); - if (s.ok()) { + s_ = db_->storage()->ZUnionstore(dest_key_, keys_, weights_, aggregate_, value_to_dest_, &count); + if (s_.ok()) { res_.AppendInteger(count); + AddSlotKey("z", dest_key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZUnionstoreCmd::DoThroughDB() { + Do(); +} + +void ZUnionstoreCmd::DoUpdateCache() { + if (s_.ok()) { + std::vector v; + v.emplace_back(dest_key_); + db_->cache()->Del(v); } - return; +} + +void ZUnionstoreCmd::DoBinlog() { + PikaCmdArgsType del_args; + del_args.emplace_back("del"); + del_args.emplace_back(dest_key_); + std::shared_ptr del_cmd = std::make_unique(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB); + del_cmd->Initial(del_args, db_name_); + del_cmd->SetConn(GetConn()); + del_cmd->SetResp(resp_.lock()); + del_cmd->DoBinlog(); + + if (value_to_dest_.empty()) { + // The union operation got an empty set, only use del to simulate overwrite the dest_key with empty set + return; + } + + PikaCmdArgsType initial_args; + initial_args.emplace_back("zadd"); + initial_args.emplace_back(dest_key_); + auto first_pair = value_to_dest_.begin(); + char buf[32]; + int64_t d_len = pstd::d2string(buf, sizeof(buf), first_pair->second); + initial_args.emplace_back(buf); + initial_args.emplace_back(first_pair->first); + value_to_dest_.erase(value_to_dest_.begin()); + zadd_cmd_->Initial(initial_args, db_name_); + zadd_cmd_->SetConn(GetConn()); + zadd_cmd_->SetResp(resp_.lock()); + + auto& zadd_argv = zadd_cmd_->argv(); + size_t data_size = d_len + zadd_argv[3].size(); + constexpr size_t kDataSize = 131072; //128KB + for (const auto& it : value_to_dest_) { + if (data_size >= kDataSize) { + // If the binlog has reached the size of 128KB. 
+      zadd_cmd_->DoBinlog();
+      zadd_argv.clear();
+      zadd_argv.emplace_back("zadd");
+      zadd_argv.emplace_back(dest_key_);
+      data_size = 0;
+    }
+    d_len = pstd::d2string(buf, sizeof(buf), it.second);
+    zadd_argv.emplace_back(buf);
+    zadd_argv.emplace_back(it.first);
+    data_size += (d_len + it.first.size());
+  }
+  zadd_cmd_->DoBinlog();
+}

 void ZInterstoreCmd::DoInitial() {
@@ -545,24 +871,81 @@ void ZInterstoreCmd::DoInitial() {
     return;
   }
   ZsetUIstoreParentCmd::DoInitial();
-  return;
 }

-void ZInterstoreCmd::Do(std::shared_ptr<Partition> partition) {
+void ZInterstoreCmd::Do() {
   int32_t count = 0;
-  rocksdb::Status s = partition->db()->ZInterstore(dest_key_, keys_, weights_, aggregate_, &count);
-  if (s.ok()) {
+  s_ = db_->storage()->ZInterstore(dest_key_, keys_, weights_, aggregate_, value_to_dest_, &count);
+  if (s_.ok()) {
     res_.AppendInteger(count);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
   } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
   }
-  return;
+}
+
+void ZInterstoreCmd::DoThroughDB() {
+  Do();
+}
+
+void ZInterstoreCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    std::vector<std::string> v;
+    v.emplace_back(dest_key_);
+    db_->cache()->Del(v);
+  }
+}
+
+void ZInterstoreCmd::DoBinlog() {
+  PikaCmdArgsType del_args;
+  del_args.emplace_back("del");
+  del_args.emplace_back(dest_key_);
+  std::shared_ptr<Cmd> del_cmd = std::make_unique<DelCmd>(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB);
+  del_cmd->Initial(del_args, db_name_);
+  del_cmd->SetConn(GetConn());
+  del_cmd->SetResp(resp_.lock());
+  del_cmd->DoBinlog();
+
+  if (value_to_dest_.empty()) {
+    // The intersection produced an empty set; the del above already simulates overwriting dest_key with an empty set
+    return;
+  }
+
+  PikaCmdArgsType initial_args;
+  initial_args.emplace_back("zadd");
+  initial_args.emplace_back(dest_key_);
+  char buf[32];
+  int64_t d_len = pstd::d2string(buf, sizeof(buf), value_to_dest_[0].score);
+  initial_args.emplace_back(buf);
+  initial_args.emplace_back(value_to_dest_[0].member);
+  zadd_cmd_->Initial(initial_args, db_name_);
+  zadd_cmd_->SetConn(GetConn());
+  zadd_cmd_->SetResp(resp_.lock());
+
+  auto& zadd_argv = zadd_cmd_->argv();
+  size_t data_size = d_len + value_to_dest_[0].member.size();
+  constexpr size_t kDataSize = 131072;  // 128KB
+  for (size_t i = 1; i < value_to_dest_.size(); i++) {
+    if (data_size >= kDataSize) {
+      // If the binlog has reached the size of 128KB. 
(131,072 bytes = 128KB) + zadd_cmd_->DoBinlog(); + zadd_argv.clear(); + zadd_argv.emplace_back("zadd"); + zadd_argv.emplace_back(dest_key_); + data_size = 0; + } + d_len = pstd::d2string(buf, sizeof(buf), value_to_dest_[i].score); + zadd_argv.emplace_back(buf); + zadd_argv.emplace_back(value_to_dest_[i].member); + data_size += (value_to_dest_[i].member.size() + d_len); + } + zadd_cmd_->DoBinlog(); } void ZsetRankParentCmd::DoInitial() { key_ = argv_[1]; member_ = argv_[2]; - return; } void ZRankCmd::DoInitial() { @@ -573,18 +956,43 @@ void ZRankCmd::DoInitial() { ZsetRankParentCmd::DoInitial(); } -void ZRankCmd::Do(std::shared_ptr partition) { +void ZRankCmd::Do() { int32_t rank = 0; - rocksdb::Status s = partition->db()->ZRank(key_, member_, &rank); - if (s.ok()) { + s_ = db_->storage()->ZRank(key_, member_, &rank); + if (s_.ok()) { res_.AppendInteger(rank); - } else if (s.IsNotFound()){ + } else if (s_.IsNotFound()) { res_.AppendContent("$-1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZRankCmd::ReadCache() { + int64_t rank = 0; + auto s = db_->cache()->ZRank(key_, member_, &rank, db_); + if (s.ok()) { + res_.AppendInteger(rank); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void ZRankCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZRankCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } +} + void ZRevrankCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrank); @@ -593,18 +1001,43 @@ void ZRevrankCmd::DoInitial() { ZsetRankParentCmd::DoInitial(); } -void ZRevrankCmd::Do(std::shared_ptr partition) { +void ZRevrankCmd::Do() { int32_t revrank = 0; - rocksdb::Status s = partition->db()->ZRevrank(key_, member_, &revrank); - if (s.ok()) { + s_ = db_->storage()->ZRevrank(key_, member_, &revrank); + if (s_.ok()) { res_.AppendInteger(revrank); - } else if (s.IsNotFound()){ + } else if (s_.IsNotFound()) { res_.AppendContent("$-1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZRevrankCmd::ReadCache() { + int64_t revrank = 0; + auto s = db_->cache()->ZRevrank(key_, member_, &revrank, db_); + if (s.ok()) { + res_.AppendInteger(revrank); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } +void ZRevrankCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZRevrankCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } +} + void ZScoreCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameZScore); @@ -614,36 +1047,57 @@ void ZScoreCmd::DoInitial() { member_ = argv_[2]; } -void ZScoreCmd::Do(std::shared_ptr partition) { - double score = 0; - rocksdb::Status s = partition->db()->ZScore(key_, member_, &score); +void ZScoreCmd::Do() { + double score = 0.0; + s_ = db_->storage()->ZScore(key_, member_, &score); + if (s_.ok()) { + char buf[32]; + int64_t len = pstd::d2string(buf, sizeof(buf), score); + res_.AppendStringLen(len); + res_.AppendContent(buf); + } else if (s_.IsNotFound()) { + res_.AppendContent("$-1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); 
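+    // Statuses other than OK/NotFound/InvalidArgument are reported to the client verbatim.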
+ } +} + +void ZScoreCmd::ReadCache() { + double score = 0.0; + auto s = db_->cache()->ZScore(key_, member_, &score, db_); if (s.ok()) { char buf[32]; - int64_t len = slash::d2string(buf, sizeof(buf), score); + int64_t len = pstd::d2string(buf, sizeof(buf), score); res_.AppendStringLen(len); res_.AppendContent(buf); } else if (s.IsNotFound()) { - res_.AppendContent("$-1"); + res_.SetRes(CmdRes::kCacheMiss); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } +} + +void ZScoreCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZScoreCmd::DoUpdateCache() { return; } -static int32_t DoMemberRange(const std::string &raw_min_member, - const std::string &raw_max_member, - bool *left_close, - bool *right_close, - std::string* min_member, - std::string* max_member) { +static int32_t DoMemberRange(const std::string& raw_min_member, const std::string& raw_max_member, bool* left_close, + bool* right_close, std::string* min_member, std::string* max_member) { if (raw_min_member == "-") { *min_member = "-"; } else if (raw_min_member == "+") { *min_member = "+"; } else { - if (raw_min_member.size() > 0 && raw_min_member.at(0) == '(') { + if (!raw_min_member.empty() && raw_min_member.at(0) == '(') { *left_close = false; - } else if (raw_min_member.size() > 0 && raw_min_member.at(0) == '[') { + } else if (!raw_min_member.empty() && raw_min_member.at(0) == '[') { *left_close = true; } else { return -1; @@ -656,9 +1110,9 @@ static int32_t DoMemberRange(const std::string &raw_min_member, } else if (raw_max_member == "-") { *max_member = "-"; } else { - if (raw_max_member.size() > 0 && raw_max_member.at(0) == '(') { + if (!raw_max_member.empty() && raw_max_member.at(0) == '(') { *right_close = false; - } else if (raw_max_member.size() > 0 && raw_max_member.at(0) == '[') { + } else if (!raw_max_member.empty() && raw_max_member.at(0) == '[') { *right_close = true; } else { return -1; @@ -670,6 +1124,8 @@ static int32_t DoMemberRange(const std::string &raw_min_member, void ZsetRangebylexParentCmd::DoInitial() { key_ = argv_[1]; + min_ = argv_[2]; + max_ = argv_[3]; int32_t ret = DoMemberRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_member_, &max_member_); if (ret == -1) { res_.SetRes(CmdRes::kErrOther, "min or max not valid string range item"); @@ -678,15 +1134,15 @@ void ZsetRangebylexParentCmd::DoInitial() { size_t argc = argv_.size(); if (argc == 4) { return; - } else if (argc != 7 || strcasecmp(argv_[4].data(), "limit")) { + } else if (argc != 7 || strcasecmp(argv_[4].data(), "limit") != 0) { res_.SetRes(CmdRes::kSyntaxErr); return; } - if (!slash::string2l(argv_[5].data(), argv_[5].size(), &offset_)) { + if (pstd::string2int(argv_[5].data(), argv_[5].size(), &offset_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - if (!slash::string2l(argv_[6].data(), argv_[6].size(), &count_)) { + if (pstd::string2int(argv_[6].data(), argv_[6].size(), &count_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } @@ -700,26 +1156,64 @@ void ZRangebylexCmd::DoInitial() { ZsetRangebylexParentCmd::DoInitial(); } -void ZRangebylexCmd::Do(std::shared_ptr partition) { +void ZRangebylexCmd::Do() { if (min_member_ == "+" || max_member_ == "-") { res_.AppendContent("*0"); return; } std::vector members; - rocksdb::Status s = partition->db()->ZRangebylex(key_, min_member_, max_member_, left_close_, right_close_, &members); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + s_ = db_->storage()->ZRangebylex(key_, min_member_, max_member_, left_close_, right_close_, 
&members); + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else if (!s_.ok() && !s_.IsNotFound()) { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); return; } - FitLimit(count_, offset_, members.size()); + FitLimit(count_, offset_, static_cast(members.size())); res_.AppendArrayLen(count_); - size_t index = offset_, end = offset_ + count_; + size_t index = offset_; + size_t end = offset_ + count_; for (; index < end; index++) { - res_.AppendStringLen(members[index].size()); + res_.AppendStringLenUint64(members[index].size()); res_.AppendContent(members[index]); } - return; +} + +void ZRangebylexCmd::ReadCache() { + if (min_member_ == "+" || max_member_ == "-") { + res_.AppendContent("*0"); + return; + } + std::vector members; + auto s = db_->cache()->ZRangebylex(key_, min_, max_, &members, db_); + if (s.ok()) { + FitLimit(count_, offset_, members.size()); + + res_.AppendArrayLen(count_); + size_t index = offset_; + size_t end = offset_ + count_; + for (; index < end; index++) { + res_.AppendStringLen(members[index].size()); + res_.AppendContent(members[index]); + } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void ZRangebylexCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZRangebylexCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } } void ZRevrangebylexCmd::DoInitial() { @@ -734,32 +1228,67 @@ void ZRevrangebylexCmd::DoInitial() { min_member_ = max_member_; max_member_ = tmp_s; - bool tmp_b; + bool tmp_b = false; tmp_b = left_close_; left_close_ = right_close_; right_close_ = tmp_b; } -void ZRevrangebylexCmd::Do(std::shared_ptr partition) { +void ZRevrangebylexCmd::Do() { if (min_member_ == "+" || max_member_ == "-") { res_.AppendContent("*0"); return; } std::vector members; - rocksdb::Status s = partition->db()->ZRangebylex(key_, min_member_, max_member_, left_close_, right_close_, &members); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + s_ = db_->storage()->ZRangebylex(key_, min_member_, max_member_, left_close_, right_close_, &members); + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); return; } - FitLimit(count_, offset_, members.size()); - + if (!s_.ok() && !s_.IsNotFound()) { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + return; + } + FitLimit(count_, offset_, static_cast(members.size())); + res_.AppendArrayLen(count_); - int64_t index = members.size() - 1 - offset_, end = index - count_; + int64_t index = static_cast(members.size()) - 1 - offset_; + int64_t end = index - count_; for (; index > end; index--) { - res_.AppendStringLen(members[index].size()); + res_.AppendStringLenUint64(members[index].size()); res_.AppendContent(members[index]); } - return; +} + +void ZRevrangebylexCmd::ReadCache() { + if (min_member_ == "+" || max_member_ == "-") { + res_.AppendContent("*0"); + return; + } + std::vector members; + auto s = db_->cache()->ZRevrangebylex(key_, min_, max_, &members, db_); + if (s.ok()) { + auto size = count_ < members.size() ? 
count_ : members.size(); + res_.AppendArrayLen(static_cast(size)); + for (int i = 0; i < size; ++i) { + res_.AppendString(members[i]); + } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void ZRevrangebylexCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZRevrangebylexCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } } void ZLexcountCmd::DoInitial() { @@ -768,6 +1297,8 @@ void ZLexcountCmd::DoInitial() { return; } key_ = argv_[1]; + min_ = argv_[2]; + max_ = argv_[3]; int32_t ret = DoMemberRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_member_, &max_member_); if (ret == -1) { res_.SetRes(CmdRes::kErrOther, "min or max not valid string range item"); @@ -775,19 +1306,48 @@ void ZLexcountCmd::DoInitial() { } } -void ZLexcountCmd::Do(std::shared_ptr partition) { +void ZLexcountCmd::Do() { if (min_member_ == "+" || max_member_ == "-") { res_.AppendContent(":0"); return; } int32_t count = 0; - rocksdb::Status s = partition->db()->ZLexcount(key_, min_member_, max_member_, left_close_, right_close_, &count); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + s_ = db_->storage()->ZLexcount(key_, min_member_, max_member_, left_close_, right_close_, &count); + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else if (!s_.ok() && !s_.IsNotFound()) { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); return; } res_.AppendInteger(count); - return; +} + +void ZLexcountCmd::ReadCache() { + if (min_member_ == "+" || max_member_ == "-") { + res_.AppendContent(":0"); + return; + } + uint64_t count = 0; + auto s = db_->cache()->ZLexcount(key_, min_, max_, &count, db_); + if (s.ok()) { + res_.AppendInteger(count); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void ZLexcountCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZLexcountCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } } void ZRemrangebyrankCmd::DoInitial() { @@ -796,25 +1356,38 @@ void ZRemrangebyrankCmd::DoInitial() { return; } key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &start_rank_)) { + min_ = argv_[2]; + max_ = argv_[3]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &start_rank_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &stop_rank_)) { + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &stop_rank_) == 0) { res_.SetRes(CmdRes::kInvalidInt); return; } } -void ZRemrangebyrankCmd::Do(std::shared_ptr partition) { +void ZRemrangebyrankCmd::Do() { int32_t count = 0; - rocksdb::Status s = partition->db()->ZRemrangebyrank(key_, start_rank_, stop_rank_, &count); - if (s.ok() || s.IsNotFound()) { + s_ = db_->storage()->ZRemrangebyrank(key_, static_cast(start_rank_), static_cast(stop_rank_), &count); + if (s_.ok() || s_.IsNotFound()) { res_.AppendInteger(count); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZRemrangebyrankCmd::DoThroughDB() { + Do(); +} + +void ZRemrangebyrankCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->ZRemrangebyrank(key_, min_, max_, ele_deleted_, db_); } - return; } void 
ZRemrangebyscoreCmd::DoInitial() { @@ -828,22 +1401,33 @@ void ZRemrangebyscoreCmd::DoInitial() { res_.SetRes(CmdRes::kErrOther, "min or max is not a float"); return; } - return; } -void ZRemrangebyscoreCmd::Do(std::shared_ptr partition) { - if (min_score_ == blackwidow::ZSET_SCORE_MAX || max_score_ == blackwidow::ZSET_SCORE_MIN) { +void ZRemrangebyscoreCmd::Do() { + if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) { res_.AppendContent(":0"); return; } int32_t count = 0; - rocksdb::Status s = partition->db()->ZRemrangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &count); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + s_ = db_->storage()->ZRemrangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &count); + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else if (!s_.ok() && !s_.IsNotFound()) { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); return; } res_.AppendInteger(count); - return; +} + +void ZRemrangebyscoreCmd::DoThroughDB() { + Do(); +} + +void ZRemrangebyscoreCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->ZRemrangebyscore(key_, min_, max_, db_); + } } void ZRemrangebylexCmd::DoInitial() { @@ -857,24 +1441,35 @@ void ZRemrangebylexCmd::DoInitial() { res_.SetRes(CmdRes::kErrOther, "min or max not valid string range item"); return; } - return; } -void ZRemrangebylexCmd::Do(std::shared_ptr partition) { +void ZRemrangebylexCmd::Do() { if (min_member_ == "+" || max_member_ == "-") { res_.AppendContent("*0"); return; } int32_t count = 0; - rocksdb::Status s = partition->db()->ZRemrangebylex(key_, min_member_, max_member_, left_close_, right_close_, &count); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); + + s_ = db_->storage()->ZRemrangebylex(key_, min_member_, max_member_, left_close_, right_close_, &count); + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else if (!s_.ok() && !s_.IsNotFound()) { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); return; } res_.AppendInteger(count); - return; } +void ZRemrangebylexCmd::DoThroughDB() { + Do(); +} + +void ZRemrangebylexCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->ZRemrangebylex(key_, min_, max_, db_); + } +} void ZPopmaxCmd::DoInitial() { if (!CheckArg(argv_.size())) { @@ -882,64 +1477,67 @@ void ZPopmaxCmd::DoInitial() { return; } key_ = argv_[1]; - if (argv_.size() == 2) { - count_ = 1; - return; - } - if (!slash::string2ll(argv_[2].data(), argv_[2].size(), (long long*)(&count_))) { - res_.SetRes(CmdRes::kInvalidInt); - return; + count_ = 1; + if (argv_.size() > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameZPopmax); + } else if (argv_.size() == 3) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), static_cast(&count_)) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + } } } -void ZPopmaxCmd::Do(std::shared_ptr partition) { - std::vector score_members; - rocksdb::Status s = partition->db()->ZPopMax(key_, count_, &score_members); +void ZPopmaxCmd::Do() { + std::vector score_members; + rocksdb::Status s = db_->storage()->ZPopMax(key_, count_, &score_members); if (s.ok() || s.IsNotFound()) { char buf[32]; - int64_t len; - res_.AppendArrayLen(score_members.size() * 2); + int64_t len = 0; + res_.AppendArrayLenUint64(score_members.size() * 2); for (const auto& sm : score_members) { res_.AppendString(sm.member); - len = slash::d2string(buf, sizeof(buf), sm.score); + len = pstd::d2string(buf, sizeof(buf), sm.score); 
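+      // d2string renders the score into buf and returns the byte count, which becomes the bulk-string length header below.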
res_.AppendStringLen(len); res_.AppendContent(buf); } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } } - void ZPopminCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNameZPopmin); return; } key_ = argv_[1]; - if (argv_.size() == 2) { - count_ = 1; - return; - } - if (!slash::string2ll(argv_[2].data(), argv_[2].size(), (long long*)(&count_))) { - res_.SetRes(CmdRes::kInvalidInt); - return; + count_ = 1; + if (argv_.size() > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameZPopmin); + } else if (argv_.size() == 3) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), static_cast(&count_)) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + } } } -void ZPopminCmd::Do(std::shared_ptr partition) { - std::vector score_members; - rocksdb::Status s = partition->db()->ZPopMin(key_, count_, &score_members); +void ZPopminCmd::Do() { + std::vector score_members; + rocksdb::Status s = db_->storage()->ZPopMin(key_, count_, &score_members); if (s.ok() || s.IsNotFound()) { char buf[32]; - int64_t len; - res_.AppendArrayLen(score_members.size() * 2); + int64_t len = 0; + res_.AppendArrayLenUint64(score_members.size() * 2); for (const auto& sm : score_members) { res_.AppendString(sm.member); - len = slash::d2string(buf, sizeof(buf), sm.score); + len = pstd::d2string(buf, sizeof(buf), sm.score); res_.AppendStringLen(len); res_.AppendContent(buf); } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); } else { res_.SetRes(CmdRes::kErrOther, s.ToString()); } diff --git a/tools/pika_migrate/src/redis_sender.cc b/tools/pika_migrate/src/redis_sender.cc index 74c41eabbd..dfc90464cb 100644 --- a/tools/pika_migrate/src/redis_sender.cc +++ b/tools/pika_migrate/src/redis_sender.cc @@ -11,15 +11,11 @@ #include -#include "slash/include/xdebug.h" - static time_t kCheckDiff = 1; RedisSender::RedisSender(int id, std::string ip, int64_t port, std::string password): id_(id), cli_(NULL), - rsignal_(&commands_mutex_), - wsignal_(&commands_mutex_), ip_(ip), port_(port), password_(password), @@ -37,29 +33,28 @@ RedisSender::~RedisSender() { void RedisSender::ConnectRedis() { while (cli_ == NULL) { // Connect to redis - cli_ = pink::NewRedisCli(); + cli_ = std::shared_ptr(net::NewRedisCli()); cli_->set_connect_timeout(1000); cli_->set_recv_timeout(10000); cli_->set_send_timeout(10000); - slash::Status s = cli_->Connect(ip_, port_); + pstd::Status s = cli_->Connect(ip_, port_); if (!s.ok()) { LOG(WARNING) << "Can not connect to " << ip_ << ":" << port_ << ", status: " << s.ToString(); - delete cli_; cli_ = NULL; sleep(3); continue; } else { // Connect success - + // LOG(INFO) << "RedisSender thread " << id_ << "Connect to redis(" << ip_ << ":" << port_ << ") success"; // Authentication if (!password_.empty()) { - pink::RedisCmdArgsType argv, resp; + net::RedisCmdArgsType argv, resp; std::string cmd; argv.push_back("AUTH"); argv.push_back(password_); - pink::SerializeRedisCommand(argv, &cmd); - slash::Status s = cli_->Send(&cmd); + net::SerializeRedisCommand(argv, &cmd); + pstd::Status s = cli_->Send(&cmd); if (s.ok()) { s = cli_->Recv(&resp); @@ -67,7 +62,6 @@ void RedisSender::ConnectRedis() { } else { LOG(FATAL) << "Connect to redis(" << ip_ << ":" << port_ << ") Invalid password"; cli_->Close(); - delete cli_; cli_ = NULL; should_exit_ = true; return; @@ -75,18 +69,17 @@ void RedisSender::ConnectRedis() { } else { LOG(WARNING) << "send auth failed: " << s.ToString(); cli_->Close(); - delete 
cli_; cli_ = NULL; continue; } } else { // If forget to input password - pink::RedisCmdArgsType argv, resp; + net::RedisCmdArgsType argv, resp; std::string cmd; argv.push_back("PING"); - pink::SerializeRedisCommand(argv, &cmd); - slash::Status s = cli_->Send(&cmd); + net::SerializeRedisCommand(argv, &cmd); + pstd::Status s = cli_->Send(&cmd); if (s.ok()) { s = cli_->Recv(&resp); @@ -94,7 +87,6 @@ void RedisSender::ConnectRedis() { if (resp[0] == "NOAUTH Authentication required.") { LOG(FATAL) << "Ping redis(" << ip_ << ":" << port_ << ") NOAUTH Authentication required"; cli_->Close(); - delete cli_; cli_ = NULL; should_exit_ = true; return; @@ -102,7 +94,6 @@ void RedisSender::ConnectRedis() { } else { LOG(WARNING) << s.ToString(); cli_->Close(); - delete cli_; cli_ = NULL; } } @@ -114,26 +105,18 @@ void RedisSender::ConnectRedis() { void RedisSender::Stop() { set_should_stop(); should_exit_ = true; - commands_mutex_.Lock(); - rsignal_.Signal(); - commands_mutex_.Unlock(); + rsignal_.notify_all(); + wsignal_.notify_all(); } void RedisSender::SendRedisCommand(const std::string &command) { - commands_mutex_.Lock(); - if (commands_queue_.size() < 100000) { + std::unique_lock lock(signal_mutex_); + wsignal_.wait(lock, [this]() { return commandQueueSize() < 100000; }); + if (!should_exit_) { + std::lock_guard l(keys_mutex_); commands_queue_.push(command); - rsignal_.Signal(); - commands_mutex_.Unlock(); - return; - } - - while (commands_queue_.size() > 100000) { - wsignal_.Wait(); + rsignal_.notify_one(); } - commands_queue_.push(command); - rsignal_.Signal(); - commands_mutex_.Unlock(); } int RedisSender::SendCommand(std::string &command) { @@ -141,6 +124,7 @@ int RedisSender::SendCommand(std::string &command) { if (kCheckDiff < now - last_write_time_) { int ret = cli_->CheckAliveness(); if (ret < 0) { + cli_ = nullptr; ConnectRedis(); } last_write_time_ = now; @@ -149,19 +133,18 @@ int RedisSender::SendCommand(std::string &command) { // Send command int idx = 0; do { - slash::Status s = cli_->Send(&command); + pstd::Status s = cli_->Send(&command); + if (s.ok()) { + cli_->Recv(nullptr); return 0; } - LOG(WARNING) << "RedisSender " << id_ << "fails to send redis command " << command << ", times: " << idx + 1 << ", error: " << s.ToString(); - cli_->Close(); - delete cli_; cli_ = NULL; ConnectRedis(); } while(++idx < 3); - + LOG(WARNING) << "RedisSender " << id_ << " fails to send redis command " << command << ", times: " << idx << ", error: " << "send command failed"; return -1; } @@ -173,49 +156,34 @@ void *RedisSender::ThreadMain() { ConnectRedis(); while (!should_exit_) { - commands_mutex_.Lock(); - while (commands_queue_.size() == 0 && !should_exit_) { - rsignal_.TimedWait(100); - // rsignal_.Wait(); + std::unique_lock lock(signal_mutex_); + while (commandQueueSize() == 0 && !should_exit_) { + rsignal_.wait_for(lock, std::chrono::milliseconds(100)); } - // if (commands_queue_.size() == 0 && should_exit_) { + if (should_exit_) { - commands_mutex_.Unlock(); break; } - if (commands_queue_.size() == 0) { - commands_mutex_.Unlock(); + if (commandQueueSize() == 0) { continue; } - commands_mutex_.Unlock(); // get redis command std::string command; - commands_mutex_.Lock(); - command = commands_queue_.front(); - // printf("%d, command %s\n", id_, command.c_str()); - elements_++; - commands_queue_.pop(); - wsignal_.Signal(); - commands_mutex_.Unlock(); - ret = SendCommand(command); - if (ret == 0) { - cnt_++; + { + std::lock_guard l(keys_mutex_); + command = commands_queue_.front(); + elements_++; + 
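// Pop under keys_mutex_ so producers in SendRedisCommand never observe a half-consumed queue.
+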
commands_queue_.pop(); } - if (cnt_ >= 200) { - for(; cnt_ > 0; cnt_--) { - cli_->Recv(NULL); - } - } - } - for(; cnt_ > 0; cnt_--) { - cli_->Recv(NULL); + wsignal_.notify_one(); + ret = SendCommand(command); + } LOG(INFO) << "RedisSender thread " << id_ << " complete"; - delete cli_; cli_ = NULL; return NULL; } diff --git a/tools/pika_migrate/src/rsync_client.cc b/tools/pika_migrate/src/rsync_client.cc new file mode 100644 index 0000000000..61fab0e0d1 --- /dev/null +++ b/tools/pika_migrate/src/rsync_client.cc @@ -0,0 +1,526 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include + +#include "rocksdb/env.h" +#include "pstd/include/pstd_defer.h" +#include "include/pika_server.h" +#include "include/rsync_client.h" + +using namespace net; +using namespace pstd; +using namespace RsyncService; + +extern PikaServer* g_pika_server; + +const int kFlushIntervalUs = 10 * 1000 * 1000; +const int kBytesPerRequest = 4 << 20; +const int kThrottleCheckCycle = 10; + +namespace rsync { +RsyncClient::RsyncClient(const std::string& dir, const std::string& db_name) + : snapshot_uuid_(""), dir_(dir), db_name_(db_name), + state_(IDLE), max_retries_(10), master_ip_(""), master_port_(0), + parallel_num_(g_pika_conf->max_rsync_parallel_num()) { + wo_mgr_.reset(new WaitObjectManager()); + client_thread_ = std::make_unique(3000, 60, wo_mgr_.get()); + client_thread_->set_thread_name("RsyncClientThread"); + work_threads_.resize(GetParallelNum()); + finished_work_cnt_.store(0); +} + +void RsyncClient::Copy(const std::set& file_set, int index) { + Status s = Status::OK(); + for (const auto& file : file_set) { + while (state_.load() == RUNNING) { + LOG(INFO) << "copy remote file, filename: " << file; + s = CopyRemoteFile(file, index); + if (!s.ok()) { + LOG(WARNING) << "copy remote file failed, msg: " << s.ToString(); + continue; + } + break; + } + if (state_.load() != RUNNING) { + break; + } + } + if (!error_stopped_.load()) { + LOG(INFO) << "work_thread index: " << index << " copy remote files done"; + } + finished_work_cnt_.fetch_add(1); + cond_.notify_all(); +} + +bool RsyncClient::Init() { + if (state_ != IDLE) { + LOG(WARNING) << "State should be IDLE when Init"; + return false; + } + master_ip_ = g_pika_server->master_ip(); + master_port_ = g_pika_server->master_port() + kPortShiftRsync2; + file_set_.clear(); + client_thread_->StartThread(); + bool ret = ComparisonUpdate(); + if (!ret) { + LOG(WARNING) << "RsyncClient recover failed"; + client_thread_->StopThread(); + state_.store(IDLE); + return false; + } + finished_work_cnt_.store(0); + LOG(INFO) << "RsyncClient recover success"; + return true; +} + +void* RsyncClient::ThreadMain() { + if (file_set_.empty()) { + LOG(INFO) << "No remote files need copy, RsyncClient exit and going to delete dir:" << dir_; + DeleteDirIfExist(dir_); + state_.store(STOP); + all_worker_exited_.store(true); + return nullptr; + } + + Status s = Status::OK(); + LOG(INFO) << "RsyncClient begin to copy remote files"; + std::vector > file_vec(GetParallelNum()); + int index = 0; + for (const auto& file : file_set_) { + file_vec[index++ % GetParallelNum()].insert(file); + } + all_worker_exited_.store(false); + for (int i = 0; i < GetParallelNum(); i++) { + work_threads_[i] = std::move(std::thread(&RsyncClient::Copy, this, 
file_vec[i], i));
+  }
+
+  std::string meta_file_path = GetLocalMetaFilePath();
+  std::ofstream outfile;
+  outfile.open(meta_file_path, std::ios_base::app);
+  if (!outfile.is_open()) {
+    LOG(ERROR) << "unable to open meta file " << meta_file_path << ", error:" << strerror(errno);
+    error_stopped_.store(true);
+    state_.store(STOP);
+  }
+  DEFER {
+    outfile.close();
+  };
+
+  std::string meta_rep;
+  uint64_t start_time = pstd::NowMicros();
+
+  while (state_.load() == RUNNING) {
+    uint64_t elapse = pstd::NowMicros() - start_time;
+    if (elapse < kFlushIntervalUs) {
+      int wait_for_us = kFlushIntervalUs - elapse;
+      std::unique_lock<std::mutex> lock(mu_);
+      cond_.wait_for(lock, std::chrono::microseconds(wait_for_us));
+    }
+
+    if (state_.load() != RUNNING) {
+      break;
+    }
+
+    start_time = pstd::NowMicros();
+    std::map<std::string, std::string> files_map;
+    {
+      std::lock_guard<std::mutex> guard(mu_);
+      files_map.swap(meta_table_);
+    }
+    for (const auto& file : files_map) {
+      meta_rep.append(file.first + ":" + file.second);
+      meta_rep.append("\n");
+    }
+    outfile << meta_rep;
+    outfile.flush();
+    meta_rep.clear();
+
+    if (finished_work_cnt_.load() == GetParallelNum()) {
+      break;
+    }
+  }
+
+  for (int i = 0; i < GetParallelNum(); i++) {
+    work_threads_[i].join();
+  }
+  finished_work_cnt_.store(0);
+  state_.store(STOP);
+  if (!error_stopped_.load()) {
+    LOG(INFO) << "RsyncClient copy remote files done";
+  } else {
+    if (DeleteDirIfExist(dir_)) {
+      // the dir_ does not exist, or it existed and was successfully deleted
+      LOG(ERROR) << "RsyncClient stopped with errors, deleted:" << dir_;
+    } else {
+      // the dir_ exists but could not be deleted
+      LOG(ERROR) << "RsyncClient stopped with errors, but failed to delete " << dir_ << " when cleaning";
+    }
+  }
+  all_worker_exited_.store(true);
+  return nullptr;
+}
+
+Status RsyncClient::CopyRemoteFile(const std::string& filename, int index) {
+  const std::string filepath = dir_ + "/" + filename;
+  std::unique_ptr<RsyncWriter> writer(new RsyncWriter(filepath));
+  Status s = Status::OK();
+  size_t offset = 0;
+  int retries = 0;
+
+  DEFER {
+    if (writer) {
+      writer->Close();
+      writer.reset();
+    }
+    if (!s.ok()) {
+      DeleteFile(filepath);
+    }
+  };
+
+  while (retries < max_retries_) {
+    if (state_.load() != RUNNING) {
+      break;
+    }
+    size_t copy_file_begin_time = pstd::NowMicros();
+    size_t count = Throttle::GetInstance().ThrottledByThroughput(kBytesPerRequest);
+    if (count == 0) {
+      std::this_thread::sleep_for(std::chrono::milliseconds(1000 / kThrottleCheckCycle));
+      continue;
+    }
+    RsyncRequest request;
+    request.set_reader_index(index);
+    request.set_type(kRsyncFile);
+    request.set_db_name(db_name_);
+    /*
+     * The slot field still exists in the protobuf definition, so
+     * slot_id is set to the default value 0 for compatibility
+     * with older versions; slot_id itself is not used
+     */
+    request.set_slot_id(0);
+    FileRequest* file_req = request.mutable_file_req();
+    file_req->set_filename(filename);
+    file_req->set_offset(offset);
+    file_req->set_count(count);
+
+    std::string to_send;
+    request.SerializeToString(&to_send);
+    WaitObject* wo = wo_mgr_->UpdateWaitObject(index, filename, kRsyncFile, offset);
+    s = client_thread_->Write(master_ip_, master_port_, to_send);
+    if (!s.ok()) {
+      LOG(WARNING) << "send rsync request failed";
+      continue;
+    }
+
+    std::shared_ptr<RsyncService::RsyncResponse> resp = nullptr;
+    s = wo->Wait(resp);
+    if (s.IsTimeout() || resp == nullptr) {
+      LOG(WARNING) << s.ToString();
+      retries++;
+      continue;
+    }
+
+    if (resp->code() != RsyncService::kOk) {
+      return Status::IOError("kRsyncFile request failed, master response error code");
+    }
+
+    size_t ret_count = resp->file_resp().count();
+    size_t elapse_time_us = pstd::NowMicros() - copy_file_begin_time;
+    Throttle::GetInstance().ReturnUnusedThroughput(count, ret_count, elapse_time_us);
+
+    if (resp->snapshot_uuid() != snapshot_uuid_) {
+      LOG(WARNING) << "received newer dump, reset state to STOP, local_snapshot_uuid:"
+                   << snapshot_uuid_ << ", remote snapshot uuid: " << resp->snapshot_uuid();
+      state_.store(STOP);
+      error_stopped_.store(true);
+      return s;
+    }
+
+    s = writer->Write((uint64_t)offset, ret_count, resp->file_resp().data().c_str());
+    if (!s.ok()) {
+      LOG(WARNING) << "rsync client write file error";
+      break;
+    }
+
+    offset += resp->file_resp().count();
+    if (resp->file_resp().eof()) {
+      s = writer->Fsync();
+      if (!s.ok()) {
+        return s;
+      }
+      mu_.lock();
+      meta_table_[filename] = "";
+      mu_.unlock();
+      break;
+    }
+    retries = 0;
+  }
+
+  return s;
+}
+
+Status RsyncClient::Start() {
+  StartThread();
+  return Status::OK();
+}
+
+Status RsyncClient::Stop() {
+  if (state_ == IDLE) {
+    return Status::OK();
+  }
+  LOG(WARNING) << "RsyncClient stop ...";
+  state_ = STOP;
+  cond_.notify_all();
+  StopThread();
+  client_thread_->StopThread();
+  JoinThread();
+  client_thread_->JoinThread();
+  state_ = IDLE;
+  return Status::OK();
+}
+
+bool RsyncClient::ComparisonUpdate() {
+  std::string local_snapshot_uuid;
+  std::string remote_snapshot_uuid;
+  std::set<std::string> local_file_set;
+  std::set<std::string> remote_file_set;
+  std::map<std::string, std::string> local_file_map;
+
+  Status s = PullRemoteMeta(&remote_snapshot_uuid, &remote_file_set);
+  if (!s.ok()) {
+    LOG(WARNING) << "copy remote meta failed! error:" << s.ToString();
+    return false;
+  }
+
+  s = LoadLocalMeta(&local_snapshot_uuid, &local_file_map);
+  if (!s.ok()) {
+    LOG(WARNING) << "load local meta failed";
+    return false;
+  }
+  for (auto const& file : local_file_map) {
+    local_file_set.insert(file.first);
+  }
+
+  std::set<std::string> expired_files;
+  if (remote_snapshot_uuid != local_snapshot_uuid) {
+    snapshot_uuid_ = remote_snapshot_uuid;
+    file_set_ = remote_file_set;
+    expired_files = local_file_set;
+  } else {
+    std::set<std::string> newly_files;
+    set_difference(remote_file_set.begin(), remote_file_set.end(),
+                   local_file_set.begin(), local_file_set.end(),
+                   inserter(newly_files, newly_files.begin()));
+    set_difference(local_file_set.begin(), local_file_set.end(),
+                   remote_file_set.begin(), remote_file_set.end(),
+                   inserter(expired_files, expired_files.begin()));
+    file_set_.insert(newly_files.begin(), newly_files.end());
+  }
+
+  s = CleanUpExpiredFiles(local_snapshot_uuid != remote_snapshot_uuid, expired_files);
+  if (!s.ok()) {
+    LOG(WARNING) << "clean up expired files failed";
+    return false;
+  }
+  s = UpdateLocalMeta(snapshot_uuid_, expired_files, &local_file_map);
+  if (!s.ok()) {
+    LOG(WARNING) << "update local meta failed";
+    return false;
+  }
+
+  state_.store(RUNNING);
+  error_stopped_.store(false);
+  LOG(INFO) << "copy meta data done, db name: " << db_name_
+            << " snapshot_uuid: " << snapshot_uuid_
+            << " file count: " << file_set_.size()
+            << " expired file count: " << expired_files.size()
+            << " local file count: " << local_file_set.size()
+            << " remote file count: " << remote_file_set.size()
+            << " remote snapshot_uuid: " << remote_snapshot_uuid
+            << " local snapshot_uuid: " << local_snapshot_uuid
+            << " file_set_: " << file_set_.size();
+  for_each(file_set_.begin(), file_set_.end(),
+           [](auto& file) { LOG(WARNING) << "file_set: " << file; });
+  return true;
+}
+
+Status RsyncClient::PullRemoteMeta(std::string* snapshot_uuid, std::set<std::string>* file_set) {
+  Status s;
+  int retries = 0;
+  RsyncRequest request;
+  request.set_reader_index(0);
+  request.set_db_name(db_name_);
+  /*
+   * The slot field still exists in the protobuf definition, so
+   * slot_id is set to the default value 0 for compatibility
+   * with older versions; slot_id itself is not used
+   */
+  request.set_slot_id(0);
+  request.set_type(kRsyncMeta);
+  std::string to_send;
+  request.SerializeToString(&to_send);
+  while (retries < max_retries_) {
+    WaitObject* wo = wo_mgr_->UpdateWaitObject(0, "", kRsyncMeta, kInvalidOffset);
+    s = client_thread_->Write(master_ip_, master_port_, to_send);
+    if (!s.ok()) {
+      retries++;
+    }
+    std::shared_ptr<RsyncService::RsyncResponse> resp;
+    s = wo->Wait(resp);
+    if (s.IsTimeout()) {
+      LOG(WARNING) << "rsync PullRemoteMeta request timeout, "
+                   << "retry times: " << retries;
+      retries++;
+      continue;
+    }
+
+    if (resp.get() == nullptr || resp->code() != RsyncService::kOk) {
+      s = Status::IOError("kRsyncMeta request failed! db does not exist or is doing bgsave");
+      LOG(WARNING) << s.ToString() << ", retries:" << retries;
+      sleep(1);
+      retries++;
+      continue;
+    }
+    LOG(INFO) << "receive rsync meta infos, snapshot_uuid: " << resp->snapshot_uuid()
+              << ", files count: " << resp->meta_resp().filenames_size();
+    for (const std::string& item : resp->meta_resp().filenames()) {
+      file_set->insert(item);
+    }
+
+    *snapshot_uuid = resp->snapshot_uuid();
+    s = Status::OK();
+    break;
+  }
+  return s;
+}
+
+Status RsyncClient::LoadLocalMeta(std::string* snapshot_uuid, std::map<std::string, std::string>* file_map) {
+  std::string meta_file_path = GetLocalMetaFilePath();
+  if (!FileExists(meta_file_path)) {
+    LOG(WARNING) << kDumpMetaFileName << " does not exist";
+    return Status::OK();
+  }
+
+  FILE* fp = nullptr;
+  char* line = nullptr;
+  size_t len = 0;
+  ssize_t read = 0;
+  int32_t line_num = 0;
+
+  int retry_times = 5;
+
+  while (retry_times > 0) {
+    retry_times--;
+    fp = fopen(meta_file_path.c_str(), "r");
+    if (fp == nullptr) {
+      LOG(WARNING) << "open meta file failed, meta_path: " << dir_;
+    } else {
+      break;
+    }
+  }
+
+  // if the file cannot be read from disk, use the remote file directly
+  if (fp == nullptr) {
+    LOG(WARNING) << "open meta file failed, meta_path: " << meta_file_path << ", retry times: " << retry_times;
+    return Status::IOError("open meta file failed, dir: ", meta_file_path);
+  }
+
+  while ((read = getline(&line, &len, fp)) != -1) {
+    std::string str(line);
+    std::string::size_type pos;
+    while ((pos = str.find("\r")) != std::string::npos) {
+      str.erase(pos, 1);
+    }
+    while ((pos = str.find("\n")) != std::string::npos) {
+      str.erase(pos, 1);
+    }
+
+    if (str.empty()) {
+      continue;
+    }
+
+    if (line_num == 0) {
+      *snapshot_uuid = str.erase(0, kUuidPrefix.size());
+    } else {
+      if ((pos = str.find(":")) != std::string::npos) {
+        std::string filename = str.substr(0, pos);
+        std::string checksum = str.substr(pos + 1);
+        (*file_map)[filename] = checksum;
+      }
+    }
+
+    line_num++;
+  }
+  free(line);  // getline() allocates the buffer; release it
+  fclose(fp);
+  return Status::OK();
+}
+
+Status RsyncClient::CleanUpExpiredFiles(bool need_reset_path, const std::set<std::string>& files) {
+  if (need_reset_path) {
+    std::string db_path = dir_ + (dir_.back() == '/' ? "" : "/");
+    pstd::DeleteDirIfExist(db_path);
+    int db_instance_num = g_pika_conf->db_instance_num();
+    for (int idx = 0; idx < db_instance_num; idx++) {
+      pstd::CreatePath(db_path + std::to_string(idx));
+    }
+    return Status::OK();
+  }
+
+  std::string db_path = dir_ + (dir_.back() == '/' ? 
"" : "/"); + for (const auto& file : files) { + bool b = pstd::DeleteDirIfExist(db_path + file); + if (!b) { + LOG(WARNING) << "delete file failed, file: " << file; + return Status::IOError("delete file failed"); + } + } + return Status::OK(); +} + +Status RsyncClient::UpdateLocalMeta(const std::string& snapshot_uuid, const std::set& expired_files, + std::map* localFileMap) { + if (localFileMap->empty()) { + return Status::OK(); + } + + for (const auto& item : expired_files) { + localFileMap->erase(item); + } + + std::string meta_file_path = GetLocalMetaFilePath(); + pstd::DeleteFile(meta_file_path); + + std::unique_ptr file; + pstd::Status s = pstd::NewWritableFile(meta_file_path, file); + if (!s.ok()) { + LOG(WARNING) << "create meta file failed, meta_file_path: " << meta_file_path; + return s; + } + file->Append(kUuidPrefix + snapshot_uuid + "\n"); + + for (const auto& item : *localFileMap) { + std::string line = item.first + ":" + item.second + "\n"; + file->Append(line); + } + s = file->Close(); + if (!s.ok()) { + LOG(WARNING) << "flush meta file failed, meta_file_path: " << meta_file_path; + return s; + } + return Status::OK(); +} + +std::string RsyncClient::GetLocalMetaFilePath() { + std::string db_path = dir_ + (dir_.back() == '/' ? "" : "/"); + return db_path + kDumpMetaFileName; +} + +int RsyncClient::GetParallelNum() { + return parallel_num_; +} + +} // end namespace rsync + diff --git a/tools/pika_migrate/src/rsync_client_thread.cc b/tools/pika_migrate/src/rsync_client_thread.cc new file mode 100644 index 0000000000..8e93a4c69b --- /dev/null +++ b/tools/pika_migrate/src/rsync_client_thread.cc @@ -0,0 +1,45 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/rsync_client_thread.h" +#include "include/rsync_client.h" +#include "include/pika_define.h" + +using namespace pstd; +using namespace net; +using namespace RsyncService; + +namespace rsync { +class RsyncClient; +RsyncClientConn::RsyncClientConn(int fd, const std::string& ip_port, + net::Thread* thread, void* worker_specific_data, NetMultiplexer* mpx) + : PbConn(fd, ip_port, thread, mpx), cb_handler_(worker_specific_data) {} + +RsyncClientConn::~RsyncClientConn() {} + +int RsyncClientConn::DealMessage() { + RsyncResponse* response = new RsyncResponse(); + ::google::protobuf::io::ArrayInputStream input(rbuf_ + cur_pos_ - header_len_, header_len_); + ::google::protobuf::io::CodedInputStream decoder(&input); + decoder.SetTotalBytesLimit(PIKA_MAX_CONN_RBUF); + bool success = response->ParseFromCodedStream(&decoder) && decoder.ConsumedEntireMessage(); + if (!success) { + delete response; + LOG(WARNING) << "ParseFromArray FAILED! 
" + << " msg_len: " << header_len_; + return -1; + } + WaitObjectManager* handler = (WaitObjectManager*)cb_handler_; + handler->WakeUp(response); + return 0; +} + +RsyncClientThread::RsyncClientThread(int cron_interval, int keepalive_timeout, void* scheduler) + : ClientThread(&conn_factory_, cron_interval, keepalive_timeout, &handle_, nullptr), + conn_factory_(scheduler) {} + +RsyncClientThread::~RsyncClientThread() {} +} //end namespace rsync + diff --git a/tools/pika_migrate/src/rsync_server.cc b/tools/pika_migrate/src/rsync_server.cc new file mode 100644 index 0000000000..5696719980 --- /dev/null +++ b/tools/pika_migrate/src/rsync_server.cc @@ -0,0 +1,249 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include +#include + +#include "pstd_hash.h" +#include "include/pika_server.h" +#include "include/rsync_server.h" +#include "pstd/include/pstd_defer.h" + +extern PikaServer* g_pika_server; +namespace rsync { + +using namespace net; +using namespace pstd; +using namespace RsyncService; + +void RsyncWriteResp(RsyncService::RsyncResponse& response, std::shared_ptr conn) { + std::string reply_str; + if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { + LOG(WARNING) << "Process FileRsync request serialization failed"; + conn->NotifyClose(); + return; + } + conn->NotifyWrite(); +} + +RsyncServer::RsyncServer(const std::set& ips, const int port) { + work_thread_ = std::make_unique(2, 100000, "RsyncServerWork"); + rsync_server_thread_ = std::make_unique(ips, port, 1 * 1000, this); +} + +RsyncServer::~RsyncServer() { + //TODO: handle destory + LOG(INFO) << "Rsync server destroyed"; +} + +void RsyncServer::Schedule(net::TaskFunc func, void* arg) { + work_thread_->Schedule(func, arg); +} + +int RsyncServer::Start() { + LOG(INFO) << "start RsyncServer ..."; + rsync_server_thread_->set_thread_name("RsyncServerThread"); + int res = rsync_server_thread_->StartThread(); + if (res != net::kSuccess) { + LOG(FATAL) << "Start rsync Server Thread Error. ret_code: " << res << " message: " + << (res == net::kBindError ? ": bind port conflict" : ": other error"); + } + res = work_thread_->start_thread_pool(); + if (res != net::kSuccess) { + LOG(FATAL) << "Start rsync Server ThreadPool Error, ret_code: " << res << " message: " + << (res == net::kCreateThreadError ? 
": create thread error " : ": other error"); + } + LOG(INFO) << "RsyncServer started ..."; + return res; +} + +int RsyncServer::Stop() { + LOG(INFO) << "stop RsyncServer ..."; + work_thread_->stop_thread_pool(); + rsync_server_thread_->StopThread(); + return 0; +} + +RsyncServerConn::RsyncServerConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, NetMultiplexer* mpx) + : PbConn(connfd, ip_port, thread, mpx), data_(worker_specific_data) { + readers_.resize(kMaxRsyncParallelNum); + for (int i = 0; i < kMaxRsyncParallelNum; i++) { + readers_[i].reset(new RsyncReader()); + } +} + +RsyncServerConn::~RsyncServerConn() { + std::lock_guard guard(mu_); + for (int i = 0; i < readers_.size(); i++) { + readers_[i].reset(); + } +} + +int RsyncServerConn::DealMessage() { + std::shared_ptr req = std::make_shared(); + bool parse_res = req->ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_); + if (!parse_res) { + LOG(WARNING) << "Pika rsync server connection pb parse error."; + return -1; + } + switch (req->type()) { + case RsyncService::kRsyncMeta: { + auto task_arg = + new RsyncServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + ((RsyncServer*)(data_))->Schedule(&RsyncServerConn::HandleMetaRsyncRequest, task_arg); + break; + } + case RsyncService::kRsyncFile: { + auto task_arg = + new RsyncServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + ((RsyncServer*)(data_))->Schedule(&RsyncServerConn::HandleFileRsyncRequest, task_arg); + break; + } + default: { + LOG(WARNING) << "Invalid RsyncRequest type"; + } + } + return 0; +} + +void RsyncServerConn::HandleMetaRsyncRequest(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + const std::shared_ptr req = task_arg->req; + std::shared_ptr conn = task_arg->conn; + std::string db_name = req->db_name(); + std::shared_ptr db = g_pika_server->GetDB(db_name); + + RsyncService::RsyncResponse response; + response.set_reader_index(req->reader_index()); + response.set_code(RsyncService::kOk); + response.set_type(RsyncService::kRsyncMeta); + response.set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + response.set_slot_id(0); + + std::string snapshot_uuid; + if (!db || db->IsBgSaving()) { + LOG(WARNING) << "waiting bgsave done..."; + response.set_snapshot_uuid(snapshot_uuid); + response.set_code(RsyncService::kErr); + RsyncWriteResp(response, conn); + return; + } + + std::vector filenames; + g_pika_server->GetDumpMeta(db_name, &filenames, &snapshot_uuid); + response.set_snapshot_uuid(snapshot_uuid); + + LOG(INFO) << "Rsync Meta request, snapshot_uuid: " << snapshot_uuid + << " files count: " << filenames.size() << " file list: "; + std::for_each(filenames.begin(), filenames.end(), [](auto& file) { + LOG(INFO) << "rsync snapshot file: " << file; + }); + + RsyncService::MetaResponse* meta_resp = response.mutable_meta_resp(); + for (const auto& filename : filenames) { + meta_resp->add_filenames(filename); + } + RsyncWriteResp(response, conn); +} + +void RsyncServerConn::HandleFileRsyncRequest(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + const std::shared_ptr req = task_arg->req; + std::shared_ptr conn = task_arg->conn; + + std::string db_name = req->db_name(); + std::string filename = req->file_req().filename(); + size_t offset = req->file_req().offset(); + size_t count = req->file_req().count(); + + RsyncService::RsyncResponse 
+
+void RsyncServerConn::HandleFileRsyncRequest(void* arg) {
+  std::unique_ptr<RsyncServerTaskArg> task_arg(static_cast<RsyncServerTaskArg*>(arg));
+  const std::shared_ptr<RsyncService::RsyncRequest> req = task_arg->req;
+  std::shared_ptr<RsyncServerConn> conn = task_arg->conn;
+
+  std::string db_name = req->db_name();
+  std::string filename = req->file_req().filename();
+  size_t offset = req->file_req().offset();
+  size_t count = req->file_req().count();
+
+  RsyncService::RsyncResponse response;
+  response.set_reader_index(req->reader_index());
+  response.set_code(RsyncService::kOk);
+  response.set_type(RsyncService::kRsyncFile);
+  response.set_db_name(db_name);
+  /*
+   * Since the slot field is written in protobuffer,
+   * slot_id is set to the default value 0 for compatibility
+   * with older versions, but slot_id is not used
+   */
+  response.set_slot_id(0);
+
+  std::string snapshot_uuid;
+  Status s = g_pika_server->GetDumpUUID(db_name, &snapshot_uuid);
+  response.set_snapshot_uuid(snapshot_uuid);
+  if (!s.ok()) {
+    LOG(WARNING) << "rsyncserver get snapshotUUID failed";
+    response.set_code(RsyncService::kErr);
+    RsyncWriteResp(response, conn);
+    return;
+  }
+
+  std::shared_ptr<DB> db = g_pika_server->GetDB(db_name);
+  if (!db) {
+    LOG(WARNING) << "cannot find db for db_name: " << db_name;
+    response.set_code(RsyncService::kErr);
+    RsyncWriteResp(response, conn);
+    return;  // bail out: falling through would dereference the null db below
+  }
+
+  const std::string filepath = db->bgsave_info().path + "/" + filename;
+  char* buffer = new char[req->file_req().count() + 1];
+  size_t bytes_read{0};
+  std::string checksum = "";
+  bool is_eof = false;
+  std::shared_ptr<RsyncReader> reader = conn->readers_[req->reader_index()];
+  s = reader->Read(filepath, offset, count, buffer,
+                   &bytes_read, &checksum, &is_eof);
+  if (!s.ok()) {
+    response.set_code(RsyncService::kErr);
+    RsyncWriteResp(response, conn);
+    delete[] buffer;
+    return;
+  }
+
+  RsyncService::FileResponse* file_resp = response.mutable_file_resp();
+  file_resp->set_data(buffer, bytes_read);
+  file_resp->set_eof(is_eof);
+  file_resp->set_checksum(checksum);
+  file_resp->set_filename(filename);
+  file_resp->set_count(bytes_read);
+  file_resp->set_offset(offset);
+
+  RsyncWriteResp(response, conn);
+  delete[] buffer;
+}
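+// [Editor's note: illustrative sketch, not part of the original patch.]
+// The handler above serves one chunk per request; a client walks a file by
+// advancing `offset` with the returned `count` until `eof` is set. Simplified
+// receive loop (SendAndWait and AppendToLocalFile are hypothetical helpers):
+//
+//   uint64_t offset = 0;
+//   RsyncService::RsyncResponse resp;
+//   do {
+//     resp = SendAndWait(filename, offset, /*count=*/4 * 1024 * 1024);
+//     AppendToLocalFile(filename, resp.file_resp().data());
+//     offset += resp.file_resp().count();
+//   } while (!resp.file_resp().eof());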
+
+RsyncServerThread::RsyncServerThread(const std::set<std::string>& ips, int port, int cron_interval, RsyncServer* arg)
+    : HolyThread(ips, port, &conn_factory_, cron_interval, &handle_, true), conn_factory_(arg) {}
+
+RsyncServerThread::~RsyncServerThread() {
+  LOG(WARNING) << "RsyncServerThread destroyed";
+}
+
+void RsyncServerThread::RsyncServerHandle::FdClosedHandle(int fd, const std::string& ip_port) const {
+  LOG(WARNING) << "ip_port: " << ip_port << " connection closed";
+}
+
+void RsyncServerThread::RsyncServerHandle::FdTimeoutHandle(int fd, const std::string& ip_port) const {
+  LOG(WARNING) << "ip_port: " << ip_port << " connection timeout";
+}
+
+bool RsyncServerThread::RsyncServerHandle::AccessHandle(int fd, std::string& ip_port) const {
+  LOG(WARNING) << "fd: " << fd << " ip_port: " << ip_port << " connection accepted";
+  return true;
+}
+
+void RsyncServerThread::RsyncServerHandle::CronHandle() const {
+}
+
+}  // end namespace rsync
+
diff --git a/tools/pika_migrate/src/rsync_service.proto b/tools/pika_migrate/src/rsync_service.proto
new file mode 100644
index 0000000000..ee23b3e8a4
--- /dev/null
+++ b/tools/pika_migrate/src/rsync_service.proto
@@ -0,0 +1,51 @@
+syntax = "proto2";
+package RsyncService;
+
+enum Type {
+  kRsyncMeta = 1;
+  kRsyncFile = 2;
+}
+
+enum StatusCode {
+  kOk = 1;
+  kErr = 2;
+}
+
+message MetaResponse {
+  repeated string filenames = 1;
+}
+
+message FileRequest {
+  required string filename = 1;
+  required uint64 count = 2;
+  required uint64 offset = 3;
+}
+
+message FileResponse {
+  required int32 eof = 1;
+  required uint64 count = 2;
+  required uint64 offset = 3;
+  required bytes data = 4;
+  required string checksum = 5;
+  required string filename = 6;
+}
+
+message RsyncRequest {
+  required Type type = 1;
+  required int32 reader_index = 2;
+  required string db_name = 3;
+  required uint32 slot_id = 4;
+  optional FileRequest file_req = 5;
+}
+
+message RsyncResponse {
+  required Type type = 1;
+  required int32 reader_index = 2;
+  required string snapshot_uuid = 3;
+  required string db_name = 4;
+  required uint32 slot_id = 5;
+  required StatusCode code = 6;
+  optional MetaResponse meta_resp = 7;
+  optional FileResponse file_resp = 8;
+}
+
diff --git a/tools/pika_migrate/src/throttle.cc b/tools/pika_migrate/src/throttle.cc
new file mode 100644
index 0000000000..4919fb453a
--- /dev/null
+++ b/tools/pika_migrate/src/throttle.cc
@@ -0,0 +1,56 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "include/throttle.h"
+#include
+#include
+#include "pstd/include/env.h"
+
+namespace rsync {
+
+Throttle::Throttle(size_t throttle_throughput_bytes, size_t check_cycle)
+    : throttle_throughput_bytes_(throttle_throughput_bytes),
+      last_throughput_check_time_us_(caculate_check_time_us_(pstd::NowMicros(), check_cycle)),
+      cur_throughput_bytes_(0) {}
+
+Throttle::~Throttle() {}
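+// [Editor's note: illustrative worked example, not part of the original patch.]
+// ThrottledByThroughput() below grants at most throttle_throughput_bytes_ /
+// check_cycle_ bytes per cycle. For instance, with a 60 MB/s budget split into
+// check_cycle_ = 10 cycles, each 100 ms cycle may hand out up to 6 MB:
+//
+//   size_t limit_per_cycle = 60 * 1024 * 1024 / 10;        // 6291456 bytes
+//   // a caller that already used 5 MB this cycle and asks for 2 MB more
+//   size_t available = limit_per_cycle - 5 * 1024 * 1024;  // gets only 1 MB
+//
+// Any unconsumed grant is handed back through ReturnUnusedThroughput(), so a
+// short read does not permanently count against the current cycle's budget.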
+
+size_t Throttle::ThrottledByThroughput(size_t bytes) {
+  size_t available_size = bytes;
+  size_t now = pstd::NowMicros();
+  size_t limit_per_cycle = throttle_throughput_bytes_.load() / check_cycle_;
+  std::unique_lock lock(keys_mutex_);
+  if (cur_throughput_bytes_ + bytes > limit_per_cycle) {
+    // reading another |bytes| exceeds the limit
+    if (now - last_throughput_check_time_us_ <= 1 * 1000 * 1000 / check_cycle_) {
+      // if the time interval is less than or equal to a cycle, read more data
+      // to make full use of the throughput of the current cycle.
+      available_size = limit_per_cycle > cur_throughput_bytes_ ? limit_per_cycle - cur_throughput_bytes_ : 0;
+      cur_throughput_bytes_ = limit_per_cycle;
+    } else {
+      // otherwise, read the data in the next cycle.
+      available_size = bytes > limit_per_cycle ? limit_per_cycle : bytes;
+      cur_throughput_bytes_ = available_size;
+      last_throughput_check_time_us_ = caculate_check_time_us_(now, check_cycle_);
+    }
+  } else {
+    // reading another |bytes| does not exceed the limit,
+    // account for it in the current cycle
+    available_size = bytes;
+    cur_throughput_bytes_ += available_size;
+  }
+  return available_size;
+}
+
+void Throttle::ReturnUnusedThroughput(size_t acquired, size_t consumed, size_t elapse_time_us) {
+  size_t now = pstd::NowMicros();
+  std::unique_lock lock(keys_mutex_);
+  if (now - elapse_time_us < last_throughput_check_time_us_) {
+    // tokens were acquired in the previous cycle, ignore them
+    return;
+  }
+  // clamp explicitly: the subtraction is unsigned, so std::max against
+  // size_t(0) alone cannot catch an underflow
+  size_t returned = acquired > consumed ? acquired - consumed : 0;
+  cur_throughput_bytes_ = cur_throughput_bytes_ > returned ? cur_throughput_bytes_ - returned : 0;
+}
+}  // namespace rsync
diff --git a/tools/pika_migrate/tests/README.md b/tools/pika_migrate/tests/README.md
deleted file mode 100644
index 47b371236f..0000000000
--- a/tools/pika_migrate/tests/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-### Pika test
-
- * 在Pika目录下执行 `./pikatests.sh geo` 测试Pika GEO命令
- * 如果是`unit/type`接口, 例如 SET, 执行 `./pikatests.sh type/set` 测试Pika SET命令
diff --git a/tools/pika_migrate/tests/assets/default.conf b/tools/pika_migrate/tests/assets/default.conf
deleted file mode 100644
index c9cb8183f7..0000000000
--- a/tools/pika_migrate/tests/assets/default.conf
+++ /dev/null
@@ -1,79 +0,0 @@
-# Pika port
-port : 9221
-# Thread Number
-thread-num : 1
-# Sync Thread Number
-sync-thread-num : 6
-# Item count of sync thread queue
-sync-buffer-size : 10
-# Pika log path
-log-path : ./log/
-# Pika glog level: only INFO and ERROR
-loglevel : info
-# Pika db path
-db-path : ./db/
-# Pika write-buffer-size
-write-buffer-size : 268435456
-# Pika timeout
-timeout : 60
-# Requirepass
-requirepass :
-# Masterauth
-masterauth :
-# Userpass
-userpass :
-# User Blacklist
-userblacklist :
-# Dump Prefix
-dump-prefix :
-# daemonize [yes | no]
-#daemonize : yes
-# slotmigrate [yes | no]
-#slotmigrate : no
-# Dump Path
-dump-path : ./dump/
-# Expire-dump-days
-dump-expire : 0
-# pidfile Path
-pidfile : ./pika.pid
-# Max Connection
-maxclients : 20000
-# the per file size of sst to compact, defalut is 2M
-target-file-size-base : 20971520
-# Expire-logs-days
-expire-logs-days : 7
-# Expire-logs-nums
-expire-logs-nums : 10
-# Root-connection-num
-root-connection-num : 2
-# Slowlog-log-slower-than
-slowlog-log-slower-than : 10000
-# slave-read-only(yes/no, 1/0)
-slave-read-only : 0
-# Pika db sync path
-db-sync-path : ./dbsync/
-# db sync speed(MB) max is set to 125MB, min is set to 0, and if below 0 or above 125, the value will be adjust to 125
-db-sync-speed : -1
-# network interface
-# network-interface : eth1
-# replication
-# slaveof : master-ip:master-port
-# CronTask, format: start:end-ratio, like 02-04/60, pika will check to schedule compaction between 2 to 4 o'clock everyday
-# if the freesize/disksize > 60%
-# compact-cron :
-
-###################
-## Critical Settings
-###################
-# binlog file size: default is 100M, limited in [1K, 2G]
-binlog-file-size : 104857600
-# Compression
-compression : snappy
-# max-background-flushes: default is 1, limited in [1, 4]
-max-background-flushes : 1
-# max-background-compactions: default is 1, limited in [1, 4]
-max-background-compactions : 2
-# max-cache-files default is 5000
-max-cache-files : 5000
-# max_bytes_for_level_multiplier: default is 10, you can change it to 5
-max-bytes-for-level-multiplier : 10
diff --git a/tools/pika_migrate/tests/assets/encodings.rdb
b/tools/pika_migrate/tests/assets/encodings.rdb deleted file mode 100644 index 9fd9b705d1..0000000000 Binary files a/tools/pika_migrate/tests/assets/encodings.rdb and /dev/null differ diff --git a/tools/pika_migrate/tests/assets/hash-zipmap.rdb b/tools/pika_migrate/tests/assets/hash-zipmap.rdb deleted file mode 100644 index 27a42ed4bb..0000000000 Binary files a/tools/pika_migrate/tests/assets/hash-zipmap.rdb and /dev/null differ diff --git a/tools/pika_migrate/tests/helpers/bg_complex_data.tcl b/tools/pika_migrate/tests/helpers/bg_complex_data.tcl deleted file mode 100644 index dffd7c6688..0000000000 --- a/tools/pika_migrate/tests/helpers/bg_complex_data.tcl +++ /dev/null @@ -1,10 +0,0 @@ -source tests/support/redis.tcl -source tests/support/util.tcl - -proc bg_complex_data {host port db ops} { - set r [redis $host $port] - $r select $db - createComplexDataset $r $ops -} - -bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3] diff --git a/tools/pika_migrate/tests/helpers/gen_write_load.tcl b/tools/pika_migrate/tests/helpers/gen_write_load.tcl deleted file mode 100644 index 6d1a345166..0000000000 --- a/tools/pika_migrate/tests/helpers/gen_write_load.tcl +++ /dev/null @@ -1,15 +0,0 @@ -source tests/support/redis.tcl - -proc gen_write_load {host port seconds} { - set start_time [clock seconds] - set r [redis $host $port 1] - $r select 9 - while 1 { - $r set [expr rand()] [expr rand()] - if {[clock seconds]-$start_time > $seconds} { - exit 0 - } - } -} - -gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] diff --git a/tools/pika_migrate/tests/instances.tcl b/tools/pika_migrate/tests/instances.tcl deleted file mode 100644 index 426508f33a..0000000000 --- a/tools/pika_migrate/tests/instances.tcl +++ /dev/null @@ -1,407 +0,0 @@ -# Multi-instance test framework. -# This is used in order to test Sentinel and Redis Cluster, and provides -# basic capabilities for spawning and handling N parallel Redis / Sentinel -# instances. -# -# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com -# This software is released under the BSD License. See the COPYING file for -# more information. - -package require Tcl 8.5 - -set tcl_precision 17 -source ../support/redis.tcl -source ../support/util.tcl -source ../support/server.tcl -source ../support/test.tcl - -set ::verbose 0 -set ::pause_on_error 0 -set ::simulate_error 0 -set ::sentinel_instances {} -set ::redis_instances {} -set ::sentinel_base_port 20000 -set ::redis_base_port 30000 -set ::pids {} ; # We kill everything at exit -set ::dirs {} ; # We remove all the temp dirs at exit -set ::run_matching {} ; # If non empty, only tests matching pattern are run. - -if {[catch {cd tmp}]} { - puts "tmp directory not found." - puts "Please run this test from the Redis source root." - exit 1 -} - -# Spawn a redis or sentinel instance, depending on 'type'. -proc spawn_instance {type base_port count {conf {}}} { - for {set j 0} {$j < $count} {incr j} { - set port [find_available_port $base_port] - incr base_port - puts "Starting $type #$j at port $port" - - # Create a directory for this instance. - set dirname "${type}_${j}" - lappend ::dirs $dirname - catch {exec rm -rf $dirname} - file mkdir $dirname - - # Write the instance config file. 
- set cfgfile [file join $dirname $type.conf] - set cfg [open $cfgfile w] - puts $cfg "port $port" - puts $cfg "dir ./$dirname" - puts $cfg "logfile log.txt" - # Add additional config files - foreach directive $conf { - puts $cfg $directive - } - close $cfg - - # Finally exec it and remember the pid for later cleanup. - if {$type eq "redis"} { - set prgname redis-server - } elseif {$type eq "sentinel"} { - set prgname redis-sentinel - } else { - error "Unknown instance type." - } - set pid [exec ../../../src/${prgname} $cfgfile &] - lappend ::pids $pid - - # Check availability - if {[server_is_up 127.0.0.1 $port 100] == 0} { - abort_sentinel_test "Problems starting $type #$j: ping timeout" - } - - # Push the instance into the right list - set link [redis 127.0.0.1 $port] - $link reconnect 1 - lappend ::${type}_instances [list \ - pid $pid \ - host 127.0.0.1 \ - port $port \ - link $link \ - ] - } -} - -proc cleanup {} { - puts "Cleaning up..." - foreach pid $::pids { - catch {exec kill -9 $pid} - } - foreach dir $::dirs { - catch {exec rm -rf $dir} - } -} - -proc abort_sentinel_test msg { - puts "WARNING: Aborting the test." - puts ">>>>>>>> $msg" - cleanup - exit 1 -} - -proc parse_options {} { - for {set j 0} {$j < [llength $::argv]} {incr j} { - set opt [lindex $::argv $j] - set val [lindex $::argv [expr $j+1]] - if {$opt eq "--single"} { - incr j - set ::run_matching "*${val}*" - } elseif {$opt eq "--pause-on-error"} { - set ::pause_on_error 1 - } elseif {$opt eq "--fail"} { - set ::simulate_error 1 - } elseif {$opt eq "--help"} { - puts "Hello, I'm sentinel.tcl and I run Sentinel unit tests." - puts "\nOptions:" - puts "--single Only runs tests specified by pattern." - puts "--pause-on-error Pause for manual inspection on error." - puts "--fail Simulate a test failure." - puts "--help Shows this help." - exit 0 - } else { - puts "Unknown option $opt" - exit 1 - } - } -} - -# If --pause-on-error option was passed at startup this function is called -# on error in order to give the developer a chance to understand more about -# the error condition while the instances are still running. 
-proc pause_on_error {} { - puts "" - puts [colorstr yellow "*** Please inspect the error now ***"] - puts "\nType \"continue\" to resume the test, \"help\" for help screen.\n" - while 1 { - puts -nonewline "> " - flush stdout - set line [gets stdin] - set argv [split $line " "] - set cmd [lindex $argv 0] - if {$cmd eq {continue}} { - break - } elseif {$cmd eq {show-redis-logs}} { - set count 10 - if {[lindex $argv 1] ne {}} {set count [lindex $argv 1]} - foreach_redis_id id { - puts "=== REDIS $id ====" - puts [exec tail -$count redis_$id/log.txt] - puts "---------------------\n" - } - } elseif {$cmd eq {show-sentinel-logs}} { - set count 10 - if {[lindex $argv 1] ne {}} {set count [lindex $argv 1]} - foreach_sentinel_id id { - puts "=== SENTINEL $id ====" - puts [exec tail -$count sentinel_$id/log.txt] - puts "---------------------\n" - } - } elseif {$cmd eq {ls}} { - foreach_redis_id id { - puts -nonewline "Redis $id" - set errcode [catch { - set str {} - append str "@[RI $id tcp_port]: " - append str "[RI $id role] " - if {[RI $id role] eq {slave}} { - append str "[RI $id master_host]:[RI $id master_port]" - } - set str - } retval] - if {$errcode} { - puts " -- $retval" - } else { - puts $retval - } - } - foreach_sentinel_id id { - puts -nonewline "Sentinel $id" - set errcode [catch { - set str {} - append str "@[SI $id tcp_port]: " - append str "[join [S $id sentinel get-master-addr-by-name mymaster]]" - set str - } retval] - if {$errcode} { - puts " -- $retval" - } else { - puts $retval - } - } - } elseif {$cmd eq {help}} { - puts "ls List Sentinel and Redis instances." - puts "show-sentinel-logs \[N\] Show latest N lines of logs." - puts "show-redis-logs \[N\] Show latest N lines of logs." - puts "S cmd ... arg Call command in Sentinel ." - puts "R cmd ... arg Call command in Redis ." - puts "SI Show Sentinel INFO ." - puts "RI Show Sentinel INFO ." - puts "continue Resume test." - } else { - set errcode [catch {eval $line} retval] - if {$retval ne {}} {puts "$retval"} - } - } -} - -# We redefine 'test' as for Sentinel we don't use the server-client -# architecture for the test, everything is sequential. -proc test {descr code} { - set ts [clock format [clock seconds] -format %H:%M:%S] - puts -nonewline "$ts> $descr: " - flush stdout - - if {[catch {set retval [uplevel 1 $code]} error]} { - if {[string match "assertion:*" $error]} { - set msg [string range $error 10 end] - puts [colorstr red $msg] - if {$::pause_on_error} pause_on_error - puts "(Jumping to next unit after error)" - return -code continue - } else { - # Re-raise, let handler up the stack take care of this. - error $error $::errorInfo - } - } else { - puts [colorstr green OK] - } -} - -proc run_tests {} { - set tests [lsort [glob ../tests/*]] - foreach test $tests { - if {$::run_matching ne {} && [string match $::run_matching $test] == 0} { - continue - } - if {[file isdirectory $test]} continue - puts [colorstr yellow "Testing unit: [lindex [file split $test] end]"] - source $test - } -} - -# The "S" command is used to interact with the N-th Sentinel. -# The general form is: -# -# S command arg arg arg ... -# -# Example to ping the Sentinel 0 (first instance): S 0 PING -proc S {n args} { - set s [lindex $::sentinel_instances $n] - [dict get $s link] {*}$args -} - -# Like R but to chat with Redis instances. 
-proc R {n args} { - set r [lindex $::redis_instances $n] - [dict get $r link] {*}$args -} - -proc get_info_field {info field} { - set fl [string length $field] - append field : - foreach line [split $info "\n"] { - set line [string trim $line "\r\n "] - if {[string range $line 0 $fl] eq $field} { - return [string range $line [expr {$fl+1}] end] - } - } - return {} -} - -proc SI {n field} { - get_info_field [S $n info] $field -} - -proc RI {n field} { - get_info_field [R $n info] $field -} - -# Iterate over IDs of sentinel or redis instances. -proc foreach_instance_id {instances idvar code} { - upvar 1 $idvar id - for {set id 0} {$id < [llength $instances]} {incr id} { - set errcode [catch {uplevel 1 $code} result] - if {$errcode == 1} { - error $result $::errorInfo $::errorCode - } elseif {$errcode == 4} { - continue - } elseif {$errcode == 3} { - break - } elseif {$errcode != 0} { - return -code $errcode $result - } - } -} - -proc foreach_sentinel_id {idvar code} { - set errcode [catch {uplevel 1 [list foreach_instance_id $::sentinel_instances $idvar $code]} result] - return -code $errcode $result -} - -proc foreach_redis_id {idvar code} { - set errcode [catch {uplevel 1 [list foreach_instance_id $::redis_instances $idvar $code]} result] - return -code $errcode $result -} - -# Get the specific attribute of the specified instance type, id. -proc get_instance_attrib {type id attrib} { - dict get [lindex [set ::${type}_instances] $id] $attrib -} - -# Set the specific attribute of the specified instance type, id. -proc set_instance_attrib {type id attrib newval} { - set d [lindex [set ::${type}_instances] $id] - dict set d $attrib $newval - lset ::${type}_instances $id $d -} - -# Create a master-slave cluster of the given number of total instances. -# The first instance "0" is the master, all others are configured as -# slaves. -proc create_redis_master_slave_cluster n { - foreach_redis_id id { - if {$id == 0} { - # Our master. - R $id slaveof no one - R $id flushall - } elseif {$id < $n} { - R $id slaveof [get_instance_attrib redis 0 host] \ - [get_instance_attrib redis 0 port] - } else { - # Instances not part of the cluster. - R $id slaveof no one - } - } - # Wait for all the slaves to sync. - wait_for_condition 1000 50 { - [RI 0 connected_slaves] == ($n-1) - } else { - fail "Unable to create a master-slaves cluster." - } -} - -proc get_instance_id_by_port {type port} { - foreach_${type}_id id { - if {[get_instance_attrib $type $id port] == $port} { - return $id - } - } - fail "Instance $type port $port not found." -} - -# Kill an instance of the specified type/id with SIGKILL. -# This function will mark the instance PID as -1 to remember that this instance -# is no longer running and will remove its PID from the list of pids that -# we kill at cleanup. -# -# The instance can be restarted with restart-instance. -proc kill_instance {type id} { - set pid [get_instance_attrib $type $id pid] - if {$pid == -1} { - error "You tried to kill $type $id twice." - } - exec kill -9 $pid - set_instance_attrib $type $id pid -1 - set_instance_attrib $type $id link you_tried_to_talk_with_killed_instance - - # Remove the PID from the list of pids to kill at exit. - set ::pids [lsearch -all -inline -not -exact $::pids $pid] -} - -# Return true of the instance of the specified type/id is killed. 
-proc instance_is_killed {type id} { - set pid [get_instance_attrib $type $id pid] - expr {$pid == -1} -} - -# Restart an instance previously killed by kill_instance -proc restart_instance {type id} { - set dirname "${type}_${id}" - set cfgfile [file join $dirname $type.conf] - set port [get_instance_attrib $type $id port] - - # Execute the instance with its old setup and append the new pid - # file for cleanup. - if {$type eq "redis"} { - set prgname redis-server - } else { - set prgname redis-sentinel - } - set pid [exec ../../../src/${prgname} $cfgfile &] - set_instance_attrib $type $id pid $pid - lappend ::pids $pid - - # Check that the instance is running - if {[server_is_up 127.0.0.1 $port 100] == 0} { - abort_sentinel_test "Problems starting $type #$id: ping timeout" - } - - # Connect with it with a fresh link - set link [redis 127.0.0.1 $port] - $link reconnect 1 - set_instance_attrib $type $id link $link -} - diff --git a/tools/pika_migrate/tests/integration/aof-race.tcl b/tools/pika_migrate/tests/integration/aof-race.tcl deleted file mode 100644 index 207f207393..0000000000 --- a/tools/pika_migrate/tests/integration/aof-race.tcl +++ /dev/null @@ -1,35 +0,0 @@ -set defaults { appendonly {yes} appendfilename {appendonly.aof} } -set server_path [tmpdir server.aof] -set aof_path "$server_path/appendonly.aof" - -proc start_server_aof {overrides code} { - upvar defaults defaults srv srv server_path server_path - set config [concat $defaults $overrides] - start_server [list overrides $config] $code -} - -tags {"aof"} { - # Specific test for a regression where internal buffers were not properly - # cleaned after a child responsible for an AOF rewrite exited. This buffer - # was subsequently appended to the new AOF, resulting in duplicate commands. - start_server_aof [list dir $server_path] { - set client [redis [srv host] [srv port]] - set bench [open "|src/redis-benchmark -q -p [srv port] -c 20 -n 20000 incr foo" "r+"] - after 100 - - # Benchmark should be running by now: start background rewrite - $client bgrewriteaof - - # Read until benchmark pipe reaches EOF - while {[string length [read $bench]] > 0} {} - - # Check contents of foo - assert_equal 20000 [$client get foo] - } - - # Restart server to replay AOF - start_server_aof [list dir $server_path] { - set client [redis [srv host] [srv port]] - assert_equal 20000 [$client get foo] - } -} diff --git a/tools/pika_migrate/tests/integration/aof.tcl b/tools/pika_migrate/tests/integration/aof.tcl deleted file mode 100644 index 7ea70943c6..0000000000 --- a/tools/pika_migrate/tests/integration/aof.tcl +++ /dev/null @@ -1,236 +0,0 @@ -set defaults { appendonly {yes} appendfilename {appendonly.aof} } -set server_path [tmpdir server.aof] -set aof_path "$server_path/appendonly.aof" - -proc append_to_aof {str} { - upvar fp fp - puts -nonewline $fp $str -} - -proc create_aof {code} { - upvar fp fp aof_path aof_path - set fp [open $aof_path w+] - uplevel 1 $code - close $fp -} - -proc start_server_aof {overrides code} { - upvar defaults defaults srv srv server_path server_path - set config [concat $defaults $overrides] - set srv [start_server [list overrides $config]] - uplevel 1 $code - kill_server $srv -} - -tags {"aof"} { - ## Server can start when aof-load-truncated is set to yes and AOF - ## is truncated, with an incomplete MULTI block. 
- create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof [formatCommand multi] - append_to_aof [formatCommand set bar world] - } - - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Unfinished MULTI: Server should start if load-truncated is yes" { - assert_equal 1 [is_alive $srv] - } - } - - ## Should also start with truncated AOF without incomplete MULTI block. - create_aof { - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [string range [formatCommand incr foo] 0 end-1] - } - - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Short read: Server should start if load-truncated is yes" { - assert_equal 1 [is_alive $srv] - } - - set client [redis [dict get $srv host] [dict get $srv port]] - - test "Truncated AOF loaded: we expect foo to be equal to 5" { - assert {[$client get foo] eq "5"} - } - - test "Append a new command after loading an incomplete AOF" { - $client incr foo - } - } - - # Now the AOF file is expected to be correct - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Short read + command: Server should start" { - assert_equal 1 [is_alive $srv] - } - - set client [redis [dict get $srv host] [dict get $srv port]] - - test "Truncated AOF loaded: we expect foo to be equal to 6 now" { - assert {[$client get foo] eq "6"} - } - } - - ## Test that the server exits when the AOF contains a format error - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof "!!!" - append_to_aof [formatCommand set foo hello] - } - - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Bad format: Server should have logged an error" { - set pattern "*Bad file format reading the append only file*" - set retry 10 - while {$retry} { - set result [exec tail -n1 < [dict get $srv stdout]] - if {[string match $pattern $result]} { - break - } - incr retry -1 - after 1000 - } - if {$retry == 0} { - error "assertion:expected error not found on config file" - } - } - } - - ## Test the server doesn't start when the AOF contains an unfinished MULTI - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof [formatCommand multi] - append_to_aof [formatCommand set bar world] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "Unfinished MULTI: Server should have logged an error" { - set pattern "*Unexpected end of file reading the append only file*" - set retry 10 - while {$retry} { - set result [exec tail -n1 < [dict get $srv stdout]] - if {[string match $pattern $result]} { - break - } - incr retry -1 - after 1000 - } - if {$retry == 0} { - error "assertion:expected error not found on config file" - } - } - } - - ## Test that the server exits when the AOF contains a short read - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof [string range [formatCommand set bar world] 0 end-1] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "Short read: Server should have logged an error" { - set pattern "*Unexpected end of file reading the append only file*" - set retry 10 - while {$retry} { - set result [exec tail -n1 < [dict get $srv stdout]] - if {[string match $pattern $result]} { - break - } - incr retry -1 - after 1000 - } - if {$retry == 0} { - error "assertion:expected error not found on config file" - } - } - } - - 
## Test that redis-check-aof indeed sees this AOF is not valid - test "Short read: Utility should confirm the AOF is not valid" { - catch { - exec src/redis-check-aof $aof_path - } result - assert_match "*not valid*" $result - } - - test "Short read: Utility should be able to fix the AOF" { - set result [exec src/redis-check-aof --fix $aof_path << "y\n"] - assert_match "*Successfully truncated AOF*" $result - } - - ## Test that the server can be started using the truncated AOF - start_server_aof [list dir $server_path aof-load-truncated no] { - test "Fixed AOF: Server should have been started" { - assert_equal 1 [is_alive $srv] - } - - test "Fixed AOF: Keyspace should contain values that were parseable" { - set client [redis [dict get $srv host] [dict get $srv port]] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - assert_equal "hello" [$client get foo] - assert_equal "" [$client get bar] - } - } - - ## Test that SPOP (that modifies the client's argc/argv) is correctly free'd - create_aof { - append_to_aof [formatCommand sadd set foo] - append_to_aof [formatCommand sadd set bar] - append_to_aof [formatCommand spop set] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "AOF+SPOP: Server should have been started" { - assert_equal 1 [is_alive $srv] - } - - test "AOF+SPOP: Set should have 1 member" { - set client [redis [dict get $srv host] [dict get $srv port]] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - assert_equal 1 [$client scard set] - } - } - - ## Test that EXPIREAT is loaded correctly - create_aof { - append_to_aof [formatCommand rpush list foo] - append_to_aof [formatCommand expireat list 1000] - append_to_aof [formatCommand rpush list bar] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "AOF+EXPIRE: Server should have been started" { - assert_equal 1 [is_alive $srv] - } - - test "AOF+EXPIRE: List should be empty" { - set client [redis [dict get $srv host] [dict get $srv port]] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." 
- } - assert_equal 0 [$client llen list] - } - } - - start_server {overrides {appendonly {yes} appendfilename {appendonly.aof}}} { - test {Redis should not try to convert DEL into EXPIREAT for EXPIRE -1} { - r set x 10 - r expire x -1 - } - } -} diff --git a/tools/pika_migrate/tests/integration/convert-zipmap-hash-on-load.tcl b/tools/pika_migrate/tests/integration/convert-zipmap-hash-on-load.tcl deleted file mode 100644 index cf3577f284..0000000000 --- a/tools/pika_migrate/tests/integration/convert-zipmap-hash-on-load.tcl +++ /dev/null @@ -1,35 +0,0 @@ -# Copy RDB with zipmap encoded hash to server path -set server_path [tmpdir "server.convert-zipmap-hash-on-load"] - -exec cp -f tests/assets/hash-zipmap.rdb $server_path -start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb"]] { - test "RDB load zipmap hash: converts to ziplist" { - r select 0 - - assert_match "*ziplist*" [r debug object hash] - assert_equal 2 [r hlen hash] - assert_match {v1 v2} [r hmget hash f1 f2] - } -} - -exec cp -f tests/assets/hash-zipmap.rdb $server_path -start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-entries" 1]] { - test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-entries is exceeded" { - r select 0 - - assert_match "*hashtable*" [r debug object hash] - assert_equal 2 [r hlen hash] - assert_match {v1 v2} [r hmget hash f1 f2] - } -} - -exec cp -f tests/assets/hash-zipmap.rdb $server_path -start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-value" 1]] { - test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-value is exceeded" { - r select 0 - - assert_match "*hashtable*" [r debug object hash] - assert_equal 2 [r hlen hash] - assert_match {v1 v2} [r hmget hash f1 f2] - } -} diff --git a/tools/pika_migrate/tests/integration/rdb.tcl b/tools/pika_migrate/tests/integration/rdb.tcl deleted file mode 100644 index 71876a6edc..0000000000 --- a/tools/pika_migrate/tests/integration/rdb.tcl +++ /dev/null @@ -1,98 +0,0 @@ -set server_path [tmpdir "server.rdb-encoding-test"] - -# Copy RDB with different encodings in server path -exec cp tests/assets/encodings.rdb $server_path - -start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rdb"]] { - test "RDB encoding loading test" { - r select 0 - csvdump r - } {"compressible","string","aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" -"hash","hash","a","1","aa","10","aaa","100","b","2","bb","20","bbb","200","c","3","cc","30","ccc","300","ddd","400","eee","5000000000", -"hash_zipped","hash","a","1","b","2","c","3", -"list","list","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000", -"list_zipped","list","1","2","3","a","b","c","100000","6000000000", -"number","string","10" -"set","set","1","100000","2","3","6000000000","a","b","c", -"set_zipped_1","set","1","2","3","4", -"set_zipped_2","set","100000","200000","300000","400000", -"set_zipped_3","set","1000000000","2000000000","3000000000","4000000000","5000000000","6000000000", -"string","string","Hello World" -"zset","zset","a","1","b","2","c","3","aa","10","bb","20","cc","30","aaa","100","bbb","200","ccc","300","aaaa","1000","cccc","123456789","bbbb","5000000000", -"zset_zipped","zset","a","1","b","2","c","3", -} -} - -set server_path [tmpdir 
"server.rdb-startup-test"] - -start_server [list overrides [list "dir" $server_path]] { - test {Server started empty with non-existing RDB file} { - r debug digest - } {0000000000000000000000000000000000000000} - # Save an RDB file, needed for the next test. - r save -} - -start_server [list overrides [list "dir" $server_path]] { - test {Server started empty with empty RDB file} { - r debug digest - } {0000000000000000000000000000000000000000} -} - -# Helper function to start a server and kill it, just to check the error -# logged. -set defaults {} -proc start_server_and_kill_it {overrides code} { - upvar defaults defaults srv srv server_path server_path - set config [concat $defaults $overrides] - set srv [start_server [list overrides $config]] - uplevel 1 $code - kill_server $srv -} - -# Make the RDB file unreadable -file attributes [file join $server_path dump.rdb] -permissions 0222 - -# Detect root account (it is able to read the file even with 002 perm) -set isroot 0 -catch { - open [file join $server_path dump.rdb] - set isroot 1 -} - -# Now make sure the server aborted with an error -if {!$isroot} { - start_server_and_kill_it [list "dir" $server_path] { - test {Server should not start if RDB file can't be open} { - wait_for_condition 50 100 { - [string match {*Fatal error loading*} \ - [exec tail -n1 < [dict get $srv stdout]]] - } else { - fail "Server started even if RDB was unreadable!" - } - } - } -} - -# Fix permissions of the RDB file. -file attributes [file join $server_path dump.rdb] -permissions 0666 - -# Corrupt its CRC64 checksum. -set filesize [file size [file join $server_path dump.rdb]] -set fd [open [file join $server_path dump.rdb] r+] -fconfigure $fd -translation binary -seek $fd -8 end -puts -nonewline $fd "foobar00"; # Corrupt the checksum -close $fd - -# Now make sure the server aborted with an error -start_server_and_kill_it [list "dir" $server_path] { - test {Server should not start if RDB is corrupted} { - wait_for_condition 50 100 { - [string match {*RDB checksum*} \ - [exec tail -n1 < [dict get $srv stdout]]] - } else { - fail "Server started even if RDB was corrupted!" 
- } - } -} diff --git a/tools/pika_migrate/tests/integration/redis-cli.tcl b/tools/pika_migrate/tests/integration/redis-cli.tcl deleted file mode 100644 index 40e4222e3e..0000000000 --- a/tools/pika_migrate/tests/integration/redis-cli.tcl +++ /dev/null @@ -1,208 +0,0 @@ -start_server {tags {"cli"}} { - proc open_cli {} { - set ::env(TERM) dumb - set fd [open [format "|src/redis-cli -p %d -n 9" [srv port]] "r+"] - fconfigure $fd -buffering none - fconfigure $fd -blocking false - fconfigure $fd -translation binary - assert_equal "redis> " [read_cli $fd] - set _ $fd - } - - proc close_cli {fd} { - close $fd - } - - proc read_cli {fd} { - set buf [read $fd] - while {[string length $buf] == 0} { - # wait some time and try again - after 10 - set buf [read $fd] - } - set _ $buf - } - - proc write_cli {fd buf} { - puts $fd $buf - flush $fd - } - - # Helpers to run tests in interactive mode - proc run_command {fd cmd} { - write_cli $fd $cmd - set lines [split [read_cli $fd] "\n"] - assert_equal "redis> " [lindex $lines end] - join [lrange $lines 0 end-1] "\n" - } - - proc test_interactive_cli {name code} { - set ::env(FAKETTY) 1 - set fd [open_cli] - test "Interactive CLI: $name" $code - close_cli $fd - unset ::env(FAKETTY) - } - - # Helpers to run tests where stdout is not a tty - proc write_tmpfile {contents} { - set tmp [tmpfile "cli"] - set tmpfd [open $tmp "w"] - puts -nonewline $tmpfd $contents - close $tmpfd - set _ $tmp - } - - proc _run_cli {opts args} { - set cmd [format "src/redis-cli -p %d -n 9 $args" [srv port]] - foreach {key value} $opts { - if {$key eq "pipe"} { - set cmd "sh -c \"$value | $cmd\"" - } - if {$key eq "path"} { - set cmd "$cmd < $value" - } - } - - set fd [open "|$cmd" "r"] - fconfigure $fd -buffering none - fconfigure $fd -translation binary - set resp [read $fd 1048576] - close $fd - set _ $resp - } - - proc run_cli {args} { - _run_cli {} {*}$args - } - - proc run_cli_with_input_pipe {cmd args} { - _run_cli [list pipe $cmd] {*}$args - } - - proc run_cli_with_input_file {path args} { - _run_cli [list path $path] {*}$args - } - - proc test_nontty_cli {name code} { - test "Non-interactive non-TTY CLI: $name" $code - } - - # Helpers to run tests where stdout is a tty (fake it) - proc test_tty_cli {name code} { - set ::env(FAKETTY) 1 - test "Non-interactive TTY CLI: $name" $code - unset ::env(FAKETTY) - } - - test_interactive_cli "INFO response should be printed raw" { - set lines [split [run_command $fd info] "\n"] - foreach line $lines { - assert [regexp {^[a-z0-9_]+:[a-z0-9_]+} $line] - } - } - - test_interactive_cli "Status reply" { - assert_equal "OK" [run_command $fd "set key foo"] - } - - test_interactive_cli "Integer reply" { - assert_equal "(integer) 1" [run_command $fd "incr counter"] - } - - test_interactive_cli "Bulk reply" { - r set key foo - assert_equal "\"foo\"" [run_command $fd "get key"] - } - - test_interactive_cli "Multi-bulk reply" { - r rpush list foo - r rpush list bar - assert_equal "1. \"foo\"\n2. 
\"bar\"" [run_command $fd "lrange list 0 -1"] - } - - test_interactive_cli "Parsing quotes" { - assert_equal "OK" [run_command $fd "set key \"bar\""] - assert_equal "bar" [r get key] - assert_equal "OK" [run_command $fd "set key \" bar \""] - assert_equal " bar " [r get key] - assert_equal "OK" [run_command $fd "set key \"\\\"bar\\\"\""] - assert_equal "\"bar\"" [r get key] - assert_equal "OK" [run_command $fd "set key \"\tbar\t\""] - assert_equal "\tbar\t" [r get key] - - # invalid quotation - assert_equal "Invalid argument(s)" [run_command $fd "get \"\"key"] - assert_equal "Invalid argument(s)" [run_command $fd "get \"key\"x"] - - # quotes after the argument are weird, but should be allowed - assert_equal "OK" [run_command $fd "set key\"\" bar"] - assert_equal "bar" [r get key] - } - - test_tty_cli "Status reply" { - assert_equal "OK\n" [run_cli set key bar] - assert_equal "bar" [r get key] - } - - test_tty_cli "Integer reply" { - r del counter - assert_equal "(integer) 1\n" [run_cli incr counter] - } - - test_tty_cli "Bulk reply" { - r set key "tab\tnewline\n" - assert_equal "\"tab\\tnewline\\n\"\n" [run_cli get key] - } - - test_tty_cli "Multi-bulk reply" { - r del list - r rpush list foo - r rpush list bar - assert_equal "1. \"foo\"\n2. \"bar\"\n" [run_cli lrange list 0 -1] - } - - test_tty_cli "Read last argument from pipe" { - assert_equal "OK\n" [run_cli_with_input_pipe "echo foo" set key] - assert_equal "foo\n" [r get key] - } - - test_tty_cli "Read last argument from file" { - set tmpfile [write_tmpfile "from file"] - assert_equal "OK\n" [run_cli_with_input_file $tmpfile set key] - assert_equal "from file" [r get key] - } - - test_nontty_cli "Status reply" { - assert_equal "OK" [run_cli set key bar] - assert_equal "bar" [r get key] - } - - test_nontty_cli "Integer reply" { - r del counter - assert_equal "1" [run_cli incr counter] - } - - test_nontty_cli "Bulk reply" { - r set key "tab\tnewline\n" - assert_equal "tab\tnewline\n" [run_cli get key] - } - - test_nontty_cli "Multi-bulk reply" { - r del list - r rpush list foo - r rpush list bar - assert_equal "foo\nbar" [run_cli lrange list 0 -1] - } - - test_nontty_cli "Read last argument from pipe" { - assert_equal "OK" [run_cli_with_input_pipe "echo foo" set key] - assert_equal "foo\n" [r get key] - } - - test_nontty_cli "Read last argument from file" { - set tmpfile [write_tmpfile "from file"] - assert_equal "OK" [run_cli_with_input_file $tmpfile set key] - assert_equal "from file" [r get key] - } -} diff --git a/tools/pika_migrate/tests/integration/replication-2.tcl b/tools/pika_migrate/tests/integration/replication-2.tcl deleted file mode 100644 index 9446e5cd91..0000000000 --- a/tools/pika_migrate/tests/integration/replication-2.tcl +++ /dev/null @@ -1,87 +0,0 @@ -start_server {tags {"repl"}} { - start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] - after 1000 - s -1 role - } {slave} - - test {If min-slaves-to-write is honored, write is accepted} { - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 10 - r set foo 12345 - wait_for_condition 50 100 { - [r -1 get foo] eq {12345} - } else { - fail "Write did not reached slave" - } - } - - test {No write if min-slaves-to-write is < attached slaves} { - r config set min-slaves-to-write 2 - r config set min-slaves-max-lag 10 - catch {r set foo 12345} err - set err - } {NOREPLICAS*} - - test {If min-slaves-to-write is honored, write is accepted (again)} { - r config set min-slaves-to-write 1 - r 
config set min-slaves-max-lag 10 - r set foo 12345 - wait_for_condition 50 100 { - [r -1 get foo] eq {12345} - } else { - fail "Write did not reached slave" - } - } - - test {No write if min-slaves-max-lag is > of the slave lag} { - r -1 deferred 1 - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 2 - r -1 debug sleep 6 - assert {[r set foo 12345] eq {OK}} - after 4000 - catch {r set foo 12345} err - assert {[r -1 read] eq {OK}} - r -1 deferred 0 - set err - } {NOREPLICAS*} - - test {min-slaves-to-write is ignored by slaves} { - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 10 - r -1 config set min-slaves-to-write 1 - r -1 config set min-slaves-max-lag 10 - r set foo aaabbb - wait_for_condition 50 100 { - [r -1 get foo] eq {aaabbb} - } else { - fail "Write did not reached slave" - } - } - - # Fix parameters for the next test to work - r config set min-slaves-to-write 0 - r -1 config set min-slaves-to-write 0 - r flushall - - test {MASTER and SLAVE dataset should be identical after complex ops} { - createComplexDataset r 10000 - after 500 - if {[r debug digest] ne [r -1 debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - } - } -} diff --git a/tools/pika_migrate/tests/integration/replication-3.tcl b/tools/pika_migrate/tests/integration/replication-3.tcl deleted file mode 100644 index 0fcbad45b0..0000000000 --- a/tools/pika_migrate/tests/integration/replication-3.tcl +++ /dev/null @@ -1,101 +0,0 @@ -start_server {tags {"repl"}} { - start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] - wait_for_condition 50 100 { - [s -1 master_link_status] eq {up} - } else { - fail "Replication not started." - } - } - - if {$::accurate} {set numops 50000} else {set numops 5000} - - test {MASTER and SLAVE consistency with expire} { - createComplexDataset r $numops useexpire - after 4000 ;# Make sure everything expired before taking the digest - r keys * ;# Force DEL syntesizing to slave - after 1000 ;# Wait another second. Now everything should be fine. - if {[r debug digest] ne [r -1 debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - } - } -} - -start_server {tags {"repl"}} { - start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] - wait_for_condition 50 100 { - [s -1 master_link_status] eq {up} - } else { - fail "Replication not started." - } - } - - set numops 20000 ;# Enough to trigger the Script Cache LRU eviction. - - # While we are at it, enable AOF to test it will be consistent as well - # after the test. 
- r config set appendonly yes - - test {MASTER and SLAVE consistency with EVALSHA replication} { - array set oldsha {} - for {set j 0} {$j < $numops} {incr j} { - set key "key:$j" - # Make sure to create scripts that have different SHA1s - set script "return redis.call('incr','$key')" - set sha1 [r eval "return redis.sha1hex(\"$script\")" 0] - set oldsha($j) $sha1 - r eval $script 0 - set res [r evalsha $sha1 0] - assert {$res == 2} - # Additionally call one of the old scripts as well, at random. - set res [r evalsha $oldsha([randomInt $j]) 0] - assert {$res > 2} - - # Trigger an AOF rewrite while we are half-way, this also - # forces the flush of the script cache, and we will cover - # more code as a result. - if {$j == $numops / 2} { - catch {r bgrewriteaof} - } - } - - wait_for_condition 50 100 { - [r dbsize] == $numops && - [r -1 dbsize] == $numops && - [r debug digest] eq [r -1 debug digest] - } else { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - - } - - set old_digest [r debug digest] - r config set appendonly no - r debug loadaof - set new_digest [r debug digest] - assert {$old_digest eq $new_digest} - } - } -} diff --git a/tools/pika_migrate/tests/integration/replication-4.tcl b/tools/pika_migrate/tests/integration/replication-4.tcl deleted file mode 100644 index 6db9ffe2bc..0000000000 --- a/tools/pika_migrate/tests/integration/replication-4.tcl +++ /dev/null @@ -1,136 +0,0 @@ -proc start_bg_complex_data {host port db ops} { - set tclsh [info nameofexecutable] - exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops & -} - -proc stop_bg_complex_data {handle} { - catch {exec /bin/kill -9 $handle} -} - -start_server {tags {"repl"}} { - start_server {} { - - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000] - set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000] - set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000] - - test {First server should have role slave after SLAVEOF} { - $slave slaveof $master_host $master_port - after 1000 - s 0 role - } {slave} - - test {Test replication with parallel clients writing in differnet DBs} { - after 5000 - stop_bg_complex_data $load_handle0 - stop_bg_complex_data $load_handle1 - stop_bg_complex_data $load_handle2 - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - - if {[$master debug digest] ne [$slave debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - } - } -} - -start_server {tags {"repl"}} { - start_server {} { - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - test {First server should have role slave after SLAVEOF} { - $slave slaveof $master_host $master_port - wait_for_condition 50 
100 { - [s 0 master_link_status] eq {up} - } else { - fail "Replication not started." - } - } - - test {With min-slaves-to-write (1,3): master should be writable} { - $master config set min-slaves-max-lag 3 - $master config set min-slaves-to-write 1 - $master set foo bar - } {OK} - - test {With min-slaves-to-write (2,3): master should not be writable} { - $master config set min-slaves-max-lag 3 - $master config set min-slaves-to-write 2 - catch {$master set foo bar} e - set e - } {NOREPLICAS*} - - test {With min-slaves-to-write: master not writable with lagged slave} { - $master config set min-slaves-max-lag 2 - $master config set min-slaves-to-write 1 - assert {[$master set foo bar] eq {OK}} - $slave deferred 1 - $slave debug sleep 6 - after 4000 - catch {$master set foo bar} e - set e - } {NOREPLICAS*} - } -} - -start_server {tags {"repl"}} { - start_server {} { - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - test {First server should have role slave after SLAVEOF} { - $slave slaveof $master_host $master_port - wait_for_condition 50 100 { - [s 0 role] eq {slave} - } else { - fail "Replication not started." - } - } - - test {Replication: commands with many arguments (issue #1221)} { - # We now issue large MSET commands, that may trigger a specific - # class of bugs, see issue #1221. - for {set j 0} {$j < 100} {incr j} { - set cmd [list mset] - for {set x 0} {$x < 1000} {incr x} { - lappend cmd [randomKey] [randomValue] - } - $master {*}$cmd - } - - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - } - } -} diff --git a/tools/pika_migrate/tests/integration/replication-psync.tcl b/tools/pika_migrate/tests/integration/replication-psync.tcl deleted file mode 100644 index f131dafe31..0000000000 --- a/tools/pika_migrate/tests/integration/replication-psync.tcl +++ /dev/null @@ -1,115 +0,0 @@ -proc start_bg_complex_data {host port db ops} { - set tclsh [info nameofexecutable] - exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops & -} - -proc stop_bg_complex_data {handle} { - catch {exec /bin/kill -9 $handle} -} - -# Creates a master-slave pair and breaks the link continuously to force -# partial resyncs attempts, all this while flooding the master with -# write queries. -# -# You can specifiy backlog size, ttl, delay before reconnection, test duration -# in seconds, and an additional condition to verify at the end. -proc test_psync {descr duration backlog_size backlog_ttl delay cond} { - start_server {tags {"repl"}} { - start_server {} { - - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - $master config set repl-backlog-size $backlog_size - $master config set repl-backlog-ttl $backlog_ttl - - set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000] - set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000] - set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000] - - test {Slave should be able to synchronize with the master} { - $slave slaveof $master_host $master_port - wait_for_condition 50 100 { - [lindex [r role] 0] eq {slave} && - [lindex [r role] 3] eq {connected} - } else { - fail "Replication not started." - } - } - - # Check that the background clients are actually writing. 
- test {Detect write load to master} { - wait_for_condition 50 100 { - [$master dbsize] > 100 - } else { - fail "Can't detect write load from background clients." - } - } - - test "Test replication partial resync: $descr" { - # Now while the clients are writing data, break the maste-slave - # link multiple times. - for {set j 0} {$j < $duration*10} {incr j} { - after 100 - # catch {puts "MASTER [$master dbsize] keys, SLAVE [$slave dbsize] keys"} - - if {($j % 20) == 0} { - catch { - if {$delay} { - $slave multi - $slave client kill $master_host:$master_port - $slave debug sleep $delay - $slave exec - } else { - $slave client kill $master_host:$master_port - } - } - } - } - stop_bg_complex_data $load_handle0 - stop_bg_complex_data $load_handle1 - stop_bg_complex_data $load_handle2 - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - - if {[$master debug digest] ne [$slave debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - eval $cond - } - } - } -} - -test_psync {ok psync} 6 1000000 3600 0 { - assert {[s -1 sync_partial_ok] > 0} -} - -test_psync {no backlog} 6 100 3600 0.5 { - assert {[s -1 sync_partial_err] > 0} -} - -test_psync {ok after delay} 3 100000000 3600 3 { - assert {[s -1 sync_partial_ok] > 0} -} - -test_psync {backlog expired} 3 100000000 1 3 { - assert {[s -1 sync_partial_err] > 0} -} diff --git a/tools/pika_migrate/tests/integration/replication.tcl b/tools/pika_migrate/tests/integration/replication.tcl deleted file mode 100644 index bb907eba8e..0000000000 --- a/tools/pika_migrate/tests/integration/replication.tcl +++ /dev/null @@ -1,215 +0,0 @@ -start_server {tags {"repl"}} { - set A [srv 0 client] - set A_host [srv 0 host] - set A_port [srv 0 port] - start_server {} { - set B [srv 0 client] - set B_host [srv 0 host] - set B_port [srv 0 port] - - test {Set instance A as slave of B} { - $A slaveof $B_host $B_port - wait_for_condition 50 100 { - [lindex [$A role] 0] eq {slave} && - [string match {*master_link_status:up*} [$A info replication]] - } else { - fail "Can't turn the instance into a slave" - } - } - - test {BRPOPLPUSH replication, when blocking against empty list} { - set rd [redis_deferring_client] - $rd brpoplpush a b 5 - r lpush a foo - wait_for_condition 50 100 { - [$A debug digest] eq [$B debug digest] - } else { - fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" - } - } - - test {BRPOPLPUSH replication, list exists} { - set rd [redis_deferring_client] - r lpush c 1 - r lpush c 2 - r lpush c 3 - $rd brpoplpush c d 5 - after 1000 - assert_equal [$A debug digest] [$B debug digest] - } - - test {BLPOP followed by role change, issue #2473} { - set rd [redis_deferring_client] - $rd blpop foo 0 ; # Block while B is a master - - # Turn B into master of A - $A slaveof no one - $B slaveof $A_host $A_port - wait_for_condition 50 100 { - [lindex [$B role] 0] eq {slave} && - [string match {*master_link_status:up*} [$B info replication]] - } else { - fail "Can't turn the instance into a slave" - } - - # Push elements into the "foo" list of the new slave. 
- # If the client is still attached to the instance, we'll get - # a desync between the two instances. - $A rpush foo a b c - after 100 - - wait_for_condition 50 100 { - [$A debug digest] eq [$B debug digest] && - [$A lrange foo 0 -1] eq {a b c} && - [$B lrange foo 0 -1] eq {a b c} - } else { - fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" - } - } - } -} - -start_server {tags {"repl"}} { - r set mykey foo - - start_server {} { - test {Second server should have role master at first} { - s role - } {master} - - test {SLAVEOF should start with link status "down"} { - r slaveof [srv -1 host] [srv -1 port] - s master_link_status - } {down} - - test {The role should immediately be changed to "slave"} { - s role - } {slave} - - wait_for_sync r - test {Sync should have transferred keys from master} { - r get mykey - } {foo} - - test {The link status should be up} { - s master_link_status - } {up} - - test {SET on the master should immediately propagate} { - r -1 set mykey bar - - wait_for_condition 500 100 { - [r 0 get mykey] eq {bar} - } else { - fail "SET on master did not propagated on slave" - } - } - - test {FLUSHALL should replicate} { - r -1 flushall - if {$::valgrind} {after 2000} - list [r -1 dbsize] [r 0 dbsize] - } {0 0} - - test {ROLE in master reports master with a slave} { - set res [r -1 role] - lassign $res role offset slaves - assert {$role eq {master}} - assert {$offset > 0} - assert {[llength $slaves] == 1} - lassign [lindex $slaves 0] master_host master_port slave_offset - assert {$slave_offset <= $offset} - } - - test {ROLE in slave reports slave in connected state} { - set res [r role] - lassign $res role master_host master_port slave_state slave_offset - assert {$role eq {slave}} - assert {$slave_state eq {connected}} - } - } -} - -foreach dl {no yes} { - start_server {tags {"repl"}} { - set master [srv 0 client] - $master config set repl-diskless-sync $dl - set master_host [srv 0 host] - set master_port [srv 0 port] - set slaves {} - set load_handle0 [start_write_load $master_host $master_port 3] - set load_handle1 [start_write_load $master_host $master_port 5] - set load_handle2 [start_write_load $master_host $master_port 20] - set load_handle3 [start_write_load $master_host $master_port 8] - set load_handle4 [start_write_load $master_host $master_port 4] - start_server {} { - lappend slaves [srv 0 client] - start_server {} { - lappend slaves [srv 0 client] - start_server {} { - lappend slaves [srv 0 client] - test "Connect multiple slaves at the same time (issue #141), diskless=$dl" { - # Send SLAVEOF commands to slaves - [lindex $slaves 0] slaveof $master_host $master_port - [lindex $slaves 1] slaveof $master_host $master_port - [lindex $slaves 2] slaveof $master_host $master_port - - # Wait for all the three slaves to reach the "online" - # state from the POV of the master. - set retry 500 - while {$retry} { - set info [r -3 info] - if {[string match {*slave0:*state=online*slave1:*state=online*slave2:*state=online*} $info]} { - break - } else { - incr retry -1 - after 100 - } - } - if {$retry == 0} { - error "assertion:Slaves not correctly synchronized" - } - - # Wait that slaves acknowledge they are online so - # we are sure that DBSIZE and DEBUG DIGEST will not - # fail because of timing issues. 
- wait_for_condition 500 100 { - [lindex [[lindex $slaves 0] role] 3] eq {connected} && - [lindex [[lindex $slaves 1] role] 3] eq {connected} && - [lindex [[lindex $slaves 2] role] 3] eq {connected} - } else { - fail "Slaves still not connected after some time" - } - - # Stop the write load - stop_write_load $load_handle0 - stop_write_load $load_handle1 - stop_write_load $load_handle2 - stop_write_load $load_handle3 - stop_write_load $load_handle4 - - # Make sure that slaves and master have same - # number of keys - wait_for_condition 500 100 { - [$master dbsize] == [[lindex $slaves 0] dbsize] && - [$master dbsize] == [[lindex $slaves 1] dbsize] && - [$master dbsize] == [[lindex $slaves 2] dbsize] - } else { - fail "Different number of keys between masted and slave after too long time." - } - - # Check digests - set digest [$master debug digest] - set digest0 [[lindex $slaves 0] debug digest] - set digest1 [[lindex $slaves 1] debug digest] - set digest2 [[lindex $slaves 2] debug digest] - assert {$digest ne 0000000000000000000000000000000000000000} - assert {$digest eq $digest0} - assert {$digest eq $digest1} - assert {$digest eq $digest2} - } - } - } - } - } -} diff --git a/tools/pika_migrate/tests/sentinel/run.tcl b/tools/pika_migrate/tests/sentinel/run.tcl deleted file mode 100644 index f330299599..0000000000 --- a/tools/pika_migrate/tests/sentinel/run.tcl +++ /dev/null @@ -1,22 +0,0 @@ -# Sentinel test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com -# This software is released under the BSD License. See the COPYING file for -# more information. - -cd tests/sentinel -source ../instances.tcl - -set ::instances_count 5 ; # How many instances we use at max. - -proc main {} { - parse_options - spawn_instance sentinel $::sentinel_base_port $::instances_count - spawn_instance redis $::redis_base_port $::instances_count - run_tests - cleanup -} - -if {[catch main e]} { - puts $::errorInfo - cleanup - exit 1 -} diff --git a/tools/pika_migrate/tests/sentinel/tests/00-base.tcl b/tools/pika_migrate/tests/sentinel/tests/00-base.tcl deleted file mode 100644 index a79d0c371c..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/00-base.tcl +++ /dev/null @@ -1,126 +0,0 @@ -# Check the basic monitoring and failover capabilities. 
-
-source "../tests/includes/init-tests.tcl"
-
-if {$::simulate_error} {
-    test "This test will fail" {
-        fail "Simulated error"
-    }
-}
-
-test "Basic failover works if the master is down" {
-    set old_port [RI $master_id tcp_port]
-    set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
-    assert {[lindex $addr 1] == $old_port}
-    kill_instance redis $master_id
-    foreach_sentinel_id id {
-        wait_for_condition 1000 50 {
-            [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
-        } else {
-            fail "At least one Sentinel did not receive failover info"
-        }
-    }
-    restart_instance redis $master_id
-    set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
-    set master_id [get_instance_id_by_port redis [lindex $addr 1]]
-}
-
-test "New master [join $addr {:}] role matches" {
-    assert {[RI $master_id role] eq {master}}
-}
-
-test "All the other slaves now point to the new master" {
-    foreach_redis_id id {
-        if {$id != $master_id && $id != 0} {
-            wait_for_condition 1000 50 {
-                [RI $id master_port] == [lindex $addr 1]
-            } else {
-                fail "Redis ID $id not configured to replicate with new master"
-            }
-        }
-    }
-}
-
-test "The old master eventually gets reconfigured as a slave" {
-    wait_for_condition 1000 50 {
-        [RI 0 master_port] == [lindex $addr 1]
-    } else {
-        fail "Old master not reconfigured as slave of new master"
-    }
-}
-
-test "ODOWN is not possible without N (quorum) Sentinels reports" {
-    foreach_sentinel_id id {
-        S $id SENTINEL SET mymaster quorum [expr $sentinels+1]
-    }
-    set old_port [RI $master_id tcp_port]
-    set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
-    assert {[lindex $addr 1] == $old_port}
-    kill_instance redis $master_id
-
-    # Make sure failover did not happen.
-    set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
-    assert {[lindex $addr 1] == $old_port}
-    restart_instance redis $master_id
-}
-
-test "Failover is not possible without majority agreement" {
-    foreach_sentinel_id id {
-        S $id SENTINEL SET mymaster quorum $quorum
-    }
-
-    # Crash majority of sentinels
-    for {set id 0} {$id < $quorum} {incr id} {
-        kill_instance sentinel $id
-    }
-
-    # Kill the current master
-    kill_instance redis $master_id
-
-    # Make sure failover did not happen.
-    set addr [S $quorum SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
-    assert {[lindex $addr 1] == $old_port}
-    restart_instance redis $master_id
-
-    # Cleanup: restart Sentinels to monitor the master.
-    for {set id 0} {$id < $quorum} {incr id} {
-        restart_instance sentinel $id
-    }
-}
-
-test "Failover works if we configure for absolute agreement" {
-    foreach_sentinel_id id {
-        S $id SENTINEL SET mymaster quorum $sentinels
-    }
-
-    # Wait for Sentinels to monitor the master again
-    foreach_sentinel_id id {
-        wait_for_condition 1000 50 {
-            [dict get [S $id SENTINEL MASTER mymaster] info-refresh] < 100000
-        } else {
-            fail "At least one Sentinel is not monitoring the master"
-        }
-    }
-
-    kill_instance redis $master_id
-
-    foreach_sentinel_id id {
-        wait_for_condition 1000 50 {
-            [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
-        } else {
-            fail "At least one Sentinel did not receive failover info"
-        }
-    }
-    restart_instance redis $master_id
-    set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
-    set master_id [get_instance_id_by_port redis [lindex $addr 1]]
-
-    # Set the min ODOWN agreement back to strict majority.
-    foreach_sentinel_id id {
-        S $id SENTINEL SET mymaster quorum $quorum
-    }
-}
-
-test "New master [join $addr {:}] role matches" {
-    assert {[RI $master_id role] eq {master}}
-}
diff --git a/tools/pika_migrate/tests/sentinel/tests/01-conf-update.tcl b/tools/pika_migrate/tests/sentinel/tests/01-conf-update.tcl
deleted file mode 100644
index 4998104d2f..0000000000
--- a/tools/pika_migrate/tests/sentinel/tests/01-conf-update.tcl
+++ /dev/null
@@ -1,39 +0,0 @@
-# Test Sentinel configuration consistency after partitions heal.
-
-source "../tests/includes/init-tests.tcl"
-
-test "We can failover with Sentinel 1 crashed" {
-    set old_port [RI $master_id tcp_port]
-    set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
-    assert {[lindex $addr 1] == $old_port}
-
-    # Crash Sentinel 1
-    kill_instance sentinel 1
-
-    kill_instance redis $master_id
-    foreach_sentinel_id id {
-        if {$id != 1} {
-            wait_for_condition 1000 50 {
-                [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
-            } else {
-                fail "Sentinel $id did not receive failover info"
-            }
-        }
-    }
-    restart_instance redis $master_id
-    set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
-    set master_id [get_instance_id_by_port redis [lindex $addr 1]]
-}
-
-test "After Sentinel 1 is restarted, its config gets updated" {
-    restart_instance sentinel 1
-    wait_for_condition 1000 50 {
-        [lindex [S 1 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
-    } else {
-        fail "Restarted Sentinel did not receive failover info"
-    }
-}
-
-test "New master [join $addr {:}] role matches" {
-    assert {[RI $master_id role] eq {master}}
-}
diff --git a/tools/pika_migrate/tests/sentinel/tests/02-slaves-reconf.tcl b/tools/pika_migrate/tests/sentinel/tests/02-slaves-reconf.tcl
deleted file mode 100644
index fa15d2efde..0000000000
--- a/tools/pika_migrate/tests/sentinel/tests/02-slaves-reconf.tcl
+++ /dev/null
@@ -1,84 +0,0 @@
-# Check that slaves are reconfigured at a later time if they are partitioned.
-#
-# Here we should test:
-# 1) That slaves point to the new master after failover.
-# 2) That partitioned slaves point to new master when they are partitioned
-#    away during failover and return at a later time.
- -source "../tests/includes/init-tests.tcl" - -proc 02_test_slaves_replication {} { - uplevel 1 { - test "Check that slaves replicate from current master" { - set master_port [RI $master_id tcp_port] - foreach_redis_id id { - if {$id == $master_id} continue - if {[instance_is_killed redis $id]} continue - wait_for_condition 1000 50 { - ([RI $id master_port] == $master_port) && - ([RI $id master_link_status] eq {up}) - } else { - fail "Redis slave $id is replicating from wrong master" - } - } - } - } -} - -proc 02_crash_and_failover {} { - uplevel 1 { - test "Crash the master and force a failover" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - kill_instance redis $master_id - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not received failover info" - } - } - restart_instance redis $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] - } - } -} - -02_test_slaves_replication -02_crash_and_failover -02_test_slaves_replication - -test "Kill a slave instance" { - foreach_redis_id id { - if {$id == $master_id} continue - set killed_slave_id $id - kill_instance redis $id - break - } -} - -02_crash_and_failover -02_test_slaves_replication - -test "Wait for failover to end" { - set inprogress 1 - while {$inprogress} { - set inprogress 0 - foreach_sentinel_id id { - if {[dict exists [S $id SENTINEL MASTER mymaster] failover-state]} { - incr inprogress - } - } - if {$inprogress} {after 100} - } -} - -test "Restart killed slave and test replication of slaves again..." { - restart_instance redis $killed_slave_id -} - -# Now we check if the slave rejoining the partition is reconfigured even -# if the failover finished. -02_test_slaves_replication diff --git a/tools/pika_migrate/tests/sentinel/tests/03-runtime-reconf.tcl b/tools/pika_migrate/tests/sentinel/tests/03-runtime-reconf.tcl deleted file mode 100644 index 426596c37e..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/03-runtime-reconf.tcl +++ /dev/null @@ -1 +0,0 @@ -# Test runtime reconfiguration command SENTINEL SET. diff --git a/tools/pika_migrate/tests/sentinel/tests/04-slave-selection.tcl b/tools/pika_migrate/tests/sentinel/tests/04-slave-selection.tcl deleted file mode 100644 index 3d2ca64845..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/04-slave-selection.tcl +++ /dev/null @@ -1,5 +0,0 @@ -# Test slave selection algorithm. -# -# This unit should test: -# 1) That when there are no suitable slaves no failover is performed. -# 2) That among the available slaves, the one with better offset is picked. 
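The deleted 04-slave-selection.tcl above was only a stub describing its two intended checks. For reference, a minimal sketch of check 2 in this suite's own idiom might look like the following. This is hypothetical code that was never part of the tree: it reuses the suite's S/RI/kill_instance/restart_instance/get_instance_id_by_port helpers and the master_id set up by init-tests.tcl, assumes the slaves expose the standard slave_repl_offset INFO field, and ignores slave priority, which Sentinel also weighs during selection.

# Hypothetical sketch (not part of the deleted file): among the live
# slaves, the one with the highest replication offset should be promoted.
source "../tests/includes/init-tests.tcl"

test "Slave with the best replication offset is promoted" {
    # Record each slave's offset right before killing the master.
    set best_offset -1
    set best_id -1
    foreach_redis_id id {
        if {$id == $master_id} continue
        set offset [RI $id slave_repl_offset]
        if {$offset > $best_offset} {
            set best_offset $offset
            set best_id $id
        }
    }
    set old_port [RI $master_id tcp_port]
    kill_instance redis $master_id
    foreach_sentinel_id id {
        wait_for_condition 1000 50 {
            [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
        } else {
            fail "At least one Sentinel did not receive failover info"
        }
    }
    # The promoted instance should be the most up-to-date slave.
    set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
    assert {[get_instance_id_by_port redis [lindex $addr 1]] == $best_id}
    restart_instance redis $master_id
}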
diff --git a/tools/pika_migrate/tests/sentinel/tests/05-manual.tcl b/tools/pika_migrate/tests/sentinel/tests/05-manual.tcl deleted file mode 100644 index 1a60d814b3..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/05-manual.tcl +++ /dev/null @@ -1,44 +0,0 @@ -# Test manual failover - -source "../tests/includes/init-tests.tcl" - -test "Manual failover works" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - S 0 SENTINEL FAILOVER mymaster - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not received failover info" - } - } - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] -} - -test "New master [join $addr {:}] role matches" { - assert {[RI $master_id role] eq {master}} -} - -test "All the other slaves now point to the new master" { - foreach_redis_id id { - if {$id != $master_id && $id != 0} { - wait_for_condition 1000 50 { - [RI $id master_port] == [lindex $addr 1] - } else { - fail "Redis ID $id not configured to replicate with new master" - } - } - } -} - -test "The old master eventually gets reconfigured as a slave" { - wait_for_condition 1000 50 { - [RI 0 master_port] == [lindex $addr 1] - } else { - fail "Old master not reconfigured as slave of new master" - } -} - diff --git a/tools/pika_migrate/tests/sentinel/tests/includes/init-tests.tcl b/tools/pika_migrate/tests/sentinel/tests/includes/init-tests.tcl deleted file mode 100644 index c8165dcfa9..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/includes/init-tests.tcl +++ /dev/null @@ -1,72 +0,0 @@ -# Initialization tests -- most units will start including this. - -test "(init) Restart killed instances" { - foreach type {redis sentinel} { - foreach_${type}_id id { - if {[get_instance_attrib $type $id pid] == -1} { - puts -nonewline "$type/$id " - flush stdout - restart_instance $type $id - } - } - } -} - -test "(init) Remove old master entry from sentinels" { - foreach_sentinel_id id { - catch {S $id SENTINEL REMOVE mymaster} - } -} - -set redis_slaves 4 -test "(init) Create a master-slaves cluster of [expr $redis_slaves+1] instances" { - create_redis_master_slave_cluster [expr {$redis_slaves+1}] -} -set master_id 0 - -test "(init) Sentinels can start monitoring a master" { - set sentinels [llength $::sentinel_instances] - set quorum [expr {$sentinels/2+1}] - foreach_sentinel_id id { - S $id SENTINEL MONITOR mymaster \ - [get_instance_attrib redis $master_id host] \ - [get_instance_attrib redis $master_id port] $quorum - } - foreach_sentinel_id id { - assert {[S $id sentinel master mymaster] ne {}} - S $id SENTINEL SET mymaster down-after-milliseconds 2000 - S $id SENTINEL SET mymaster failover-timeout 20000 - S $id SENTINEL SET mymaster parallel-syncs 10 - } -} - -test "(init) Sentinels can talk with the master" { - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [catch {S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster}] == 0 - } else { - fail "Sentinel $id can't talk with the master." 
- } - } -} - -test "(init) Sentinels are able to auto-discover other sentinels" { - set sentinels [llength $::sentinel_instances] - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] num-other-sentinels] == ($sentinels-1) - } else { - fail "At least some sentinel can't detect some other sentinel" - } - } -} - -test "(init) Sentinels are able to auto-discover slaves" { - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] num-slaves] == $redis_slaves - } else { - fail "At least some sentinel can't detect some slave" - } - } -} diff --git a/tools/pika_migrate/tests/sentinel/tmp/.gitignore b/tools/pika_migrate/tests/sentinel/tmp/.gitignore deleted file mode 100644 index f581f73e2d..0000000000 --- a/tools/pika_migrate/tests/sentinel/tmp/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -redis_* -sentinel_* diff --git a/tools/pika_migrate/tests/support/redis.tcl b/tools/pika_migrate/tests/support/redis.tcl deleted file mode 100644 index 7c78360812..0000000000 --- a/tools/pika_migrate/tests/support/redis.tcl +++ /dev/null @@ -1,294 +0,0 @@ -# Tcl clinet library - used by test-redis.tcl script for now -# Copyright (C) 2009 Salvatore Sanfilippo -# Released under the BSD license like Redis itself -# -# Example usage: -# -# set r [redis 127.0.0.1 6379] -# $r lpush mylist foo -# $r lpush mylist bar -# $r lrange mylist 0 -1 -# $r close -# -# Non blocking usage example: -# -# proc handlePong {r type reply} { -# puts "PONG $type '$reply'" -# if {$reply ne "PONG"} { -# $r ping [list handlePong] -# } -# } -# -# set r [redis] -# $r blocking 0 -# $r get fo [list handlePong] -# -# vwait forever - -package require Tcl 8.5 -package provide redis 0.1 - -namespace eval redis {} -set ::redis::id 0 -array set ::redis::fd {} -array set ::redis::addr {} -array set ::redis::blocking {} -array set ::redis::deferred {} -array set ::redis::reconnect {} -array set ::redis::callback {} -array set ::redis::state {} ;# State in non-blocking reply reading -array set ::redis::statestack {} ;# Stack of states, for nested mbulks - -proc redis {{server 127.0.0.1} {port 6379} {defer 0}} { - set fd [socket $server $port] - fconfigure $fd -translation binary - set id [incr ::redis::id] - set ::redis::fd($id) $fd - set ::redis::addr($id) [list $server $port] - set ::redis::blocking($id) 1 - set ::redis::deferred($id) $defer - set ::redis::reconnect($id) 0 - ::redis::redis_reset_state $id - interp alias {} ::redis::redisHandle$id {} ::redis::__dispatch__ $id -} - -# This is a wrapper to the actual dispatching procedure that handles -# reconnection if needed. -proc ::redis::__dispatch__ {id method args} { - set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval] - if {$errorcode && $::redis::reconnect($id) && $::redis::fd($id) eq {}} { - # Try again if the connection was lost. - # FIXME: we don't re-select the previously selected DB, nor we check - # if we are inside a transaction that needs to be re-issued from - # scratch. - set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval] - } - return -code $errorcode $retval -} - -proc ::redis::__dispatch__raw__ {id method argv} { - set fd $::redis::fd($id) - - # Reconnect the link if needed. 
- if {$fd eq {}} { - lassign $::redis::addr($id) host port - set ::redis::fd($id) [socket $host $port] - fconfigure $::redis::fd($id) -translation binary - set fd $::redis::fd($id) - } - - set blocking $::redis::blocking($id) - set deferred $::redis::deferred($id) - if {$blocking == 0} { - if {[llength $argv] == 0} { - error "Please provide a callback in non-blocking mode" - } - set callback [lindex $argv end] - set argv [lrange $argv 0 end-1] - } - if {[info command ::redis::__method__$method] eq {}} { - set cmd "*[expr {[llength $argv]+1}]\r\n" - append cmd "$[string length $method]\r\n$method\r\n" - foreach a $argv { - append cmd "$[string length $a]\r\n$a\r\n" - } - ::redis::redis_write $fd $cmd - if {[catch {flush $fd}]} { - set ::redis::fd($id) {} - return -code error "I/O error reading reply" - } - - if {!$deferred} { - if {$blocking} { - ::redis::redis_read_reply $id $fd - } else { - # Every well formed reply read will pop an element from this - # list and use it as a callback. So pipelining is supported - # in non blocking mode. - lappend ::redis::callback($id) $callback - fileevent $fd readable [list ::redis::redis_readable $fd $id] - } - } - } else { - uplevel 1 [list ::redis::__method__$method $id $fd] $argv - } -} - -proc ::redis::__method__blocking {id fd val} { - set ::redis::blocking($id) $val - fconfigure $fd -blocking $val -} - -proc ::redis::__method__reconnect {id fd val} { - set ::redis::reconnect($id) $val -} - -proc ::redis::__method__read {id fd} { - ::redis::redis_read_reply $id $fd -} - -proc ::redis::__method__write {id fd buf} { - ::redis::redis_write $fd $buf -} - -proc ::redis::__method__flush {id fd} { - flush $fd -} - -proc ::redis::__method__close {id fd} { - catch {close $fd} - catch {unset ::redis::fd($id)} - catch {unset ::redis::addr($id)} - catch {unset ::redis::blocking($id)} - catch {unset ::redis::deferred($id)} - catch {unset ::redis::reconnect($id)} - catch {unset ::redis::state($id)} - catch {unset ::redis::statestack($id)} - catch {unset ::redis::callback($id)} - catch {interp alias {} ::redis::redisHandle$id {}} -} - -proc ::redis::__method__channel {id fd} { - return $fd -} - -proc ::redis::__method__deferred {id fd val} { - set ::redis::deferred($id) $val -} - -proc ::redis::redis_write {fd buf} { - puts -nonewline $fd $buf -} - -proc ::redis::redis_writenl {fd buf} { - redis_write $fd $buf - redis_write $fd "\r\n" - flush $fd -} - -proc ::redis::redis_readnl {fd len} { - set buf [read $fd $len] - read $fd 2 ; # discard CR LF - return $buf -} - -proc ::redis::redis_bulk_read {fd} { - set count [redis_read_line $fd] - if {$count == -1} return {} - set buf [redis_readnl $fd $count] - return $buf -} - -proc ::redis::redis_multi_bulk_read {id fd} { - set count [redis_read_line $fd] - if {$count == -1} return {} - set l {} - set err {} - for {set i 0} {$i < $count} {incr i} { - if {[catch { - lappend l [redis_read_reply $id $fd] - } e] && $err eq {}} { - set err $e - } - } - if {$err ne {}} {return -code error $err} - return $l -} - -proc ::redis::redis_read_line fd { - string trim [gets $fd] -} - -proc ::redis::redis_read_reply {id fd} { - set type [read $fd 1] - switch -exact -- $type { - : - - + {redis_read_line $fd} - - {return -code error [redis_read_line $fd]} - $ {redis_bulk_read $fd} - * {redis_multi_bulk_read $id $fd} - default { - if {$type eq {}} { - set ::redis::fd($id) {} - return -code error "I/O error reading reply" - } - return -code error "Bad protocol, '$type' as reply type byte" - } - } -} - -proc ::redis::redis_reset_state id { 
- set ::redis::state($id) [dict create buf {} mbulk -1 bulk -1 reply {}] - set ::redis::statestack($id) {} -} - -proc ::redis::redis_call_callback {id type reply} { - set cb [lindex $::redis::callback($id) 0] - set ::redis::callback($id) [lrange $::redis::callback($id) 1 end] - uplevel #0 $cb [list ::redis::redisHandle$id $type $reply] - ::redis::redis_reset_state $id -} - -# Read a reply in non-blocking mode. -proc ::redis::redis_readable {fd id} { - if {[eof $fd]} { - redis_call_callback $id eof {} - ::redis::__method__close $id $fd - return - } - if {[dict get $::redis::state($id) bulk] == -1} { - set line [gets $fd] - if {$line eq {}} return ;# No complete line available, return - switch -exact -- [string index $line 0] { - : - - + {redis_call_callback $id reply [string range $line 1 end-1]} - - {redis_call_callback $id err [string range $line 1 end-1]} - $ { - dict set ::redis::state($id) bulk \ - [expr [string range $line 1 end-1]+2] - if {[dict get $::redis::state($id) bulk] == 1} { - # We got a $-1, hack the state to play well with this. - dict set ::redis::state($id) bulk 2 - dict set ::redis::state($id) buf "\r\n" - ::redis::redis_readable $fd $id - } - } - * { - dict set ::redis::state($id) mbulk [string range $line 1 end-1] - # Handle *-1 - if {[dict get $::redis::state($id) mbulk] == -1} { - redis_call_callback $id reply {} - } - } - default { - redis_call_callback $id err \ - "Bad protocol, $type as reply type byte" - } - } - } else { - set totlen [dict get $::redis::state($id) bulk] - set buflen [string length [dict get $::redis::state($id) buf]] - set toread [expr {$totlen-$buflen}] - set data [read $fd $toread] - set nread [string length $data] - dict append ::redis::state($id) buf $data - # Check if we read a complete bulk reply - if {[string length [dict get $::redis::state($id) buf]] == - [dict get $::redis::state($id) bulk]} { - if {[dict get $::redis::state($id) mbulk] == -1} { - redis_call_callback $id reply \ - [string range [dict get $::redis::state($id) buf] 0 end-2] - } else { - dict with ::redis::state($id) { - lappend reply [string range $buf 0 end-2] - incr mbulk -1 - set bulk -1 - } - if {[dict get $::redis::state($id) mbulk] == 0} { - redis_call_callback $id reply \ - [dict get $::redis::state($id) reply] - } - } - } - } -} diff --git a/tools/pika_migrate/tests/support/server.tcl b/tools/pika_migrate/tests/support/server.tcl deleted file mode 100644 index c7777fe5d3..0000000000 --- a/tools/pika_migrate/tests/support/server.tcl +++ /dev/null @@ -1,337 +0,0 @@ -set ::global_overrides {} -set ::tags {} -set ::valgrind_errors {} - -proc start_server_error {config_file error} { - set err {} - append err "Cant' start the Redis server\n" - append err "CONFIGURATION:" - append err [exec cat $config_file] - append err "\nERROR:" - append err [string trim $error] - send_data_packet $::test_server_fd err $err -} - -proc check_valgrind_errors stderr { - set fd [open $stderr] - set buf [read $fd] - close $fd - - if {[regexp -- { at 0x} $buf] || - (![regexp -- {definitely lost: 0 bytes} $buf] && - ![regexp -- {no leaks are possible} $buf])} { - send_data_packet $::test_server_fd err "Valgrind error: $buf\n" - } -} - -proc kill_server config { - # nothing to kill when running against external server - if {$::external} return - - # nevermind if its already dead - if {![is_alive $config]} { return } - set pid [dict get $config pid] - - # check for leaks - if {![dict exists $config "skipleaks"]} { - catch { - if {[string match {*Darwin*} [exec uname -a]]} { - tags {"leaks"} { 
- test "Check for memory leaks (pid $pid)" { - set output {0 leaks} - catch {exec leaks $pid} output - if {[string match {*process does not exist*} $output] || - [string match {*cannot examine*} $output]} { - # In a few tests we kill the server process. - set output "0 leaks" - } - set output - } {*0 leaks*} - } - } - } - } - - # kill server and wait for the process to be totally exited - catch {exec kill $pid} - while {[is_alive $config]} { - incr wait 10 - - if {$wait >= 5000} { - puts "Forcing process $pid to exit..." - catch {exec kill -KILL $pid} - } elseif {$wait % 1000 == 0} { - puts "Waiting for process $pid to exit..." - } - after 10 - } - - # Check valgrind errors if needed - if {$::valgrind} { - check_valgrind_errors [dict get $config stderr] - } - - # Remove this pid from the set of active pids in the test server. - send_data_packet $::test_server_fd server-killed $pid -} - -proc is_alive config { - set pid [dict get $config pid] - if {[catch {exec ps -p $pid} err]} { - return 0 - } else { - return 1 - } -} - -proc ping_server {host port} { - set retval 0 - if {[catch { - set fd [socket $host $port] - fconfigure $fd -translation binary - puts $fd "PING\r\n" - flush $fd - set reply [gets $fd] - if {[string range $reply 0 0] eq {+} || - [string range $reply 0 0] eq {-}} { - set retval 1 - } - close $fd - } e]} { - if {$::verbose} { - puts -nonewline "." - } - } else { - if {$::verbose} { - puts -nonewline "ok" - } - } - return $retval -} - -# Return 1 if the server at the specified addr is reachable by PING, otherwise -# returns 0. Performs a try every 50 milliseconds for the specified number -# of retries. -proc server_is_up {host port retrynum} { - after 10 ;# Use a small delay to make likely a first-try success. - set retval 0 - while {[incr retrynum -1]} { - if {[catch {ping_server $host $port} ping]} { - set ping 0 - } - if {$ping} {return 1} - after 50 - } - return 0 -} - -# doesn't really belong here, but highly coupled to code in start_server -proc tags {tags code} { - set ::tags [concat $::tags $tags] - uplevel 1 $code - set ::tags [lrange $::tags 0 end-[llength $tags]] -} - -proc start_server {options {code undefined}} { - # If we are running against an external server, we just push the - # host/port pair in the stack the first time - if {$::external} { - if {[llength $::servers] == 0} { - set srv {} - dict set srv "host" $::host - dict set srv "port" $::port - set client [redis $::host $::port] - dict set srv "client" $client - $client select 9 - - # append the server to the stack - lappend ::servers $srv - } - uplevel 1 $code - return - } - - # setup defaults - set baseconfig "default.conf" - set overrides {} - set tags {} - - # parse options - foreach {option value} $options { - switch $option { - "config" { - set baseconfig $value } - "overrides" { - set overrides $value } - "tags" { - set tags $value - set ::tags [concat $::tags $value] } - default { - error "Unknown option $option" } - } - } - - set data [split [exec cat "tests/assets/$baseconfig"] "\n"] - set config {} - foreach line $data { - if {[string length $line] > 0 && [string index $line 0] ne "#"} { - set elements [split $line " "] - set directive [lrange $elements 0 0] - set arguments [lrange $elements 1 end] - dict set config $directive $arguments - } - } - - # use a different directory every time a server is started - dict set config dir [tmpdir server] - - # start every server on a different port - set ::port [find_available_port [expr {$::port+1}]] - dict set config port $::port - - # apply 
overrides from global space and arguments
-    foreach {directive arguments} [concat $::global_overrides $overrides] {
-        dict set config $directive $arguments
-    }
-
-    # write new configuration to temporary file
-    set config_file [tmpfile redis.conf]
-    set fp [open $config_file w+]
-    foreach directive [dict keys $config] {
-        if {$directive == "port"} {
-            puts -nonewline $fp "$directive : "
-            puts $fp [dict get $config $directive]
-        } elseif {$directive == "requirepass"} {
-            puts $fp "$directive :"
-        } elseif {$directive == "dump_prefix"} {
-            puts $fp "$directive :"
-        } else {
-            puts -nonewline $fp "$directive "
-            puts $fp [dict get $config $directive]
-        }
-    }
-    close $fp
-
-    set stdout [format "%s/%s" [dict get $config "dir"] "stdout"]
-    set stderr [format "%s/%s" [dict get $config "dir"] "stderr"]
-
-    if {$::valgrind} {
-        set pid [exec valgrind --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &]
-    } else {
-        set pid [exec src/redis-server -c $config_file > $stdout 2> $stderr &]
-        #set pid [exec src/redis-server $config_file > $stdout 2> $stderr &]
-    }
-
-    puts "Starting ---- "
-
-    # Tell the test server about this new instance.
-    send_data_packet $::test_server_fd server-spawned $pid
-
-    # check that the server actually started
-    # ugly but tries to be as fast as possible...
-    if {$::valgrind} {set retrynum 1000} else {set retrynum 100}
-
-    if {$::verbose} {
-        puts -nonewline "=== ($tags) Starting server ${::host}:${::port} "
-    }
-
-    if {$code ne "undefined"} {
-        set serverisup [server_is_up $::host $::port $retrynum]
-    } else {
-        set serverisup 1
-    }
-
-    if {$::verbose} {
-        puts ""
-    }
-
-    if {!$serverisup} {
-        set err {}
-        append err [exec cat $stdout] "\n" [exec cat $stderr]
-        start_server_error $config_file $err
-        return
-    }
-
-    puts "Before Wait"
-    # Wait for actual startup
-    #while {![info exists _pid]} {
-    #    regexp {PID:\s(\d+)} [exec cat $stdout] _ _pid
-    #    after 100
-    #}
-    puts "After Wait"
-
-    # setup properties to be able to initialize a client object
-    set host $::host
-    set port $::port
-    if {[dict exists $config bind]} { set host [dict get $config bind] }
-    if {[dict exists $config port]} { set port [dict get $config port] }
-
-    # setup config dict
-    dict set srv "config_file" $config_file
-    dict set srv "config" $config
-    dict set srv "pid" $pid
-    dict set srv "host" $host
-    dict set srv "port" $port
-    dict set srv "stdout" $stdout
-    dict set srv "stderr" $stderr
-
-    # if a block of code is supplied, we wait for the server to become
-    # available, create a client object and kill the server afterwards
-    if {$code ne "undefined"} {
-        set line [exec head -n1 $stdout]
-        if {[string match {*already in use*} $line]} {
-            error_and_quit $config_file $line
-        }
-
-        while 1 {
-            # check that the server actually started and is ready for connections
-            if {[exec grep "going to start" | wc -l < $stderr] > 0} {
-                break
-            }
-            puts "Waiting for server startup marker in stderr..."
-            after 10
-        }
-
-        # append the server to the stack
-        lappend ::servers $srv
-
-        # connect client (after server dict is put on the stack)
-        reconnect
-
-        # execute provided block
-        set num_tests $::num_tests
-        if {[catch { uplevel 1 $code } error]} {
-            set backtrace $::errorInfo
-
-            # Kill the server without checking for leaks
-            dict set srv "skipleaks" 1
-            kill_server $srv
-
-            # Print warnings from log
-            puts [format "\nLogged warnings (pid %d):" [dict get $srv "pid"]]
-            set warnings [warnings_from_file [dict get $srv "stdout"]]
-            if {[string length $warnings] >
0} { - puts "$warnings" - } else { - puts "(none)" - } - puts "" - - error $error $backtrace - } - - # Don't do the leak check when no tests were run - if {$num_tests == $::num_tests} { - dict set srv "skipleaks" 1 - } - - # pop the server object - set ::servers [lrange $::servers 0 end-1] - - set ::tags [lrange $::tags 0 end-[llength $tags]] - kill_server $srv - } else { - set ::tags [lrange $::tags 0 end-[llength $tags]] - set _ $srv - } -} diff --git a/tools/pika_migrate/tests/support/test.tcl b/tools/pika_migrate/tests/support/test.tcl deleted file mode 100644 index 7d390cc47a..0000000000 --- a/tools/pika_migrate/tests/support/test.tcl +++ /dev/null @@ -1,130 +0,0 @@ -set ::num_tests 0 -set ::num_passed 0 -set ::num_failed 0 -set ::tests_failed {} - -proc fail {msg} { - error "assertion:$msg" -} - -proc assert {condition} { - if {![uplevel 1 [list expr $condition]]} { - error "assertion:Expected condition '$condition' to be true ([uplevel 1 [list subst -nocommands $condition]])" - } -} - -proc assert_match {pattern value} { - if {![string match $pattern $value]} { - error "assertion:Expected '$value' to match '$pattern'" - } -} - -proc assert_equal {expected value} { - if {$expected ne $value} { - error "assertion:Expected '$value' to be equal to '$expected'" - } -} - -proc assert_error {pattern code} { - if {[catch {uplevel 1 $code} error]} { - assert_match $pattern $error - } else { - error "assertion:Expected an error but nothing was caught" - } -} - -proc assert_encoding {enc key} { - # Swapped out values don't have an encoding, so make sure that - # the value is swapped in before checking the encoding. - set dbg [r debug object $key] - while {[string match "* swapped at:*" $dbg]} { - r debug swapin $key - set dbg [r debug object $key] - } - assert_match "* encoding:$enc *" $dbg -} - -proc assert_type {type key} { - assert_equal $type [r type $key] -} - -# Wait for the specified condition to be true, with the specified number of -# max retries and delay between retries. Otherwise the 'elsescript' is -# executed. -proc wait_for_condition {maxtries delay e _else_ elsescript} { - while {[incr maxtries -1] >= 0} { - set errcode [catch {uplevel 1 [list expr $e]} result] - if {$errcode == 0} { - if {$result} break - } else { - return -code $errcode $result - } - after $delay - } - if {$maxtries == -1} { - set errcode [catch [uplevel 1 $elsescript] result] - return -code $errcode $result - } -} - -proc test {name code {okpattern undefined}} { - # abort if tagged with a tag to deny - foreach tag $::denytags { - if {[lsearch $::tags $tag] >= 0} { - return - } - } - - # check if tagged with at least 1 tag to allow when there *is* a list - # of tags to allow, because default policy is to run everything - if {[llength $::allowtags] > 0} { - set matched 0 - foreach tag $::allowtags { - if {[lsearch $::tags $tag] >= 0} { - incr matched - } - } - if {$matched < 1} { - return - } - } - - incr ::num_tests - set details {} - lappend details "$name in $::curfile" - - send_data_packet $::test_server_fd testing $name - - if {[catch {set retval [uplevel 1 $code]} error]} { - if {[string match "assertion:*" $error]} { - set msg [string range $error 10 end] - lappend details $msg - lappend ::tests_failed $details - - incr ::num_failed - send_data_packet $::test_server_fd err [join $details "\n"] - } else { - # Re-raise, let handler up the stack take care of this. 
- error $error $::errorInfo - } - } else { - if {$okpattern eq "undefined" || $okpattern eq $retval || [string match $okpattern $retval]} { - incr ::num_passed - send_data_packet $::test_server_fd ok $name - } else { - set msg "Expected '$okpattern' to equal or match '$retval'" - lappend details $msg - lappend ::tests_failed $details - - incr ::num_failed - send_data_packet $::test_server_fd err [join $details "\n"] - } - } - - if {$::traceleaks} { - set output [exec leaks redis-server] - if {![string match {*0 leaks*} $output]} { - send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" - } - } -} diff --git a/tools/pika_migrate/tests/support/tmpfile.tcl b/tools/pika_migrate/tests/support/tmpfile.tcl deleted file mode 100644 index 809f587306..0000000000 --- a/tools/pika_migrate/tests/support/tmpfile.tcl +++ /dev/null @@ -1,15 +0,0 @@ -set ::tmpcounter 0 -set ::tmproot "./tests/tmp" -file mkdir $::tmproot - -# returns a dirname unique to this process to write to -proc tmpdir {basename} { - set dir [file join $::tmproot $basename.[pid].[incr ::tmpcounter]] - file mkdir $dir - set _ $dir -} - -# return a filename unique to this process to write to -proc tmpfile {basename} { - file join $::tmproot $basename.[pid].[incr ::tmpcounter] -} diff --git a/tools/pika_migrate/tests/support/util.tcl b/tools/pika_migrate/tests/support/util.tcl deleted file mode 100644 index cd5b9b511f..0000000000 --- a/tools/pika_migrate/tests/support/util.tcl +++ /dev/null @@ -1,371 +0,0 @@ -proc randstring {min max {type binary}} { - set len [expr {$min+int(rand()*($max-$min+1))}] - set output {} - if {$type eq {binary}} { - set minval 0 - set maxval 255 - } elseif {$type eq {alpha}} { - set minval 48 - set maxval 122 - } elseif {$type eq {compr}} { - set minval 48 - set maxval 52 - } - while {$len} { - append output [format "%c" [expr {$minval+int(rand()*($maxval-$minval+1))}]] - incr len -1 - } - return $output -} - -# Useful for some test -proc zlistAlikeSort {a b} { - if {[lindex $a 0] > [lindex $b 0]} {return 1} - if {[lindex $a 0] < [lindex $b 0]} {return -1} - string compare [lindex $a 1] [lindex $b 1] -} - -# Return all log lines starting with the first line that contains a warning. -# Generally, this will be an assertion error with a stack trace. -proc warnings_from_file {filename} { - set lines [split [exec cat $filename] "\n"] - set matched 0 - set logall 0 - set result {} - foreach line $lines { - if {[string match {*REDIS BUG REPORT START*} $line]} { - set logall 1 - } - if {[regexp {^\[\d+\]\s+\d+\s+\w+\s+\d{2}:\d{2}:\d{2} \#} $line]} { - set matched 1 - } - if {$logall || $matched} { - lappend result $line - } - } - join $result "\n" -} - -# Return value for INFO property -proc status {r property} { - if {[regexp "\r\n$property:(.*?)\r\n" [{*}$r info] _ value]} { - set _ $value - } -} - -proc waitForBgsave r { - while 1 { - if {[status r rdb_bgsave_in_progress] eq 1} { - if {$::verbose} { - puts -nonewline "\nWaiting for background save to finish... " - flush stdout - } - after 1000 - } else { - break - } - } -} - -proc waitForBgrewriteaof r { - while 1 { - if {[status r aof_rewrite_in_progress] eq 1} { - if {$::verbose} { - puts -nonewline "\nWaiting for background AOF rewrite to finish... " - flush stdout - } - after 1000 - } else { - break - } - } -} - -proc wait_for_sync r { - while 1 { - if {[status $r master_link_status] eq "down"} { - after 10 - } else { - break - } - } -} - -# Random integer between 0 and max (excluded). 
-proc randomInt {max} { - expr {int(rand()*$max)} -} - -# Random signed integer between -max and max (both extremes excluded). -proc randomSignedInt {max} { - set i [randomInt $max] - if {rand() > 0.5} { - set i -$i - } - return $i -} - -proc randpath args { - set path [expr {int(rand()*[llength $args])}] - uplevel 1 [lindex $args $path] -} - -proc randomValue {} { - randpath { - # Small enough to likely collide - randomSignedInt 1000 - } { - # 32 bit compressible signed/unsigned - randpath {randomSignedInt 2000000000} {randomSignedInt 4000000000} - } { - # 64 bit - randpath {randomSignedInt 1000000000000} - } { - # Random string - randpath {randstring 0 256 alpha} \ - {randstring 0 256 compr} \ - {randstring 0 256 binary} - } -} - -proc randomKey {} { - randpath { - # Small enough to likely collide - randomInt 1000 - } { - # 32 bit compressible signed/unsigned - randpath {randomInt 2000000000} {randomInt 4000000000} - } { - # 64 bit - randpath {randomInt 1000000000000} - } { - # Random string - randpath {randstring 1 256 alpha} \ - {randstring 1 256 compr} - } -} - -proc findKeyWithType {r type} { - for {set j 0} {$j < 20} {incr j} { - set k [{*}$r randomkey] - if {$k eq {}} { - return {} - } - if {[{*}$r type $k] eq $type} { - return $k - } - } - return {} -} - -proc createComplexDataset {r ops {opt {}}} { - for {set j 0} {$j < $ops} {incr j} { - set k [randomKey] - set k2 [randomKey] - set f [randomValue] - set v [randomValue] - - if {[lsearch -exact $opt useexpire] != -1} { - if {rand() < 0.1} { - {*}$r expire [randomKey] [randomInt 2] - } - } - - randpath { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - randpath {set d +inf} {set d -inf} - } - set t [{*}$r type $k] - - if {$t eq {none}} { - randpath { - {*}$r set $k $v - } { - {*}$r lpush $k $v - } { - {*}$r sadd $k $v - } { - {*}$r zadd $k $d $v - } { - {*}$r hset $k $f $v - } { - {*}$r del $k - } - set t [{*}$r type $k] - } - - switch $t { - {string} { - # Nothing to do - } - {list} { - randpath {{*}$r lpush $k $v} \ - {{*}$r rpush $k $v} \ - {{*}$r lrem $k 0 $v} \ - {{*}$r rpop $k} \ - {{*}$r lpop $k} - } - {set} { - randpath {{*}$r sadd $k $v} \ - {{*}$r srem $k $v} \ - { - set otherset [findKeyWithType {*}$r set] - if {$otherset ne {}} { - randpath { - {*}$r sunionstore $k2 $k $otherset - } { - {*}$r sinterstore $k2 $k $otherset - } { - {*}$r sdiffstore $k2 $k $otherset - } - } - } - } - {zset} { - randpath {{*}$r zadd $k $d $v} \ - {{*}$r zrem $k $v} \ - { - set otherzset [findKeyWithType {*}$r zset] - if {$otherzset ne {}} { - randpath { - {*}$r zunionstore $k2 2 $k $otherzset - } { - {*}$r zinterstore $k2 2 $k $otherzset - } - } - } - } - {hash} { - randpath {{*}$r hset $k $f $v} \ - {{*}$r hdel $k $f} - } - } - } -} - -proc formatCommand {args} { - set cmd "*[llength $args]\r\n" - foreach a $args { - append cmd "$[string length $a]\r\n$a\r\n" - } - set _ $cmd -} - -proc csvdump r { - set o {} - foreach k [lsort [{*}$r keys *]] { - set type [{*}$r type $k] - append o [csvstring $k] , [csvstring $type] , - switch $type { - string { - append o [csvstring [{*}$r get $k]] "\n" - } - list { - foreach e [{*}$r lrange $k 0 -1] { - append o [csvstring $e] , - } - append o "\n" - } - set { - foreach e [lsort [{*}$r smembers $k]] { - append o [csvstring $e] , - } - append o "\n" - } - zset { - foreach e [{*}$r zrange $k 0 -1 withscores] { - append o [csvstring $e] , - } - append o "\n" - } - hash { - set fields [{*}$r hgetall $k] - 
set newfields {} - foreach {k v} $fields { - lappend newfields [list $k $v] - } - set fields [lsort -index 0 $newfields] - foreach kv $fields { - append o [csvstring [lindex $kv 0]] , - append o [csvstring [lindex $kv 1]] , - } - append o "\n" - } - } - } - return $o -} - -proc csvstring s { - return "\"$s\"" -} - -proc roundFloat f { - format "%.10g" $f -} - -proc find_available_port start { - for {set j $start} {$j < $start+1024} {incr j} { - if {[catch { - set fd [socket 127.0.0.1 $j] - }]} { - return $j - } else { - close $fd - } - } - if {$j == $start+1024} { - error "Can't find a non busy port in the $start-[expr {$start+1023}] range." - } -} - -# Test if TERM looks like to support colors -proc color_term {} { - expr {[info exists ::env(TERM)] && [string match *xterm* $::env(TERM)]} -} - -proc colorstr {color str} { - if {[color_term]} { - set b 0 - if {[string range $color 0 4] eq {bold-}} { - set b 1 - set color [string range $color 5 end] - } - switch $color { - red {set colorcode {31}} - green {set colorcode {32}} - yellow {set colorcode {33}} - blue {set colorcode {34}} - magenta {set colorcode {35}} - cyan {set colorcode {36}} - white {set colorcode {37}} - default {set colorcode {37}} - } - if {$colorcode ne {}} { - return "\033\[$b;${colorcode};49m$str\033\[0m" - } - } else { - return $str - } -} - -# Execute a background process writing random data for the specified number -# of seconds to the specified Redis instance. -proc start_write_load {host port seconds} { - set tclsh [info nameofexecutable] - exec $tclsh tests/helpers/gen_write_load.tcl $host $port $seconds & -} - -# Stop a process generating write load executed with start_write_load. -proc stop_write_load {handle} { - catch {exec /bin/kill -9 $handle} -} diff --git a/tools/pika_migrate/tests/test_helper.tcl b/tools/pika_migrate/tests/test_helper.tcl deleted file mode 100644 index d1ebde1c48..0000000000 --- a/tools/pika_migrate/tests/test_helper.tcl +++ /dev/null @@ -1,545 +0,0 @@ -# Redis test suite. Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com -# This software is released under the BSD License. See the COPYING file for -# more information. - -package require Tcl 8.5 - -set tcl_precision 17 -source tests/support/redis.tcl -source tests/support/server.tcl -source tests/support/tmpfile.tcl -source tests/support/test.tcl -source tests/support/util.tcl - -set ::all_tests { - unit/printver - unit/auth - unit/protocol - unit/basic - unit/scan - unit/type/list - unit/type/list-2 - unit/type/list-3 - unit/type/set - unit/type/zset - unit/type/hash - unit/sort - unit/expire - unit/other - unit/multi - unit/quit - unit/aofrw - integration/replication - integration/replication-2 - integration/replication-3 - integration/replication-4 - integration/replication-psync - integration/aof - integration/rdb - integration/convert-zipmap-hash-on-load - unit/pubsub - unit/slowlog - unit/scripting - unit/maxmemory - unit/introspection - unit/limits - unit/obuf-limits - unit/dump - unit/bitops - unit/memefficiency - unit/hyperloglog -} -# Index to the next test to run in the ::all_tests list. 
-set ::next_test 0 - -set ::host 127.0.0.1 -set ::port 21111 -set ::traceleaks 0 -set ::valgrind 0 -set ::verbose 0 -set ::quiet 0 -set ::denytags {} -set ::allowtags {} -set ::external 0; # If "1" this means, we are running against external instance -set ::file ""; # If set, runs only the tests in this comma separated list -set ::curfile ""; # Hold the filename of the current suite -set ::accurate 0; # If true runs fuzz tests with more iterations -set ::force_failure 0 -set ::timeout 600; # 10 minutes without progresses will quit the test. -set ::last_progress [clock seconds] -set ::active_servers {} ; # Pids of active Redis instances. - -# Set to 1 when we are running in client mode. The Redis test uses a -# server-client model to run tests simultaneously. The server instance -# runs the specified number of client instances that will actually run tests. -# The server is responsible of showing the result to the user, and exit with -# the appropriate exit code depending on the test outcome. -set ::client 0 -set ::numclients 16 - -proc execute_tests name { - set path "tests/$name.tcl" - set ::curfile $path - source $path - send_data_packet $::test_server_fd done "$name" -} - -# Setup a list to hold a stack of server configs. When calls to start_server -# are nested, use "srv 0 pid" to get the pid of the inner server. To access -# outer servers, use "srv -1 pid" etcetera. -set ::servers {} -proc srv {args} { - set level 0 - if {[string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set property [lindex $args 1] - } else { - set property [lindex $args 0] - } - set srv [lindex $::servers end+$level] - dict get $srv $property -} - -# Provide easy access to the client for the inner server. It's possible to -# prepend the argument list with a negative level to access clients for -# servers running in outer blocks. -proc r {args} { - set level 0 - if {[string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set args [lrange $args 1 end] - } - [srv $level "client"] {*}$args -} - -proc reconnect {args} { - set level [lindex $args 0] - if {[string length $level] == 0 || ![string is integer $level]} { - set level 0 - } - - set srv [lindex $::servers end+$level] - set host [dict get $srv "host"] - set port [dict get $srv "port"] - set config [dict get $srv "config"] - set client [redis $host $port] - dict set srv "client" $client - - # select the right db when we don't have to authenticate - if {![dict exists $config "requirepass"]} { - $client select 9 - } - - # re-set $srv in the servers list - lset ::servers end+$level $srv -} - -proc redis_deferring_client {args} { - set level 0 - if {[llength $args] > 0 && [string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set args [lrange $args 1 end] - } - - # create client that defers reading reply - set client [redis [srv $level "host"] [srv $level "port"] 1] - - # select the right db and read the response (OK) - $client select 9 - $client read - return $client -} - -# Provide easy access to INFO properties. Same semantic as "proc r". -proc s {args} { - set level 0 - if {[string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set args [lrange $args 1 end] - } - status [srv $level "client"] [lindex $args 0] -} - -proc cleanup {} { - if {!$::quiet} {puts -nonewline "Cleanup: may take some time... 
"} - flush stdout - catch {exec rm -rf {*}[glob tests/tmp/redis.conf.*]} - catch {exec rm -rf {*}[glob tests/tmp/server.*]} - if {!$::quiet} {puts "OK"} -} - -proc test_server_main {} { - cleanup - set tclsh [info nameofexecutable] - # Open a listening socket, trying different ports in order to find a - # non busy one. - set port [find_available_port 11111] - if {!$::quiet} { - puts "Starting test server at port $port" - } - socket -server accept_test_clients -myaddr 127.0.0.1 $port - - # Start the client instances - set ::clients_pids {} - set start_port [expr {$::port+100}] - for {set j 0} {$j < $::numclients} {incr j} { - set start_port [find_available_port $start_port] - set p [exec $tclsh [info script] {*}$::argv \ - --client $port --port $start_port &] - lappend ::clients_pids $p - incr start_port 10 - } - - # Setup global state for the test server - set ::idle_clients {} - set ::active_clients {} - array set ::active_clients_task {} - array set ::clients_start_time {} - set ::clients_time_history {} - set ::failed_tests {} - - # Enter the event loop to handle clients I/O - after 100 test_server_cron - vwait forever -} - -# This function gets called 10 times per second. -proc test_server_cron {} { - set elapsed [expr {[clock seconds]-$::last_progress}] - - if {$elapsed > $::timeout} { - set err "\[[colorstr red TIMEOUT]\]: clients state report follows." - puts $err - show_clients_state - kill_clients - force_kill_all_servers - the_end - } - - after 100 test_server_cron -} - -proc accept_test_clients {fd addr port} { - fconfigure $fd -encoding binary - fileevent $fd readable [list read_from_test_client $fd] -} - -# This is the readable handler of our test server. Clients send us messages -# in the form of a status code such and additional data. Supported -# status types are: -# -# ready: the client is ready to execute the command. Only sent at client -# startup. The server will queue the client FD in the list of idle -# clients. -# testing: just used to signal that a given test started. -# ok: a test was executed with success. -# err: a test was executed with an error. -# exception: there was a runtime exception while executing the test. -# done: all the specified test file was processed, this test client is -# ready to accept a new task. 
-proc read_from_test_client fd { - set bytes [gets $fd] - set payload [read $fd $bytes] - foreach {status data} $payload break - set ::last_progress [clock seconds] - - if {$status eq {ready}} { - if {!$::quiet} { - puts "\[$status\]: $data" - } - signal_idle_client $fd - } elseif {$status eq {done}} { - set elapsed [expr {[clock seconds]-$::clients_start_time($fd)}] - set all_tests_count [llength $::all_tests] - set running_tests_count [expr {[llength $::active_clients]-1}] - set completed_tests_count [expr {$::next_test-$running_tests_count}] - puts "\[$completed_tests_count/$all_tests_count [colorstr yellow $status]\]: $data ($elapsed seconds)" - lappend ::clients_time_history $elapsed $data - signal_idle_client $fd - set ::active_clients_task($fd) DONE - } elseif {$status eq {ok}} { - if {!$::quiet} { - puts "\[[colorstr green $status]\]: $data" - } - set ::active_clients_task($fd) "(OK) $data" - } elseif {$status eq {err}} { - set err "\[[colorstr red $status]\]: $data" - puts $err - lappend ::failed_tests $err - set ::active_clients_task($fd) "(ERR) $data" - } elseif {$status eq {exception}} { - puts "\[[colorstr red $status]\]: $data" - kill_clients - force_kill_all_servers - exit 1 - } elseif {$status eq {testing}} { - set ::active_clients_task($fd) "(IN PROGRESS) $data" - } elseif {$status eq {server-spawned}} { - lappend ::active_servers $data - } elseif {$status eq {server-killed}} { - set ::active_servers [lsearch -all -inline -not -exact $::active_servers $data] - } else { - if {!$::quiet} { - puts "\[$status\]: $data" - } - } -} - -proc show_clients_state {} { - # The following loop is only useful for debugging tests that may - # enter an infinite loop. Commented out normally. - foreach x $::active_clients { - if {[info exist ::active_clients_task($x)]} { - puts "$x => $::active_clients_task($x)" - } else { - puts "$x => ???" - } - } -} - -proc kill_clients {} { - foreach p $::clients_pids { - catch {exec kill $p} - } -} - -proc force_kill_all_servers {} { - foreach p $::active_servers { - puts "Killing still running Redis server $p" - catch {exec kill -9 $p} - } -} - -# A new client is idle. Remove it from the list of active clients and -# if there are still test units to run, launch them. -proc signal_idle_client fd { - # Remove this fd from the list of active clients. - set ::active_clients \ - [lsearch -all -inline -not -exact $::active_clients $fd] - - if 0 {show_clients_state} - - # New unit to process? - if {$::next_test != [llength $::all_tests]} { - if {!$::quiet} { - puts [colorstr bold-white "Testing [lindex $::all_tests $::next_test]"] - set ::active_clients_task($fd) "ASSIGNED: $fd ([lindex $::all_tests $::next_test])" - } - set ::clients_start_time($fd) [clock seconds] - send_data_packet $fd run [lindex $::all_tests $::next_test] - lappend ::active_clients $fd - incr ::next_test - } else { - lappend ::idle_clients $fd - if {[llength $::active_clients] == 0} { - the_end - } - } -} - -# The the_end function gets called when all the test units have been -# executed, so the whole test run is finished. -proc the_end {} { - # TODO: print the status, exit with the right exit code. - puts "\n The End\n" - puts "Execution time of different units:" - foreach {time name} $::clients_time_history { - puts " $time seconds - $name" - } - if {[llength $::failed_tests]} { - puts "\n[colorstr bold-red {!!! 
WARNING}] The following tests failed:\n" - foreach failed $::failed_tests { - puts "*** $failed" - } - cleanup - exit 1 - } else { - puts "\n[colorstr bold-white {\o/}] [colorstr bold-green {All tests passed without errors!}]\n" - cleanup - exit 0 - } -} - -# The client is not event-driven (the test server is, instead), as we just need -# to read the command, execute it, and reply... all in a loop. -proc test_client_main server_port { - set ::test_server_fd [socket localhost $server_port] - fconfigure $::test_server_fd -encoding binary - send_data_packet $::test_server_fd ready [pid] - while 1 { - set bytes [gets $::test_server_fd] - set payload [read $::test_server_fd $bytes] - foreach {cmd data} $payload break - if {$cmd eq {run}} { - execute_tests $data - } else { - error "Unknown test client command: $cmd" - } - } -} - -proc send_data_packet {fd status data} { - set payload [list $status $data] - puts $fd [string length $payload] - puts -nonewline $fd $payload - flush $fd -} - -proc print_help_screen {} { - puts [join { - "--valgrind Run the test over valgrind." - "--accurate Run slow randomized tests for more iterations." - "--quiet Don't show individual tests." - "--single Just execute the specified unit (see next option)." - "--list-tests List all the available test units." - "--clients Number of test clients (default 16)." - "--timeout Test timeout in seconds (default 10 min)." - "--force-failure Force the execution of a test that always fails." - "--help Print this help screen." - } "\n"] -} - -# parse arguments -for {set j 0} {$j < [llength $argv]} {incr j} { - set opt [lindex $argv $j] - set arg [lindex $argv [expr $j+1]] - if {$opt eq {--tags}} { - foreach tag $arg { - if {[string index $tag 0] eq "-"} { - lappend ::denytags [string range $tag 1 end] - } else { - lappend ::allowtags $tag - } - } - incr j - } elseif {$opt eq {--valgrind}} { - set ::valgrind 1 - } elseif {$opt eq {--quiet}} { - set ::quiet 1 - } elseif {$opt eq {--host}} { - set ::external 1 - set ::host $arg - incr j - } elseif {$opt eq {--port}} { - set ::port $arg - incr j - } elseif {$opt eq {--accurate}} { - set ::accurate 1 - } elseif {$opt eq {--force-failure}} { - set ::force_failure 1 - } elseif {$opt eq {--single}} { - set ::all_tests $arg - incr j - } elseif {$opt eq {--list-tests}} { - foreach t $::all_tests { - puts $t - } - exit 0 - } elseif {$opt eq {--client}} { - set ::client 1 - set ::test_server_port $arg - incr j - } elseif {$opt eq {--clients}} { - set ::numclients $arg - incr j - } elseif {$opt eq {--timeout}} { - set ::timeout $arg - incr j - } elseif {$opt eq {--help}} { - print_help_screen - exit 0 - } else { - puts "Wrong argument: $opt" - exit 1 - } -} - -proc attach_to_replication_stream {} { - set s [socket [srv 0 "host"] [srv 0 "port"]] - fconfigure $s -translation binary - puts -nonewline $s "SYNC\r\n" - flush $s - - # Get the count - set count [gets $s] - set prefix [string range $count 0 0] - if {$prefix ne {$}} { - error "attach_to_replication_stream error. Received '$count' as count." - } - set count [string range $count 1 end] - - # Consume the bulk payload - while {$count} { - set buf [read $s $count] - set count [expr {$count-[string length $buf]}] - } - return $s -} - -proc read_from_replication_stream {s} { - fconfigure $s -blocking 0 - set attempt 0 - while {[gets $s count] == -1} { - if {[incr attempt] == 10} return "" - after 100 - } - fconfigure $s -blocking 1 - set count [string range $count 1 end] - - # Return a list of arguments for the command. 
- set res {} - for {set j 0} {$j < $count} {incr j} { - read $s 1 - set arg [::redis::redis_bulk_read $s] - if {$j == 0} {set arg [string tolower $arg]} - lappend res $arg - } - return $res -} - -proc assert_replication_stream {s patterns} { - for {set j 0} {$j < [llength $patterns]} {incr j} { - assert_match [lindex $patterns $j] [read_from_replication_stream $s] - } -} - -proc close_replication_stream {s} { - close $s -} - -# With the parallel test running multiple Redis instances at the same time -# we need a fast enough computer, otherwise a lot of tests may generate -# false positives. -# If the computer is too slow we revert to a sequential test without any -# parallelism, that is, clients == 1. -proc is_a_slow_computer {} { - set start [clock milliseconds] - for {set j 0} {$j < 1000000} {incr j} {} - set elapsed [expr [clock milliseconds]-$start] - expr {$elapsed > 200} -} - -if {$::client} { - if {[catch { test_client_main $::test_server_port } err]} { - set estr "Executing test client: $err.\n$::errorInfo" - if {[catch {send_data_packet $::test_server_fd exception $estr}]} { - puts $estr - } - exit 1 - } -} else { - if {[is_a_slow_computer]} { - puts "** SLOW COMPUTER ** Using a single client to avoid false positives." - set ::numclients 1 - } - - if {[catch { test_server_main } err]} { - if {[string length $err] > 0} { - # only display error when not generated by the test suite - if {$err ne "exception"} { - puts $::errorInfo - } - exit 1 - } - } -} diff --git a/tools/pika_migrate/tests/unit/aofrw.tcl b/tools/pika_migrate/tests/unit/aofrw.tcl deleted file mode 100644 index a2d74168f3..0000000000 --- a/tools/pika_migrate/tests/unit/aofrw.tcl +++ /dev/null @@ -1,210 +0,0 @@ -start_server {tags {"aofrw"}} { - # Enable the AOF - r config set appendonly yes - r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite. - waitForBgrewriteaof r - - test {AOF rewrite during write load} { - # Start a write load for 10 seconds - set master [srv 0 client] - set master_host [srv 0 host] - set master_port [srv 0 port] - set load_handle0 [start_write_load $master_host $master_port 10] - set load_handle1 [start_write_load $master_host $master_port 10] - set load_handle2 [start_write_load $master_host $master_port 10] - set load_handle3 [start_write_load $master_host $master_port 10] - set load_handle4 [start_write_load $master_host $master_port 10] - - # Make sure the instance is really receiving data - wait_for_condition 50 100 { - [r dbsize] > 0 - } else { - fail "No write load detected." - } - - # After 3 seconds, start a rewrite, while the write load is still - # active. - after 3000 - r bgrewriteaof - waitForBgrewriteaof r - - # Let it run a bit more so that we'll append some data to the new - # AOF. - after 1000 - - # Stop the processes generating the load if they are still active - stop_write_load $load_handle0 - stop_write_load $load_handle1 - stop_write_load $load_handle2 - stop_write_load $load_handle3 - stop_write_load $load_handle4 - - # Make sure that we remain the only connected client. - # This step is needed to make sure there are no pending writes - # that will be processed between the two "debug digest" calls. 
- wait_for_condition 50 100 { - [llength [split [string trim [r client list]] "\n"]] == 1 - } else { - puts [r client list] - fail "Clients generating loads are not disconnecting" - } - - # Get the data set digest - set d1 [r debug digest] - - # Load the AOF - r debug loadaof - set d2 [r debug digest] - - # Make sure they are the same - assert {$d1 eq $d2} - } -} - -start_server {tags {"aofrw"}} { - test {Turning off AOF kills the background writing child if any} { - r config set appendonly yes - waitForBgrewriteaof r - r multi - r bgrewriteaof - r config set appendonly no - r exec - wait_for_condition 50 100 { - [string match {*Killing*AOF*child*} [exec tail -n5 < [srv 0 stdout]]] - } else { - fail "Can't find 'Killing AOF child' into recent logs" - } - } - - foreach d {string int} { - foreach e {ziplist linkedlist} { - test "AOF rewrite of list with $e encoding, $d data" { - r flushall - if {$e eq {ziplist}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r lpush key $data - } - assert_equal [r object encoding key] $e - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - foreach d {string int} { - foreach e {intset hashtable} { - test "AOF rewrite of set with $e encoding, $d data" { - r flushall - if {$e eq {intset}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r sadd key $data - } - if {$d ne {string}} { - assert_equal [r object encoding key] $e - } - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - foreach d {string int} { - foreach e {ziplist hashtable} { - test "AOF rewrite of hash with $e encoding, $d data" { - r flushall - if {$e eq {ziplist}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r hset key $data $data - } - assert_equal [r object encoding key] $e - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - foreach d {string int} { - foreach e {ziplist skiplist} { - test "AOF rewrite of zset with $e encoding, $d data" { - r flushall - if {$e eq {ziplist}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r zadd key [expr rand()] $data - } - assert_equal [r object encoding key] $e - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - test {BGREWRITEAOF is delayed if BGSAVE is in progress} { - r multi - r bgsave - r bgrewriteaof - r info persistence - set res [r exec] - assert_match {*scheduled*} [lindex $res 1] - assert_match {*aof_rewrite_scheduled:1*} [lindex $res 2] - while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} { - after 100 - } - } - - test {BGREWRITEAOF is refused 
if already in progress} { - catch { - r multi - r bgrewriteaof - r bgrewriteaof - r exec - } e - assert_match {*ERR*already*} $e - while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} { - after 100 - } - } -} diff --git a/tools/pika_migrate/tests/unit/auth.tcl b/tools/pika_migrate/tests/unit/auth.tcl deleted file mode 100644 index 633cda95c9..0000000000 --- a/tools/pika_migrate/tests/unit/auth.tcl +++ /dev/null @@ -1,27 +0,0 @@ -start_server {tags {"auth"}} { - test {AUTH fails if there is no password configured server side} { - catch {r auth foo} err - set _ $err - } {ERR*no password*} -} - -start_server {tags {"auth"} overrides {requirepass foobar}} { - test {AUTH fails when a wrong password is given} { - catch {r auth wrong!} err - set _ $err - } {ERR*invalid password} - - test {Arbitrary command gives an error when AUTH is required} { - catch {r set foo bar} err - set _ $err - } {NOAUTH*} - - test {AUTH succeeds when the right password is given} { - r auth foobar - } {OK} - - test {Once AUTH succeeded we can actually send commands to the server} { - r set foo 100 - r incr foo - } {101} -} diff --git a/tools/pika_migrate/tests/unit/basic.tcl b/tools/pika_migrate/tests/unit/basic.tcl deleted file mode 100644 index 6f725d299b..0000000000 --- a/tools/pika_migrate/tests/unit/basic.tcl +++ /dev/null @@ -1,783 +0,0 @@ -start_server {tags {"basic"}} { - test {DEL all keys to start with a clean DB} { - foreach key [r keys *] {r del $key} - r dbsize - } {0} - - test {SET and GET an item} { - r set x foobar - r get x - } {foobar} - - test {SET and GET an empty item} { - r set x {} - r get x - } {} - - test {DEL against a single item} { - r del x - r get x - } {} - - test {Vararg DEL} { - r set foo1 a - r set foo2 b - r set foo3 c - list [r del foo1 foo2 foo3 foo4] [r mget foo1 foo2 foo3] - } {3 {{} {} {}}} - - test {KEYS with pattern} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r set $key hello - } - lsort [r keys foo*] - } {foo_a foo_b foo_c} - - test {KEYS to get all keys} { - lsort [r keys *] - } {foo_a foo_b foo_c key_x key_y key_z} - - test {DBSIZE} { - r dbsize - } {6} - - test {DEL all keys} { - foreach key [r keys *] {r del $key} - r dbsize - } {0} - - test {Very big payload in GET/SET} { - set buf [string repeat "abcd" 1000000] - r set foo $buf - r get foo - } [string repeat "abcd" 1000000] - - tags {"slow"} { - test {Very big payload random access} { - set err {} - array set payload {} - for {set j 0} {$j < 100} {incr j} { - set size [expr 1+[randomInt 100000]] - set buf [string repeat "pl-$j" $size] - set payload($j) $buf - r set bigpayload_$j $buf - } - for {set j 0} {$j < 1000} {incr j} { - set index [randomInt 100] - set buf [r get bigpayload_$index] - if {$buf != $payload($index)} { - set err "Values differ: I set '$payload($index)' but I read back '$buf'" - break - } - } - unset payload - set _ $err - } {} - - test {SET 10000 numeric keys and access all them in reverse order} { - set err {} - for {set x 0} {$x < 10000} {incr x} { - r set $x $x - } - set sum 0 - for {set x 9999} {$x >= 0} {incr x -1} { - set val [r get $x] - if {$val ne $x} { - set err "Element at position $x is $val instead of $x" - break - } - } - set _ $err - } {} - - test {DBSIZE should be 10101 now} { - r dbsize - } {10101} - } - - test {INCR against non existing key} { - set res {} - append res [r incr novar] - append res [r get novar] - } {11} - - test {INCR against key created by incr itself} { - r incr novar - } {2} - - test {INCR against key originally set with SET} { - r 
set novar 100 - r incr novar - } {101} - - test {INCR over 32bit value} { - r set novar 17179869184 - r incr novar - } {17179869185} - - test {INCRBY over 32bit value with over 32bit increment} { - r set novar 17179869184 - r incrby novar 17179869184 - } {34359738368} - - test {INCR fails against key with spaces (left)} { - r set novar " 11" - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against key with spaces (right)} { - r set novar "11 " - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against key with spaces (both)} { - r set novar " 11 " - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against a key holding a list} { - r rpush mylist 1 - catch {r incr mylist} err - r rpop mylist - format $err - } {WRONGTYPE*} - - test {DECRBY over 32bit value with over 32bit increment, negative res} { - r set novar 17179869184 - r decrby novar 17179869185 - } {-1} - - test {INCRBYFLOAT against non existing key} { - r del novar - list [roundFloat [r incrbyfloat novar 1]] \ - [roundFloat [r get novar]] \ - [roundFloat [r incrbyfloat novar 0.25]] \ - [roundFloat [r get novar]] - } {1 1 1.25 1.25} - - test {INCRBYFLOAT against key originally set with SET} { - r set novar 1.5 - roundFloat [r incrbyfloat novar 1.5] - } {3} - - test {INCRBYFLOAT over 32bit value} { - r set novar 17179869184 - r incrbyfloat novar 1.5 - } {17179869185.5} - - test {INCRBYFLOAT over 32bit value with over 32bit increment} { - r set novar 17179869184 - r incrbyfloat novar 17179869184 - } {34359738368} - - test {INCRBYFLOAT fails against key with spaces (left)} { - set err {} - r set novar " 11" - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against key with spaces (right)} { - set err {} - r set novar "11 " - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against key with spaces (both)} { - set err {} - r set novar " 11 " - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against a key holding a list} { - r del mylist - set err {} - r rpush mylist 1 - catch {r incrbyfloat mylist 1.0} err - r del mylist - format $err - } {WRONGTYPE*} - - test {INCRBYFLOAT does not allow NaN or Infinity} { - r set foo 0 - set err {} - catch {r incrbyfloat foo +inf} err - set err - # p.s. no way I can force NaN to test it from the API because - # there is no way to increment / decrement by infinity nor to - # perform divisions. - } {ERR*would produce*} - - test {INCRBYFLOAT decrement} { - r set foo 1 - roundFloat [r incrbyfloat foo -1.1] - } {-0.1} - - test "SETNX target key missing" { - r del novar - assert_equal 1 [r setnx novar foobared] - assert_equal "foobared" [r get novar] - } - - test "SETNX target key exists" { - r set novar foobared - assert_equal 0 [r setnx novar blabla] - assert_equal "foobared" [r get novar] - } - - test "SETNX against not-expired volatile key" { - r set x 10 - r expire x 10000 - assert_equal 0 [r setnx x 20] - assert_equal 10 [r get x] - } - - test "SETNX against expired volatile key" { - # Make it very unlikely for the key this test uses to be expired by the - # active expiry cycle. This is tightly coupled to the implementation of - # active expiry and dbAdd() but currently the only way to test that - # SETNX expires a key when it should have been. - for {set x 0} {$x < 9999} {incr x} { - r setex key-$x 3600 value - } - - # This will be one of 10000 expiring keys. 
A cycle is executed every - # 100ms, sampling 10 keys for being expired or not. This key will be - # expired for at most 1s when we wait 2s, resulting in a total sample - # of 100 keys. The probability of the success of this test being a - # false positive is therefore approx. 1%. - r set x 10 - r expire x 1 - - # Wait for the key to expire - after 2000 - - assert_equal 1 [r setnx x 20] - assert_equal 20 [r get x] - } - - test "DEL against expired key" { - r debug set-active-expire 0 - r setex keyExpire 1 valExpire - after 1100 - assert_equal 0 [r del keyExpire] - r debug set-active-expire 1 - } - - test {EXISTS} { - set res {} - r set newkey test - append res [r exists newkey] - r del newkey - append res [r exists newkey] - } {10} - - test {Zero length value in key. SET/GET/EXISTS} { - r set emptykey {} - set res [r get emptykey] - append res [r exists emptykey] - r del emptykey - append res [r exists emptykey] - } {10} - - test {Commands pipelining} { - set fd [r channel] - puts -nonewline $fd "SET k1 xyzk\r\nGET k1\r\nPING\r\n" - flush $fd - set res {} - append res [string match OK* [r read]] - append res [r read] - append res [string match PONG* [r read]] - format $res - } {1xyzk1} - - test {Non existing command} { - catch {r foobaredcommand} err - string match ERR* $err - } {1} - - test {RENAME basic usage} { - r set mykey hello - r rename mykey mykey1 - r rename mykey1 mykey2 - r get mykey2 - } {hello} - - test {RENAME source key should no longer exist} { - r exists mykey - } {0} - - test {RENAME against already existing key} { - r set mykey a - r set mykey2 b - r rename mykey2 mykey - set res [r get mykey] - append res [r exists mykey2] - } {b0} - - test {RENAMENX basic usage} { - r del mykey - r del mykey2 - r set mykey foobar - r renamenx mykey mykey2 - set res [r get mykey2] - append res [r exists mykey] - } {foobar0} - - test {RENAMENX against already existing key} { - r set mykey foo - r set mykey2 bar - r renamenx mykey mykey2 - } {0} - - test {RENAMENX against already existing key (2)} { - set res [r get mykey] - append res [r get mykey2] - } {foobar} - - test {RENAME against non existing source key} { - catch {r rename nokey foobar} err - format $err - } {ERR*} - - test {RENAME where source and dest key is the same} { - catch {r rename mykey mykey} err - format $err - } {ERR*} - - test {RENAME with volatile key, should move the TTL as well} { - r del mykey mykey2 - r set mykey foo - r expire mykey 100 - assert {[r ttl mykey] > 95 && [r ttl mykey] <= 100} - r rename mykey mykey2 - assert {[r ttl mykey2] > 95 && [r ttl mykey2] <= 100} - } - - test {RENAME with volatile key, should not inherit TTL of target key} { - r del mykey mykey2 - r set mykey foo - r set mykey2 bar - r expire mykey2 100 - assert {[r ttl mykey] == -1 && [r ttl mykey2] > 0} - r rename mykey mykey2 - r ttl mykey2 - } {-1} - - test {DEL all keys again (DB 0)} { - foreach key [r keys *] { - r del $key - } - r dbsize - } {0} - - test {DEL all keys again (DB 1)} { - r select 10 - foreach key [r keys *] { - r del $key - } - set res [r dbsize] - r select 9 - format $res - } {0} - - test {MOVE basic usage} { - r set mykey foobar - r move mykey 10 - set res {} - lappend res [r exists mykey] - lappend res [r dbsize] - r select 10 - lappend res [r get mykey] - lappend res [r dbsize] - r select 9 - format $res - } [list 0 0 foobar 1] - - test {MOVE against key existing in the target DB} { - r set mykey hello - r move mykey 10 - } {0} - - test {MOVE against non-integer DB (#1428)} { - r set mykey hello - catch {r move 
mykey notanumber} e - set e - } {*ERR*index out of range} - - test {SET/GET keys in different DBs} { - r set a hello - r set b world - r select 10 - r set a foo - r set b bared - r select 9 - set res {} - lappend res [r get a] - lappend res [r get b] - r select 10 - lappend res [r get a] - lappend res [r get b] - r select 9 - format $res - } {hello world foo bared} - - test {MGET} { - r flushdb - r set foo BAR - r set bar FOO - r mget foo bar - } {BAR FOO} - - test {MGET against non existing key} { - r mget foo baazz bar - } {BAR {} FOO} - - test {MGET against non-string key} { - r sadd myset ciao - r sadd myset bau - r mget foo baazz bar myset - } {BAR {} FOO {}} - - test {RANDOMKEY} { - r flushdb - r set foo x - r set bar y - set foo_seen 0 - set bar_seen 0 - for {set i 0} {$i < 100} {incr i} { - set rkey [r randomkey] - if {$rkey eq {foo}} { - set foo_seen 1 - } - if {$rkey eq {bar}} { - set bar_seen 1 - } - } - list $foo_seen $bar_seen - } {1 1} - - test {RANDOMKEY against empty DB} { - r flushdb - r randomkey - } {} - - test {RANDOMKEY regression 1} { - r flushdb - r set x 10 - r del x - r randomkey - } {} - - test {GETSET (set new value)} { - list [r getset foo xyz] [r get foo] - } {{} xyz} - - test {GETSET (replace old value)} { - r set foo bar - list [r getset foo xyz] [r get foo] - } {bar xyz} - - test {MSET base case} { - r mset x 10 y "foo bar" z "x x x x x x x\n\n\r\n" - r mget x y z - } [list 10 {foo bar} "x x x x x x x\n\n\r\n"] - - test {MSET wrong number of args} { - catch {r mset x 10 y "foo bar" z} err - format $err - } {*wrong number*} - - test {MSETNX with already existent key} { - list [r msetnx x1 xxx y2 yyy x 20] [r exists x1] [r exists y2] - } {0 0 0} - - test {MSETNX with not existing keys} { - list [r msetnx x1 xxx y2 yyy] [r get x1] [r get y2] - } {1 xxx yyy} - - test "STRLEN against non-existing key" { - assert_equal 0 [r strlen notakey] - } - - test "STRLEN against integer-encoded value" { - r set myinteger -555 - assert_equal 4 [r strlen myinteger] - } - - test "STRLEN against plain string" { - r set mystring "foozzz0123456789 baz" - assert_equal 20 [r strlen mystring] - } - - test "SETBIT against non-existing key" { - r del mykey - assert_equal 0 [r setbit mykey 1 1] - assert_equal [binary format B* 01000000] [r get mykey] - } - - test "SETBIT against string-encoded key" { - # Ascii "@" is integer 64 = 01 00 00 00 - r set mykey "@" - - assert_equal 0 [r setbit mykey 2 1] - assert_equal [binary format B* 01100000] [r get mykey] - assert_equal 1 [r setbit mykey 1 0] - assert_equal [binary format B* 00100000] [r get mykey] - } - - test "SETBIT against integer-encoded key" { - # Ascii "1" is integer 49 = 00 11 00 01 - r set mykey 1 - assert_encoding int mykey - - assert_equal 0 [r setbit mykey 6 1] - assert_equal [binary format B* 00110011] [r get mykey] - assert_equal 1 [r setbit mykey 2 0] - assert_equal [binary format B* 00010011] [r get mykey] - } - - test "SETBIT against key with wrong type" { - r del mykey - r lpush mykey "foo" - assert_error "WRONGTYPE*" {r setbit mykey 0 1} - } - - test "SETBIT with out of range bit offset" { - r del mykey - assert_error "*out of range*" {r setbit mykey [expr 4*1024*1024*1024] 1} - assert_error "*out of range*" {r setbit mykey -1 1} - } - - test "SETBIT with non-bit argument" { - r del mykey - assert_error "*out of range*" {r setbit mykey 0 -1} - assert_error "*out of range*" {r setbit mykey 0 2} - assert_error "*out of range*" {r setbit mykey 0 10} - assert_error "*out of range*" {r setbit mykey 0 20} - } - - test 
"SETBIT fuzzing" { - set str "" - set len [expr 256*8] - r del mykey - - for {set i 0} {$i < 2000} {incr i} { - set bitnum [randomInt $len] - set bitval [randomInt 2] - set fmt [format "%%-%ds%%d%%-s" $bitnum] - set head [string range $str 0 $bitnum-1] - set tail [string range $str $bitnum+1 end] - set str [string map {" " 0} [format $fmt $head $bitval $tail]] - - r setbit mykey $bitnum $bitval - assert_equal [binary format B* $str] [r get mykey] - } - } - - test "GETBIT against non-existing key" { - r del mykey - assert_equal 0 [r getbit mykey 0] - } - - test "GETBIT against string-encoded key" { - # Single byte with 2nd and 3rd bit set - r set mykey "`" - - # In-range - assert_equal 0 [r getbit mykey 0] - assert_equal 1 [r getbit mykey 1] - assert_equal 1 [r getbit mykey 2] - assert_equal 0 [r getbit mykey 3] - - # Out-range - assert_equal 0 [r getbit mykey 8] - assert_equal 0 [r getbit mykey 100] - assert_equal 0 [r getbit mykey 10000] - } - - test "GETBIT against integer-encoded key" { - r set mykey 1 - assert_encoding int mykey - - # Ascii "1" is integer 49 = 00 11 00 01 - assert_equal 0 [r getbit mykey 0] - assert_equal 0 [r getbit mykey 1] - assert_equal 1 [r getbit mykey 2] - assert_equal 1 [r getbit mykey 3] - - # Out-range - assert_equal 0 [r getbit mykey 8] - assert_equal 0 [r getbit mykey 100] - assert_equal 0 [r getbit mykey 10000] - } - - test "SETRANGE against non-existing key" { - r del mykey - assert_equal 3 [r setrange mykey 0 foo] - assert_equal "foo" [r get mykey] - - r del mykey - assert_equal 0 [r setrange mykey 0 ""] - assert_equal 0 [r exists mykey] - - r del mykey - assert_equal 4 [r setrange mykey 1 foo] - assert_equal "\000foo" [r get mykey] - } - - test "SETRANGE against string-encoded key" { - r set mykey "foo" - assert_equal 3 [r setrange mykey 0 b] - assert_equal "boo" [r get mykey] - - r set mykey "foo" - assert_equal 3 [r setrange mykey 0 ""] - assert_equal "foo" [r get mykey] - - r set mykey "foo" - assert_equal 3 [r setrange mykey 1 b] - assert_equal "fbo" [r get mykey] - - r set mykey "foo" - assert_equal 7 [r setrange mykey 4 bar] - assert_equal "foo\000bar" [r get mykey] - } - - test "SETRANGE against integer-encoded key" { - r set mykey 1234 - assert_encoding int mykey - assert_equal 4 [r setrange mykey 0 2] - assert_encoding raw mykey - assert_equal 2234 [r get mykey] - - # Shouldn't change encoding when nothing is set - r set mykey 1234 - assert_encoding int mykey - assert_equal 4 [r setrange mykey 0 ""] - assert_encoding int mykey - assert_equal 1234 [r get mykey] - - r set mykey 1234 - assert_encoding int mykey - assert_equal 4 [r setrange mykey 1 3] - assert_encoding raw mykey - assert_equal 1334 [r get mykey] - - r set mykey 1234 - assert_encoding int mykey - assert_equal 6 [r setrange mykey 5 2] - assert_encoding raw mykey - assert_equal "1234\0002" [r get mykey] - } - - test "SETRANGE against key with wrong type" { - r del mykey - r lpush mykey "foo" - assert_error "WRONGTYPE*" {r setrange mykey 0 bar} - } - - test "SETRANGE with out of range offset" { - r del mykey - assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} - - r set mykey "hello" - assert_error "*out of range*" {r setrange mykey -1 world} - assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} - } - - test "GETRANGE against non-existing key" { - r del mykey - assert_equal "" [r getrange mykey 0 -1] - } - - test "GETRANGE against string value" { - r set mykey "Hello World" - assert_equal "Hell" [r getrange mykey 0 3] 
- assert_equal "Hello World" [r getrange mykey 0 -1] - assert_equal "orld" [r getrange mykey -4 -1] - assert_equal "" [r getrange mykey 5 3] - assert_equal " World" [r getrange mykey 5 5000] - assert_equal "Hello World" [r getrange mykey -5000 10000] - } - - test "GETRANGE against integer-encoded value" { - r set mykey 1234 - assert_equal "123" [r getrange mykey 0 2] - assert_equal "1234" [r getrange mykey 0 -1] - assert_equal "234" [r getrange mykey -3 -1] - assert_equal "" [r getrange mykey 5 3] - assert_equal "4" [r getrange mykey 3 5000] - assert_equal "1234" [r getrange mykey -5000 10000] - } - - test "GETRANGE fuzzing" { - for {set i 0} {$i < 1000} {incr i} { - r set bin [set bin [randstring 0 1024 binary]] - set _start [set start [randomInt 1500]] - set _end [set end [randomInt 1500]] - if {$_start < 0} {set _start "end-[abs($_start)-1]"} - if {$_end < 0} {set _end "end-[abs($_end)-1]"} - assert_equal [string range $bin $_start $_end] [r getrange bin $start $end] - } - } - - test {Extended SET can detect syntax errors} { - set e {} - catch {r set foo bar non-existing-option} e - set e - } {*syntax*} - - test {Extended SET NX option} { - r del foo - set v1 [r set foo 1 nx] - set v2 [r set foo 2 nx] - list $v1 $v2 [r get foo] - } {OK {} 1} - - test {Extended SET XX option} { - r del foo - set v1 [r set foo 1 xx] - r set foo bar - set v2 [r set foo 2 xx] - list $v1 $v2 [r get foo] - } {{} OK 2} - - test {Extended SET EX option} { - r del foo - r set foo bar ex 10 - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {Extended SET PX option} { - r del foo - r set foo bar px 10000 - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {Extended SET using multiple options at once} { - r set foo val - assert {[r set foo bar xx px 10000] eq {OK}} - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {KEYS * two times with long key, Github issue #1208} { - r flushdb - r set dlskeriewrioeuwqoirueioqwrueoqwrueqw test - r keys * - r keys * - } {dlskeriewrioeuwqoirueioqwrueoqwrueqw} - - test {GETRANGE with huge ranges, Github issue #1844} { - r set foo bar - r getrange foo 0 4294967297 - } {bar} -} diff --git a/tools/pika_migrate/tests/unit/bitops.tcl b/tools/pika_migrate/tests/unit/bitops.tcl deleted file mode 100644 index 9751850ad4..0000000000 --- a/tools/pika_migrate/tests/unit/bitops.tcl +++ /dev/null @@ -1,341 +0,0 @@ -# Compare Redis commadns against Tcl implementations of the same commands. 
-proc count_bits s { - binary scan $s b* bits - string length [regsub -all {0} $bits {}] -} - -proc simulate_bit_op {op args} { - set maxlen 0 - set j 0 - set count [llength $args] - foreach a $args { - binary scan $a b* bits - set b($j) $bits - if {[string length $bits] > $maxlen} { - set maxlen [string length $bits] - } - incr j - } - for {set j 0} {$j < $count} {incr j} { - if {[string length $b($j)] < $maxlen} { - append b($j) [string repeat 0 [expr $maxlen-[string length $b($j)]]] - } - } - set out {} - for {set x 0} {$x < $maxlen} {incr x} { - set bit [string range $b(0) $x $x] - if {$op eq {not}} {set bit [expr {!$bit}]} - for {set j 1} {$j < $count} {incr j} { - set bit2 [string range $b($j) $x $x] - switch $op { - and {set bit [expr {$bit & $bit2}]} - or {set bit [expr {$bit | $bit2}]} - xor {set bit [expr {$bit ^ $bit2}]} - } - } - append out $bit - } - binary format b* $out -} - -start_server {tags {"bitops"}} { - test {BITCOUNT returns 0 against non existing key} { - r bitcount no-key - } 0 - - catch {unset num} - foreach vec [list "" "\xaa" "\x00\x00\xff" "foobar" "123"] { - incr num - test "BITCOUNT against test vector #$num" { - r set str $vec - assert {[r bitcount str] == [count_bits $vec]} - } - } - - test {BITCOUNT fuzzing without start/end} { - for {set j 0} {$j < 100} {incr j} { - set str [randstring 0 3000] - r set str $str - assert {[r bitcount str] == [count_bits $str]} - } - } - - test {BITCOUNT fuzzing with start/end} { - for {set j 0} {$j < 100} {incr j} { - set str [randstring 0 3000] - r set str $str - set l [string length $str] - set start [randomInt $l] - set end [randomInt $l] - if {$start > $end} { - lassign [list $end $start] start end - } - assert {[r bitcount str $start $end] == [count_bits [string range $str $start $end]]} - } - } - - test {BITCOUNT with start, end} { - r set s "foobar" - assert_equal [r bitcount s 0 -1] [count_bits "foobar"] - assert_equal [r bitcount s 1 -2] [count_bits "ooba"] - assert_equal [r bitcount s -2 1] [count_bits ""] - assert_equal [r bitcount s 0 1000] [count_bits "foobar"] - } - - test {BITCOUNT syntax error #1} { - catch {r bitcount s 0} e - set e - } {ERR*syntax*} - - test {BITCOUNT regression test for github issue #582} { - r del str - r setbit foo 0 1 - if {[catch {r bitcount foo 0 4294967296} e]} { - assert_match {*ERR*out of range*} $e - set _ 1 - } else { - set e - } - } {1} - - test {BITCOUNT misaligned prefix} { - r del str - r set str ab - r bitcount str 1 -1 - } {3} - - test {BITCOUNT misaligned prefix + full words + remainder} { - r del str - r set str __PPxxxxxxxxxxxxxxxxRR__ - r bitcount str 2 -3 - } {74} - - test {BITOP NOT (empty string)} { - r set s "" - r bitop not dest s - r get dest - } {} - - test {BITOP NOT (known string)} { - r set s "\xaa\x00\xff\x55" - r bitop not dest s - r get dest - } "\x55\xff\x00\xaa" - - test {BITOP where dest and target are the same key} { - r set s "\xaa\x00\xff\x55" - r bitop not s s - r get s - } "\x55\xff\x00\xaa" - - test {BITOP AND|OR|XOR don't change the string with single input key} { - r set a "\x01\x02\xff" - r bitop and res1 a - r bitop or res2 a - r bitop xor res3 a - list [r get res1] [r get res2] [r get res3] - } [list "\x01\x02\xff" "\x01\x02\xff" "\x01\x02\xff"] - - test {BITOP missing key is considered a stream of zero} { - r set a "\x01\x02\xff" - r bitop and res1 no-suck-key a - r bitop or res2 no-suck-key a no-such-key - r bitop xor res3 no-such-key a - list [r get res1] [r get res2] [r get res3] - } [list "\x00\x00\x00" "\x01\x02\xff" "\x01\x02\xff"] - - 
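Before the zero-padding test that follows, here is a small worked example of the padding rule implemented by simulate_bit_op above (the values are illustrative):

# XOR of a 1-byte and a 2-byte operand: the shorter input is padded
# with \x00, so "\xff" behaves as "\xff\x00" here.
set out [simulate_bit_op xor "\xff" "\x0f\xf0"]
# 0xff^0x0f = 0xf0 and 0x00^0xf0 = 0xf0, hence $out eq "\xf0\xf0"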
test {BITOP shorter keys are zero-padded to the key with max length} { - r set a "\x01\x02\xff\xff" - r set b "\x01\x02\xff" - r bitop and res1 a b - r bitop or res2 a b - r bitop xor res3 a b - list [r get res1] [r get res2] [r get res3] - } [list "\x01\x02\xff\x00" "\x01\x02\xff\xff" "\x00\x00\x00\xff"] - - foreach op {and or xor} { - test "BITOP $op fuzzing" { - for {set i 0} {$i < 10} {incr i} { - r flushall - set vec {} - set veckeys {} - set numvec [expr {[randomInt 10]+1}] - for {set j 0} {$j < $numvec} {incr j} { - set str [randstring 0 1000] - lappend vec $str - lappend veckeys vector_$j - r set vector_$j $str - } - r bitop $op target {*}$veckeys - assert_equal [r get target] [simulate_bit_op $op {*}$vec] - } - } - } - - test {BITOP NOT fuzzing} { - for {set i 0} {$i < 10} {incr i} { - r flushall - set str [randstring 0 1000] - r set str $str - r bitop not target str - assert_equal [r get target] [simulate_bit_op not $str] - } - } - - test {BITOP with integer encoded source objects} { - r set a 1 - r set b 2 - r bitop xor dest a b a - r get dest - } {2} - - test {BITOP with non string source key} { - r del c - r set a 1 - r set b 2 - r lpush c foo - catch {r bitop xor dest a b c d} e - set e - } {WRONGTYPE*} - - test {BITOP with empty string after non empty string (issue #529)} { - r flushdb - r set a "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - r bitop or x a b - } {32} - - test {BITPOS bit=0 with empty key returns 0} { - r del str - r bitpos str 0 - } {0} - - test {BITPOS bit=1 with empty key returns -1} { - r del str - r bitpos str 1 - } {-1} - - test {BITPOS bit=0 with string less than 1 word works} { - r set str "\xff\xf0\x00" - r bitpos str 0 - } {12} - - test {BITPOS bit=1 with string less than 1 word works} { - r set str "\x00\x0f\x00" - r bitpos str 1 - } {12} - - test {BITPOS bit=0 starting at unaligned address} { - r set str "\xff\xf0\x00" - r bitpos str 0 1 - } {12} - - test {BITPOS bit=1 starting at unaligned address} { - r set str "\x00\x0f\xff" - r bitpos str 1 1 - } {12} - - test {BITPOS bit=0 unaligned+full word+remainder} { - r del str - r set str "\xff\xff\xff" ; # Prefix - # Followed by three (or six on 32 bit systems) full words - r append str "\xff\xff\xff\xff\xff\xff\xff\xff" - r append str "\xff\xff\xff\xff\xff\xff\xff\xff" - r append str "\xff\xff\xff\xff\xff\xff\xff\xff" - # First zero bit. - r append str "\x0f" - assert {[r bitpos str 0] == 216} - assert {[r bitpos str 0 1] == 216} - assert {[r bitpos str 0 2] == 216} - assert {[r bitpos str 0 3] == 216} - assert {[r bitpos str 0 4] == 216} - assert {[r bitpos str 0 5] == 216} - assert {[r bitpos str 0 6] == 216} - assert {[r bitpos str 0 7] == 216} - assert {[r bitpos str 0 8] == 216} - } - - test {BITPOS bit=1 unaligned+full word+remainder} { - r del str - r set str "\x00\x00\x00" ; # Prefix - # Followed by three (or six on 32 bit systems) full words - r append str "\x00\x00\x00\x00\x00\x00\x00\x00" - r append str "\x00\x00\x00\x00\x00\x00\x00\x00" - r append str "\x00\x00\x00\x00\x00\x00\x00\x00" - # First one bit. 
- r append str "\xf0" - assert {[r bitpos str 1] == 216} - assert {[r bitpos str 1 1] == 216} - assert {[r bitpos str 1 2] == 216} - assert {[r bitpos str 1 3] == 216} - assert {[r bitpos str 1 4] == 216} - assert {[r bitpos str 1 5] == 216} - assert {[r bitpos str 1 6] == 216} - assert {[r bitpos str 1 7] == 216} - assert {[r bitpos str 1 8] == 216} - } - - test {BITPOS bit=1 returns -1 if string is all 0 bits} { - r set str "" - for {set j 0} {$j < 20} {incr j} { - assert {[r bitpos str 1] == -1} - r append str "\x00" - } - } - - test {BITPOS bit=0 works with intervals} { - r set str "\x00\xff\x00" - assert {[r bitpos str 0 0 -1] == 0} - assert {[r bitpos str 0 1 -1] == 16} - assert {[r bitpos str 0 2 -1] == 16} - assert {[r bitpos str 0 2 200] == 16} - assert {[r bitpos str 0 1 1] == -1} - } - - test {BITPOS bit=1 works with intervals} { - r set str "\x00\xff\x00" - assert {[r bitpos str 1 0 -1] == 8} - assert {[r bitpos str 1 1 -1] == 8} - assert {[r bitpos str 1 2 -1] == -1} - assert {[r bitpos str 1 2 200] == -1} - assert {[r bitpos str 1 1 1] == 8} - } - - test {BITPOS bit=0 changes behavior if end is given} { - r set str "\xff\xff\xff" - assert {[r bitpos str 0] == 24} - assert {[r bitpos str 0 0] == 24} - assert {[r bitpos str 0 0 -1] == -1} - } - - test {BITPOS bit=1 fuzzy testing using SETBIT} { - r del str - set max 524288; # 64k - set first_one_pos -1 - for {set j 0} {$j < 1000} {incr j} { - assert {[r bitpos str 1] == $first_one_pos} - set pos [randomInt $max] - r setbit str $pos 1 - if {$first_one_pos == -1 || $first_one_pos > $pos} { - # Update the position of the first 1 bit in the array - # if the bit we set is on the left of the previous one. - set first_one_pos $pos - } - } - } - - test {BITPOS bit=0 fuzzy testing using SETBIT} { - set max 524288; # 64k - set first_zero_pos $max - r set str [string repeat "\xff" [expr $max/8]] - for {set j 0} {$j < 1000} {incr j} { - assert {[r bitpos str 0] == $first_zero_pos} - set pos [randomInt $max] - r setbit str $pos 0 - if {$first_zero_pos > $pos} { - # Update the position of the first 0 bit in the array - # if the bit we clear is on the left of the previous one. 
- set first_zero_pos $pos - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/dump.tcl b/tools/pika_migrate/tests/unit/dump.tcl deleted file mode 100644 index b79c3ba9d0..0000000000 --- a/tools/pika_migrate/tests/unit/dump.tcl +++ /dev/null @@ -1,142 +0,0 @@ -start_server {tags {"dump"}} { - test {DUMP / RESTORE are able to serialize / deserialize a simple key} { - r set foo bar - set encoded [r dump foo] - r del foo - list [r exists foo] [r restore foo 0 $encoded] [r ttl foo] [r get foo] - } {0 OK -1 bar} - - test {RESTORE can set an arbitrary expire to the materialized key} { - r set foo bar - set encoded [r dump foo] - r del foo - r restore foo 5000 $encoded - set ttl [r pttl foo] - assert {$ttl >= 3000 && $ttl <= 5000} - r get foo - } {bar} - - test {RESTORE can set an expire that overflows a 32 bit integer} { - r set foo bar - set encoded [r dump foo] - r del foo - r restore foo 2569591501 $encoded - set ttl [r pttl foo] - assert {$ttl >= (2569591501-3000) && $ttl <= 2569591501} - r get foo - } {bar} - - test {RESTORE returns an error if the key already exists} { - r set foo bar - set e {} - catch {r restore foo 0 "..."} e - set e - } {*is busy*} - - test {DUMP of non existing key returns nil} { - r dump nonexisting_key - } {} - - test {MIGRATE is able to migrate a key between two instances} { - set first [srv 0 client] - r set key "Some Value" - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 1} - assert {[$second exists key] == 0} - set ret [r -1 migrate $second_host $second_port key 9 5000] - assert {$ret eq {OK}} - assert {[$first exists key] == 0} - assert {[$second exists key] == 1} - assert {[$second get key] eq {Some Value}} - assert {[$second ttl key] == -1} - } - } - - test {MIGRATE propagates TTL correctly} { - set first [srv 0 client] - r set key "Some Value" - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 1} - assert {[$second exists key] == 0} - $first expire key 10 - set ret [r -1 migrate $second_host $second_port key 9 5000] - assert {$ret eq {OK}} - assert {[$first exists key] == 0} - assert {[$second exists key] == 1} - assert {[$second get key] eq {Some Value}} - assert {[$second ttl key] >= 7 && [$second ttl key] <= 10} - } - } - - test {MIGRATE can correctly transfer large values} { - set first [srv 0 client] - r del key - for {set j 0} {$j < 5000} {incr j} { - r rpush key 1 2 3 4 5 6 7 8 9 10 - r rpush key "item 1" "item 2" "item 3" "item 4" "item 5" \ - "item 6" "item 7" "item 8" "item 9" "item 10" - } - assert {[string length [r dump key]] > (1024*64)} - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 1} - assert {[$second exists key] == 0} - set ret [r -1 migrate $second_host $second_port key 9 10000] - assert {$ret eq {OK}} - assert {[$first exists key] == 0} - assert {[$second exists key] == 1} - assert {[$second ttl key] == -1} - assert {[$second llen key] == 5000*20} - } - } - - test {MIGRATE can correctly transfer hashes} { - set first [srv 0 client] - r del key - r hmset key field1 "item 1" field2 "item 2" field3 "item 3" \ - field4 "item 4" field5 "item 5" field6 "item 6" - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 
1} - assert {[$second exists key] == 0} - set ret [r -1 migrate $second_host $second_port key 9 10000] - assert {$ret eq {OK}} - assert {[$first exists key] == 0} - assert {[$second exists key] == 1} - assert {[$second ttl key] == -1} - } - } - - test {MIGRATE timeout actually works} { - set first [srv 0 client] - r set key "Some Value" - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 1} - assert {[$second exists key] == 0} - - set rd [redis_deferring_client] - $rd debug sleep 5.0 ; # Make second server unable to reply. - set e {} - catch {r -1 migrate $second_host $second_port key 9 1000} e - assert_match {IOERR*} $e - } - } -} diff --git a/tools/pika_migrate/tests/unit/expire.tcl b/tools/pika_migrate/tests/unit/expire.tcl deleted file mode 100644 index ff3dacb337..0000000000 --- a/tools/pika_migrate/tests/unit/expire.tcl +++ /dev/null @@ -1,201 +0,0 @@ -start_server {tags {"expire"}} { - test {EXPIRE - set timeouts multiple times} { - r set x foobar - set v1 [r expire x 5] - set v2 [r ttl x] - set v3 [r expire x 10] - set v4 [r ttl x] - r expire x 2 - list $v1 $v2 $v3 $v4 - } {1 [45] 1 10} - - test {EXPIRE - It should still be possible to read 'x'} { - r get x - } {foobar} - - tags {"slow"} { - test {EXPIRE - After 2.1 seconds the key should no longer be here} { - after 2100 - list [r get x] [r exists x] - } {{} 0} - } - - test {EXPIRE - write on expire should work} { - r del x - r lpush x foo - r expire x 1000 - r lpush x bar - r lrange x 0 -1 - } {bar foo} - - test {EXPIREAT - Check for EXPIRE alike behavior} { - r del x - r set x foo - r expireat x [expr [clock seconds]+15] - r ttl x - } {1[345]} - - test {SETEX - Set + Expire combo operation. Check for TTL} { - r setex x 12 test - r ttl x - } {1[012]} - - test {SETEX - Check value} { - r get x - } {test} - - test {SETEX - Overwrite old key} { - r setex y 1 foo - r get y - } {foo} - - tags {"slow"} { - test {SETEX - Wait for the key to expire} { - after 1100 - r get y - } {} - } - - test {SETEX - Wrong time parameter} { - catch {r setex z -10 foo} e - set _ $e - } {*invalid expire*} - - test {PERSIST can undo an EXPIRE} { - r set x foo - r expire x 50 - list [r ttl x] [r persist x] [r ttl x] [r get x] - } {50 1 -1 foo} - - test {PERSIST returns 0 against non existing or non volatile keys} { - r set x foo - list [r persist foo] [r persist nokeyatall] - } {0 0} - - test {EXPIRE precision is now the millisecond} { - # This test is very likely to do a false positive if the - # server is under pressure, so if it does not work give it a few more - # chances. - for {set j 0} {$j < 3} {incr j} { - r del x - r setex x 1 somevalue - after 900 - set a [r get x] - after 1100 - set b [r get x] - if {$a eq {somevalue} && $b eq {}} break - } - list $a $b - } {somevalue {}} - - test {PEXPIRE/PSETEX/PEXPIREAT can set sub-second expires} { - # This test is very likely to do a false positive if the - # server is under pressure, so if it does not work give it a few more - # chances. 
- for {set j 0} {$j < 3} {incr j} { - r del x y z - r psetex x 100 somevalue - after 80 - set a [r get x] - after 120 - set b [r get x] - - r set x somevalue - r pexpire x 100 - after 80 - set c [r get x] - after 120 - set d [r get x] - - r set x somevalue - r pexpireat x [expr ([clock seconds]*1000)+100] - after 80 - set e [r get x] - after 120 - set f [r get x] - - if {$a eq {somevalue} && $b eq {} && - $c eq {somevalue} && $d eq {} && - $e eq {somevalue} && $f eq {}} break - } - list $a $b - } {somevalue {}} - - test {TTL returns time to live in seconds} { - r del x - r setex x 10 somevalue - set ttl [r ttl x] - assert {$ttl > 8 && $ttl <= 10} - } - - test {PTTL returns time to live in milliseconds} { - r del x - r setex x 1 somevalue - set ttl [r pttl x] - assert {$ttl > 900 && $ttl <= 1000} - } - - test {TTL / PTTL return -1 if key has no expire} { - r del x - r set x hello - list [r ttl x] [r pttl x] - } {-1 -1} - - test {TTL / PTTL return -2 if key does not exist} { - r del x - list [r ttl x] [r pttl x] - } {-2 -2} - - test {Redis should actively expire keys incrementally} { - r flushdb - r psetex key1 500 a - r psetex key2 500 a - r psetex key3 500 a - set size1 [r dbsize] - # Redis expires random keys ten times every second so we are - # fairly sure that all the three keys should be evicted after - # one second. - after 1000 - set size2 [r dbsize] - list $size1 $size2 - } {3 0} - - test {Redis should lazily expire keys} { - r flushdb - r debug set-active-expire 0 - r psetex key1 500 a - r psetex key2 500 a - r psetex key3 500 a - set size1 [r dbsize] - # Redis expires random keys ten times every second so we are - # fairly sure that all the three keys should be evicted after - # one second. - after 1000 - set size2 [r dbsize] - r mget key1 key2 key3 - set size3 [r dbsize] - r debug set-active-expire 1 - list $size1 $size2 $size3 - } {3 3 0} - - test {EXPIRE should not resurrect keys (issue #1026)} { - r debug set-active-expire 0 - r set foo bar - r pexpire foo 500 - after 1000 - r expire foo 10 - r debug set-active-expire 1 - r exists foo - } {0} - - test {5 keys in, 5 keys out} { - r flushdb - r set a c - r expire a 5 - r set t c - r set e c - r set s c - r set foo b - lsort [r keys *] - } {a e foo s t} -} diff --git a/tools/pika_migrate/tests/unit/geo.tcl b/tools/pika_migrate/tests/unit/geo.tcl deleted file mode 100644 index 7ed8710980..0000000000 --- a/tools/pika_migrate/tests/unit/geo.tcl +++ /dev/null @@ -1,311 +0,0 @@ -# Helper functions to simulate search-in-radius on the Tcl side in order to -# verify the Redis implementation with a fuzzy test. -proc geo_degrad deg {expr {$deg*atan(1)*8/360}} - -proc geo_distance {lon1d lat1d lon2d lat2d} { - set lon1r [geo_degrad $lon1d] - set lat1r [geo_degrad $lat1d] - set lon2r [geo_degrad $lon2d] - set lat2r [geo_degrad $lat2d] - set v [expr {sin(($lon2r - $lon1r) / 2)}] - set u [expr {sin(($lat2r - $lat1r) / 2)}] - expr {2.0 * 6372797.560856 * \ - asin(sqrt($u * $u + cos($lat1r) * cos($lat2r) * $v * $v))} -} - -proc geo_random_point {lonvar latvar} { - upvar 1 $lonvar lon - upvar 1 $latvar lat - # Note that the actual latitude limit should be -85 to +85; we restrict - # the test to -70 to +70 since in this range the algorithm is more precise - # while outside this range occasionally some element may be missing. - set lon [expr {-180 + rand()*360}] - set lat [expr {-70 + rand()*140}] -} - -# Return the elements not common to both lists. 
-# This code is from http://wiki.tcl.tk/15489 -proc compare_lists {List1 List2} { - set DiffList {} - foreach Item $List1 { - if {[lsearch -exact $List2 $Item] == -1} { - lappend DiffList $Item - } - } - foreach Item $List2 { - if {[lsearch -exact $List1 $Item] == -1} { - if {[lsearch -exact $DiffList $Item] == -1} { - lappend DiffList $Item - } - } - } - return $DiffList -} - -# The following list represents sets of random seed, search position -# and radius that caused bugs in the past. It is used by the randomized -# test later as a starting point. When the regression vectors are scanned -# the code reverts to using random data. -# -# The format is: seed km lon lat -set regression_vectors { - {1482225976969 7083 81.634948934258375 30.561509253718668} - {1482340074151 5416 -70.863281847379767 -46.347003465679947} - {1499014685896 6064 -89.818768962202014 -40.463868561416803} - {1412 156 149.29737817929004 15.95807862745508} - {441574 143 59.235461856813856 66.269555127373678} - {160645 187 -101.88575239939883 49.061997951502917} - {750269 154 -90.187939661642517 66.615930412251487} - {342880 145 163.03472387745728 64.012747720821181} - {729955 143 137.86663517256579 63.986745399416776} - {939895 151 59.149620271823181 65.204186651485145} - {1412 156 149.29737817929004 15.95807862745508} - {564862 149 84.062063109158544 -65.685403922426232} -} -set rv_idx 0 - -start_server {tags {"geo"}} { - test {GEOADD create} { - r geoadd nyc -73.9454966 40.747533 "lic market" - } {1} - - test {GEOADD update} { - r geoadd nyc -73.9454966 40.747533 "lic market" - } {0} - - test {GEOADD invalid coordinates} { - catch { - r geoadd nyc -73.9454966 40.747533 "lic market" \ - foo bar "luck market" - } err - set err - } {*valid*} - - test {GEOADD multi add} { - r geoadd nyc -73.9733487 40.7648057 "central park n/q/r" -73.9903085 40.7362513 "union square" -74.0131604 40.7126674 "wtc one" -73.7858139 40.6428986 "jfk" -73.9375699 40.7498929 "q4" -73.9564142 40.7480973 4545 - } {6} - - test {Check geoset values} { - r zrange nyc 0 -1 withscores - } {{wtc one} 1791873972053020 {union square} 1791875485187452 {central park n/q/r} 1791875761332224 4545 1791875796750882 {lic market} 1791875804419201 q4 1791875830079666 jfk 1791895905559723} - - test {GEORADIUS simple (sorted)} { - r georadius nyc -73.9798091 40.7598464 3 km asc - } {{central park n/q/r} 4545 {union square}} - - test {GEORADIUS withdist (sorted)} { - r georadius nyc -73.9798091 40.7598464 3 km withdist asc - } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697}} - - test {GEORADIUS with COUNT} { - r georadius nyc -73.9798091 40.7598464 10 km COUNT 3 - } {{wtc one} {union square} {central park n/q/r}} - - test {GEORADIUS with COUNT but missing integer argument} { - catch {r georadius nyc -73.9798091 40.7598464 10 km COUNT} e - set e - } {ERR*syntax*} - - test {GEORADIUS with COUNT DESC} { - r georadius nyc -73.9798091 40.7598464 10 km COUNT 2 DESC - } {{wtc one} q4} - - test {GEORADIUS HUGE, issue #2767} { - r geoadd users -47.271613776683807 -54.534504198047678 user_000000 - llength [r GEORADIUS users 0 0 50000 km WITHCOORD] - } {1} - - test {GEORADIUSBYMEMBER simple (sorted)} { - r georadiusbymember nyc "wtc one" 7 km - } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}} - - test {GEORADIUSBYMEMBER withdist (sorted)} { - r georadiusbymember nyc "wtc one" 7 km withdist - } {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}} - - test {GEOHASH is able to return 
geohash strings} { - # Example from Wikipedia. - r del points - r geoadd points -5.6 42.6 test - lindex [r geohash points test] 0 - } {ezs42e44yx0} - - test {GEOPOS simple} { - r del points - r geoadd points 10 20 a 30 40 b - lassign [lindex [r geopos points a b] 0] x1 y1 - lassign [lindex [r geopos points a b] 1] x2 y2 - assert {abs($x1 - 10) < 0.001} - assert {abs($y1 - 20) < 0.001} - assert {abs($x2 - 30) < 0.001} - assert {abs($y2 - 40) < 0.001} - } - - test {GEOPOS missing element} { - r del points - r geoadd points 10 20 a 30 40 b - lindex [r geopos points a x b] 1 - } {} - - test {GEODIST simple & unit} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - set m [r geodist points Palermo Catania] - assert {$m > 166274 && $m < 166275} - set km [r geodist points Palermo Catania km] - assert {$km > 166.2 && $km < 166.3} - } - - test {GEODIST missing elements} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - set m [r geodist points Palermo Agrigento] - assert {$m eq {}} - set m [r geodist points Ragusa Agrigento] - assert {$m eq {}} - set m [r geodist empty_key Palermo Catania] - assert {$m eq {}} - } - - test {GEORADIUS STORE option: syntax error} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - catch {r georadius points 13.361389 38.115556 50 km store} e - set e - } {*ERR*syntax*} - - test {GEORANGE STORE option: incompatible options} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - catch {r georadius points 13.361389 38.115556 50 km store points2 withdist} e - assert_match {*ERR*} $e - catch {r georadius points 13.361389 38.115556 50 km store points2 withhash} e - assert_match {*ERR*} $e - catch {r georadius points 13.361389 38.115556 50 km store points2 withcoords} e - assert_match {*ERR*} $e - } - - test {GEORANGE STORE option: plain usage} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - r georadius points 13.361389 38.115556 500 km store points2 - assert_equal [r zrange points 0 -1] [r zrange points2 0 -1] - } - - test {GEORANGE STOREDIST option: plain usage} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - r georadius points 13.361389 38.115556 500 km storedist points2 - set res [r zrange points2 0 -1 withscores] - assert {[lindex $res 1] < 1} - assert {[lindex $res 3] > 166} - assert {[lindex $res 3] < 167} - } - - test {GEORANGE STOREDIST option: COUNT ASC and DESC} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - r georadius points 13.361389 38.115556 500 km storedist points2 asc count 1 - assert {[r zcard points2] == 1} - set res [r zrange points2 0 -1 withscores] - assert {[lindex $res 0] eq "Palermo"} - - r georadius points 13.361389 38.115556 500 km storedist points2 desc count 1 - assert {[r zcard points2] == 1} - set res [r zrange points2 0 -1 withscores] - assert {[lindex $res 0] eq "Catania"} - } - - test {GEOADD + GEORANGE randomized test} { - set attempt 30 - while {[incr attempt -1]} { - set rv [lindex $regression_vectors $rv_idx] - incr rv_idx - - unset -nocomplain debuginfo - set srand_seed [clock milliseconds] - if {$rv ne {}} {set srand_seed [lindex $rv 0]} - lappend debuginfo "srand_seed is $srand_seed" - expr {srand($srand_seed)} ; # If you need a reproducible run - r del mypoints - - if {[randomInt 10] == 
0} {
-                # From time to time use very big radiuses
-                set radius_km [expr {[randomInt 50000]+10}]
-            } else {
-                # Normally use radiuses of 10 to ~200 km to stress
-                # test the code the most in edge cases.
-                set radius_km [expr {[randomInt 200]+10}]
-            }
-            if {$rv ne {}} {set radius_km [lindex $rv 1]}
-            set radius_m [expr {$radius_km*1000}]
-            geo_random_point search_lon search_lat
-            if {$rv ne {}} {
-                set search_lon [lindex $rv 2]
-                set search_lat [lindex $rv 3]
-            }
-            lappend debuginfo "Search area: $search_lon,$search_lat $radius_km km"
-            set tcl_result {}
-            set argv {}
-            for {set j 0} {$j < 20000} {incr j} {
-                geo_random_point lon lat
-                lappend argv $lon $lat "place:$j"
-                set distance [geo_distance $lon $lat $search_lon $search_lat]
-                if {$distance < $radius_m} {
-                    lappend tcl_result "place:$j"
-                }
-                lappend debuginfo "place:$j $lon $lat [expr {$distance/1000}] km"
-            }
-            r geoadd mypoints {*}$argv
-            set res [lsort [r georadius mypoints $search_lon $search_lat $radius_km km]]
-            set res2 [lsort $tcl_result]
-            set test_result OK
-
-            if {$res != $res2} {
-                set rounding_errors 0
-                set diff [compare_lists $res $res2]
-                foreach place $diff {
-                    lassign [lindex [r geopos mypoints $place] 0] lon lat
-                    set mydist [geo_distance $lon $lat $search_lon $search_lat]
-                    set mydist [expr $mydist/1000]
-                    if {($mydist / $radius_km) > 0.999} {incr rounding_errors}
-                }
-                # Make sure this is a real error and not a rounding issue.
-                if {[llength $diff] == $rounding_errors} {
-                    set res $res2; # Error silenced
-                }
-            }
-
-            if {$res != $res2} {
-                set diff [compare_lists $res $res2]
-                puts "*** Possible problem in GEO radius query ***"
-                puts "Redis: $res"
-                puts "Tcl : $res2"
-                puts "Diff : $diff"
-                puts [join $debuginfo "\n"]
-                foreach place $diff {
-                    if {[lsearch -exact $res2 $place] != -1} {
-                        set where "(only in Tcl)"
-                    } else {
-                        set where "(only in Redis)"
-                    }
-                    lassign [lindex [r geopos mypoints $place] 0] lon lat
-                    set mydist [geo_distance $lon $lat $search_lon $search_lat]
-                    set mydist [expr $mydist/1000]
-                    puts "$place -> [r geopos mypoints $place] $mydist $where"
-                    if {($mydist / $radius_km) > 0.999} {incr rounding_errors}
-                }
-                set test_result FAIL
-            }
-            unset -nocomplain debuginfo
-            if {$test_result ne {OK}} break
-        }
-        set test_result
-    } {OK}
-}
diff --git a/tools/pika_migrate/tests/unit/hyperloglog.tcl b/tools/pika_migrate/tests/unit/hyperloglog.tcl
deleted file mode 100755
index 6d614bb156..0000000000
--- a/tools/pika_migrate/tests/unit/hyperloglog.tcl
+++ /dev/null
@@ -1,250 +0,0 @@
-start_server {tags {"hll"}} {
-#    test {HyperLogLog self test passes} {
-#        catch {r pfselftest} e
-#        set e
-#    } {OK}
-
-    test {PFADD without arguments creates an HLL value} {
-        r pfadd hll
-        r exists hll
-    } {1}
-
-    test {Approximated cardinality after creation is zero} {
-        r pfcount hll
-    } {0}
-
-    test {PFADD returns 1 when at least 1 reg was modified} {
-        r pfadd hll a b c
-    } {1}
-
-    test {PFADD returns 0 when no reg was modified} {
-        r pfadd hll a b c
-    } {0}
-
-    test {PFADD works with empty string (regression)} {
-        r pfadd hll ""
-    }
-
-    # Note that the self test stresses the cardinality estimation
-    # error much better. We are testing just the command
-    # implementation itself here.
-    test {PFCOUNT returns approximated cardinality of set} {
-        r del hll
-        set res {}
-        r pfadd hll 1 2 3 4 5
-        lappend res [r pfcount hll]
-        # Call it again to test cached value invalidation.
-        r pfadd hll 6 7 8 8 9 10
-        lappend res [r pfcount hll]
-        set res
-    } {5 10}
-
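For context on the 1% tolerance asserted by the stress test further down in this file: Redis HyperLogLogs use m = 16384 registers (the same figure the commented-out PFDEBUG GETREG test near the end of the file expects), and the theoretical standard error of the estimator is 1.04/sqrt(m). A standalone sketch of that arithmetic, runnable in a plain tclsh with no server:

    # Theoretical HyperLogLog standard error for m = 16384 registers.
    set m 16384
    set se_pct [expr {1.04 / sqrt($m) * 100}]
    puts [format "expected standard error: %.2f%%" $se_pct]  ;# ~0.81%

That ~0.81% figure is presumably why the stress test tolerates 1% at most checkpoints (and 3% at the one-million mark, where a single run can drift further).
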
-#    test {HyperLogLogs are promoted from sparse to dense} {
-#        r del hll
-#        r config set hll-sparse-max-bytes 3000
-#        set n 0
-#        while {$n < 100000} {
-#            set elements {}
-#            for {set j 0} {$j < 100} {incr j} {lappend elements [expr rand()]}
-#            incr n 100
-#            r pfadd hll {*}$elements
-#            set card [r pfcount hll]
-#            set err [expr {abs($card-$n)}]
-#            assert {$err < (double($card)/100)*5}
-#            if {$n < 1000} {
-#                assert {[r pfdebug encoding hll] eq {sparse}}
-#            } elseif {$n > 10000} {
-#                assert {[r pfdebug encoding hll] eq {dense}}
-#            }
-#        }
-#    }
-
-#    test {HyperLogLog sparse encoding stress test} {
-#        for {set x 0} {$x < 1000} {incr x} {
-#            r del hll1 hll2
-#            set numele [randomInt 100]
-#            set elements {}
-#            for {set j 0} {$j < $numele} {incr j} {
-#                lappend elements [expr rand()]
-#            }
-            # Force dense representation of hll2
-#            r pfadd hll2
-#            r pfdebug todense hll2
-#            r pfadd hll1 {*}$elements
-#            r pfadd hll2 {*}$elements
-#            assert {[r pfdebug encoding hll1] eq {sparse}}
-#            assert {[r pfdebug encoding hll2] eq {dense}}
-            # Cardinality estimated should match exactly.
-#            assert {[r pfcount hll1] eq [r pfcount hll2]}
-#        }
-#    }
-
-#    test {Corrupted sparse HyperLogLogs are detected: Additional at tail} {
-#        r del hll
-#        r pfadd hll a b c
-#        r append hll "hello"
-#        set e {}
-#        catch {r pfcount hll} e
-#        set e
-#    } {*INVALIDOBJ*}
-
-#    test {Corrupted sparse HyperLogLogs are detected: Broken magic} {
-#        r del hll
-#        r pfadd hll a b c
-#        r setrange hll 0 "0123"
-#        set e {}
-#        catch {r pfcount hll} e
-#        set e
-#    } {*WRONGTYPE*}
-
-#    test {Corrupted sparse HyperLogLogs are detected: Invalid encoding} {
-#        r del hll
-#        r pfadd hll a b c
-#        r setrange hll 4 "x"
-#        set e {}
-#        catch {r pfcount hll} e
-#        set e
-#    } {*WRONGTYPE*}
-
-#    test {Corrupted dense HyperLogLogs are detected: Wrong length} {
-#        r del hll
-#        r pfadd hll a b c
-#        r setrange hll 4 "\x00"
-#        set e {}
-#        catch {r pfcount hll} e
-#        set e
-#    } {*WRONGTYPE*}
-
-#    test {PFADD, PFCOUNT, PFMERGE type checking works} {
-#        r set foo bar
-#        catch {r pfadd foo 1} e
-#        assert_match {*WRONGTYPE*} $e
-#        catch {r pfcount foo} e
-#        assert_match {*WRONGTYPE*} $e
-#        catch {r pfmerge bar foo} e
-#        assert_match {*WRONGTYPE*} $e
-#        catch {r pfmerge foo bar} e
-#        assert_match {*WRONGTYPE*} $e
-#    }
-
-    test {PFMERGE results on the cardinality of union of sets} {
-        r del hll hll1 hll2 hll3
-        r pfadd hll1 a b c
-        r pfadd hll2 b c d
-        r pfadd hll3 c d e
-        r pfmerge hll hll1 hll2 hll3
-        r pfcount hll
-    } {5}
-
-    test {PFCOUNT multiple-keys merge returns cardinality of union} {
-        r del hll1 hll2 hll3
-        for {set x 1} {$x < 100000} {incr x} {
-            r pfadd hll1 "foo-$x"
-            r pfadd hll2 "bar-$x"
-            r pfadd hll3 "zap-$x"
-
-            set card [r pfcount hll1 hll2 hll3]
-            set realcard [expr {$x*3}]
-            set err [expr {abs($card-$realcard)}]
-            assert {$err < (double($card)/100)*5}
-        }
-    }
-
-    test {HYPERLOGLOG stress test: 5w, 10w, 15w, 20w, 30w, 50w, 100w} {
-        r del hll1
-        for {set x 1} {$x <= 1000000} {incr x} {
-            r pfadd hll1 "foo-$x"
-            if {$x == 50000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 100000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 150000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 300000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 500000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 1000000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.03}
-            }
-        }
-    }
-
-#    test {PFDEBUG GETREG returns the HyperLogLog raw registers} {
-#        r del hll
-#        r pfadd hll 1 2 3
-#        llength [r pfdebug getreg hll]
-#    } {16384}
-
-#    test {PFADD / PFCOUNT cache invalidation works} {
-#        r del hll
-#        r pfadd hll a b c
-#        r pfcount hll
-#        assert {[r getrange hll 15 15] eq "\x00"}
-#        r pfadd hll a b c
-#        assert {[r getrange hll 15 15] eq "\x00"}
-#        r pfadd hll 1 2 3
-#        assert {[r getrange hll 15 15] eq "\x80"}
-#    }
-}
diff --git a/tools/pika_migrate/tests/unit/introspection.tcl b/tools/pika_migrate/tests/unit/introspection.tcl
deleted file mode 100644
index 342bb939a8..0000000000
--- a/tools/pika_migrate/tests/unit/introspection.tcl
+++ /dev/null
@@ -1,59 +0,0 @@
-start_server {tags {"introspection"}} {
-    test {CLIENT LIST} {
-        r client list
-    } {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=0 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*}
-
-    test {MONITOR can log executed commands} {
-        set rd [redis_deferring_client]
-        $rd monitor
-        r set foo bar
-        r get foo
-        list [$rd read] [$rd read] [$rd read]
-    } {*OK*"set" "foo"*"get" "foo"*}
-
-    test {MONITOR can log commands issued by the scripting engine} {
-        set rd [redis_deferring_client]
-        $rd monitor
-        r eval {redis.call('set',KEYS[1],ARGV[1])} 1 foo bar
-        $rd read ;# Discard the OK
-        assert_match {*eval*} [$rd read]
-        assert_match {*lua*"set"*"foo"*"bar"*} [$rd read]
-    }
-
-    test {CLIENT GETNAME should return NIL if name is not assigned} {
-        r client getname
-    } {}
-
-    test {CLIENT LIST shows empty fields for unassigned names} {
-        r client list
-    } {*name= *}
-
-    test {CLIENT SETNAME does not accept spaces} {
-        catch {r client setname "foo bar"} e
-        set e
-    } {ERR*}
-
-    test {CLIENT SETNAME can assign a name to this connection} {
-        assert_equal [r client setname myname] {OK}
-        r client list
-    } {*name=myname*}
-
-    test {CLIENT
SETNAME can change the name of an existing connection} { - assert_equal [r client setname someothername] {OK} - r client list - } {*name=someothername*} - - test {After CLIENT SETNAME, connection can still be closed} { - set rd [redis_deferring_client] - $rd client setname foobar - assert_equal [$rd read] "OK" - assert_match {*foobar*} [r client list] - $rd close - # Now the client should no longer be listed - wait_for_condition 50 100 { - [string match {*foobar*} [r client list]] == 0 - } else { - fail "Client still listed in CLIENT LIST after SETNAME." - } - } -} diff --git a/tools/pika_migrate/tests/unit/keys.tcl b/tools/pika_migrate/tests/unit/keys.tcl deleted file mode 100644 index cb62444f3f..0000000000 --- a/tools/pika_migrate/tests/unit/keys.tcl +++ /dev/null @@ -1,54 +0,0 @@ -start_server {tags {"keys"}} { - test {KEYS with pattern} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r set $key hello - } - assert_equal {foo_a foo_b foo_c} [r keys foo*] - assert_equal {foo_a foo_b foo_c} [r keys f*] - assert_equal {foo_a foo_b foo_c} [r keys f*o*] - } - - test {KEYS to get all keys} { - lsort [r keys *] - } {foo_a foo_b foo_c key_x key_y key_z} - - test {KEYS select by type} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r del $key - } - r set kv_1 value - r set kv_2 value - r hset hash_1 hash_field 1 - r hset hash_2 hash_field 1 - r lpush list_1 value - r lpush list_2 value - r zadd zset_1 1 "a" - r zadd zset_2 1 "a" - r sadd set_1 "a" - r sadd set_2 "a" - assert_equal {kv_1 kv_2} [r keys * string] - assert_equal {hash_1 hash_2} [r keys * hash] - assert_equal {list_1 list_2} [r keys * list] - assert_equal {zset_1 zset_2} [r keys * zset] - assert_equal {set_1 set_2} [r keys * set] - assert_equal {kv_1 kv_2 hash_1 hash_2 zset_1 zset_2 set_1 set_2 list_1 list_2} [r keys *] - assert_equal {kv_1 kv_2} [r keys * STRING] - assert_equal {hash_1 hash_2} [r keys * HASH] - assert_equal {list_1 list_2} [r keys * LIST] - assert_equal {zset_1 zset_2} [r keys * ZSET] - assert_equal {set_1 set_2} [r keys * SET] - } - - test {KEYS syntax error} { - catch {r keys * a} e1 - catch {r keys * strings} e2 - catch {r keys * c d} e3 - catch {r keys} e4 - catch {r keys * set zset} e5 - assert_equal {ERR syntax error} [set e1] - assert_equal {ERR syntax error} [set e2] - assert_equal {ERR syntax error} [set e3] - assert_equal {ERR wrong number of arguments for 'keys' command} [set e4] - assert_equal {ERR syntax error} [set e5] - } -} diff --git a/tools/pika_migrate/tests/unit/latency-monitor.tcl b/tools/pika_migrate/tests/unit/latency-monitor.tcl deleted file mode 100644 index b736cad98b..0000000000 --- a/tools/pika_migrate/tests/unit/latency-monitor.tcl +++ /dev/null @@ -1,50 +0,0 @@ -start_server {tags {"latency-monitor"}} { - # Set a threshold high enough to avoid spurious latency events. 
-    r config set latency-monitor-threshold 200
-    r latency reset
-
-    test {Test latency events logging} {
-        r debug sleep 0.3
-        after 1100
-        r debug sleep 0.4
-        after 1100
-        r debug sleep 0.5
-        assert {[llength [r latency history command]] >= 3}
-    }
-
-    test {LATENCY HISTORY output is ok} {
-        set min 250
-        set max 450
-        foreach event [r latency history command] {
-            lassign $event time latency
-            assert {$latency >= $min && $latency <= $max}
-            incr min 100
-            incr max 100
-            set last_time $time ; # Used in the next test
-        }
-    }
-
-    test {LATENCY LATEST output is ok} {
-        foreach event [r latency latest] {
-            lassign $event eventname time latency max
-            assert {$eventname eq "command"}
-            assert {$max >= 450 && $max <= 650}
-            assert {$time == $last_time}
-            break
-        }
-    }
-
-    test {LATENCY HISTORY / RESET with wrong event name is fine} {
-        assert {[llength [r latency history blabla]] == 0}
-        assert {[r latency reset blabla] == 0}
-    }
-
-    test {LATENCY DOCTOR produces some output} {
-        assert {[string length [r latency doctor]] > 0}
-    }
-
-    test {LATENCY RESET is able to reset events} {
-        assert {[r latency reset] > 0}
-        assert {[r latency latest] eq {}}
-    }
-}
diff --git a/tools/pika_migrate/tests/unit/limits.tcl b/tools/pika_migrate/tests/unit/limits.tcl
deleted file mode 100644
index b37ea9b0f5..0000000000
--- a/tools/pika_migrate/tests/unit/limits.tcl
+++ /dev/null
@@ -1,16 +0,0 @@
-start_server {tags {"limits"} overrides {maxclients 10}} {
-    test {Check if maxclients works refusing connections} {
-        set c 0
-        catch {
-            while {$c < 50} {
-                incr c
-                set rd [redis_deferring_client]
-                $rd ping
-                $rd read
-                after 100
-            }
-        } e
-        assert {$c > 8 && $c <= 10}
-        set e
-    } {*ERR max*reached*}
-}
diff --git a/tools/pika_migrate/tests/unit/maxmemory.tcl b/tools/pika_migrate/tests/unit/maxmemory.tcl
deleted file mode 100644
index e6bf7860cb..0000000000
--- a/tools/pika_migrate/tests/unit/maxmemory.tcl
+++ /dev/null
@@ -1,144 +0,0 @@
-start_server {tags {"maxmemory"}} {
-    test "Without maxmemory small integers are shared" {
-        r config set maxmemory 0
-        r set a 1
-        assert {[r object refcount a] > 1}
-    }
-
-    test "With maxmemory and non-LRU policy integers are still shared" {
-        r config set maxmemory 1073741824
-        r config set maxmemory-policy allkeys-random
-        r set a 1
-        assert {[r object refcount a] > 1}
-    }
-
-    test "With maxmemory and LRU policy integers are not shared" {
-        r config set maxmemory 1073741824
-        r config set maxmemory-policy allkeys-lru
-        r set a 1
-        r config set maxmemory-policy volatile-lru
-        r set b 1
-        assert {[r object refcount a] == 1}
-        assert {[r object refcount b] == 1}
-        r config set maxmemory 0
-    }
-
-    foreach policy {
-        allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl
-    } {
-        test "maxmemory - is the memory limit honoured? (policy $policy)" {
-            # make sure to start with a blank instance
-            r flushall
-            # Get the current memory limit and calculate a new limit.
-            # We just add 100k to the current memory size so that it is
-            # fast for us to reach that limit.
-            set used [s used_memory]
-            set limit [expr {$used+100*1024}]
-            r config set maxmemory $limit
-            r config set maxmemory-policy $policy
-            # Now add keys until the limit is almost reached.
-            set numkeys 0
-            while 1 {
-                r setex [randomKey] 10000 x
-                incr numkeys
-                if {[s used_memory]+4096 > $limit} {
-                    assert {$numkeys > 10}
-                    break
-                }
-            }
-            # If we add the same number of keys already added again, we
-            # should still be under the limit.
- for {set j 0} {$j < $numkeys} {incr j} { - r setex [randomKey] 10000 x - } - assert {[s used_memory] < ($limit+4096)} - } - } - - foreach policy { - allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl - } { - test "maxmemory - only allkeys-* should remove non-volatile keys ($policy)" { - # make sure to start with a blank instance - r flushall - # Get the current memory limit and calculate a new limit. - # We just add 100k to the current memory size so that it is - # fast for us to reach that limit. - set used [s used_memory] - set limit [expr {$used+100*1024}] - r config set maxmemory $limit - r config set maxmemory-policy $policy - # Now add keys until the limit is almost reached. - set numkeys 0 - while 1 { - r set [randomKey] x - incr numkeys - if {[s used_memory]+4096 > $limit} { - assert {$numkeys > 10} - break - } - } - # If we add the same number of keys already added again and - # the policy is allkeys-* we should still be under the limit. - # Otherwise we should see an error reported by Redis. - set err 0 - for {set j 0} {$j < $numkeys} {incr j} { - if {[catch {r set [randomKey] x} e]} { - if {[string match {*used memory*} $e]} { - set err 1 - } - } - } - if {[string match allkeys-* $policy]} { - assert {[s used_memory] < ($limit+4096)} - } else { - assert {$err == 1} - } - } - } - - foreach policy { - volatile-lru volatile-random volatile-ttl - } { - test "maxmemory - policy $policy should only remove volatile keys." { - # make sure to start with a blank instance - r flushall - # Get the current memory limit and calculate a new limit. - # We just add 100k to the current memory size so that it is - # fast for us to reach that limit. - set used [s used_memory] - set limit [expr {$used+100*1024}] - r config set maxmemory $limit - r config set maxmemory-policy $policy - # Now add keys until the limit is almost reached. - set numkeys 0 - while 1 { - # Odd keys are volatile - # Even keys are non volatile - if {$numkeys % 2} { - r setex "key:$numkeys" 10000 x - } else { - r set "key:$numkeys" x - } - if {[s used_memory]+4096 > $limit} { - assert {$numkeys > 10} - break - } - incr numkeys - } - # Now we add the same number of volatile keys already added. - # We expect Redis to evict only volatile keys in order to make - # space. - set err 0 - for {set j 0} {$j < $numkeys} {incr j} { - catch {r setex "foo:$j" 10000 x} - } - # We should still be under the limit. - assert {[s used_memory] < ($limit+4096)} - # However all our non volatile keys should be here. - for {set j 0} {$j < $numkeys} {incr j 2} { - assert {[r exists "key:$j"]} - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/memefficiency.tcl b/tools/pika_migrate/tests/unit/memefficiency.tcl deleted file mode 100644 index 7ca9a705bb..0000000000 --- a/tools/pika_migrate/tests/unit/memefficiency.tcl +++ /dev/null @@ -1,37 +0,0 @@ -proc test_memory_efficiency {range} { - r flushall - set rd [redis_deferring_client] - set base_mem [s used_memory] - set written 0 - for {set j 0} {$j < 10000} {incr j} { - set key key:$j - set val [string repeat A [expr {int(rand()*$range)}]] - $rd set $key $val - incr written [string length $key] - incr written [string length $val] - incr written 2 ;# A separator is the minimum to store key-value data. 
-    }
-    for {set j 0} {$j < 10000} {incr j} {
-        $rd read ; # Discard replies
-    }
-
-    set current_mem [s used_memory]
-    set used [expr {$current_mem-$base_mem}]
-    set efficiency [expr {double($written)/$used}]
-    return $efficiency
-}
-
-start_server {tags {"memefficiency"}} {
-    foreach {size_range expected_min_efficiency} {
-        32    0.15
-        64    0.25
-        128   0.35
-        1024  0.75
-        16384 0.82
-    } {
-        test "Memory efficiency with values in range $size_range" {
-            set efficiency [test_memory_efficiency $size_range]
-            assert {$efficiency >= $expected_min_efficiency}
-        }
-    }
-}
diff --git a/tools/pika_migrate/tests/unit/multi.tcl b/tools/pika_migrate/tests/unit/multi.tcl
deleted file mode 100644
index 6655bf62c2..0000000000
--- a/tools/pika_migrate/tests/unit/multi.tcl
+++ /dev/null
@@ -1,309 +0,0 @@
-start_server {tags {"multi"}} {
-    test {MULTI / EXEC basics} {
-        r del mylist
-        r rpush mylist a
-        r rpush mylist b
-        r rpush mylist c
-        r multi
-        set v1 [r lrange mylist 0 -1]
-        set v2 [r ping]
-        set v3 [r exec]
-        list $v1 $v2 $v3
-    } {QUEUED QUEUED {{a b c} PONG}}
-
-    test {DISCARD} {
-        r del mylist
-        r rpush mylist a
-        r rpush mylist b
-        r rpush mylist c
-        r multi
-        set v1 [r del mylist]
-        set v2 [r discard]
-        set v3 [r lrange mylist 0 -1]
-        list $v1 $v2 $v3
-    } {QUEUED OK {a b c}}
-
-    test {Nested MULTI are not allowed} {
-        set err {}
-        r multi
-        catch {[r multi]} err
-        r exec
-        set _ $err
-    } {*ERR MULTI*}
-
-    test {MULTI where commands alter argc/argv} {
-        r sadd myset a
-        r multi
-        r spop myset
-        list [r exec] [r exists myset]
-    } {a 0}
-
-    test {WATCH inside MULTI is not allowed} {
-        set err {}
-        r multi
-        catch {[r watch x]} err
-        r exec
-        set _ $err
-    } {*ERR WATCH*}
-
-    test {EXEC fails if there are errors while queueing commands #1} {
-        r del foo1 foo2
-        r multi
-        r set foo1 bar1
-        catch {r non-existing-command}
-        r set foo2 bar2
-        catch {r exec} e
-        assert_match {EXECABORT*} $e
-        list [r exists foo1] [r exists foo2]
-    } {0 0}
-
-    test {EXEC fails if there are errors while queueing commands #2} {
-        set rd [redis_deferring_client]
-        r del foo1 foo2
-        r multi
-        r set foo1 bar1
-        $rd config set maxmemory 1
-        assert {[$rd read] eq {OK}}
-        catch {r lpush mylist myvalue}
-        $rd config set maxmemory 0
-        assert {[$rd read] eq {OK}}
-        r set foo2 bar2
-        catch {r exec} e
-        assert_match {EXECABORT*} $e
-        $rd close
-        list [r exists foo1] [r exists foo2]
-    } {0 0}
-
-    test {If EXEC aborts, the client MULTI state is cleared} {
-        r del foo1 foo2
-        r multi
-        r set foo1 bar1
-        catch {r non-existing-command}
-        r set foo2 bar2
-        catch {r exec} e
-        assert_match {EXECABORT*} $e
-        r ping
-    } {PONG}
-
-    test {EXEC works on WATCHed key not modified} {
-        r watch x y z
-        r watch k
-        r multi
-        r ping
-        r exec
-    } {PONG}
-
-    test {EXEC fail on WATCHed key modified (1 key of 1 watched)} {
-        r set x 30
-        r watch x
-        r set x 40
-        r multi
-        r ping
-        r exec
-    } {}
-
-    test {EXEC fail on WATCHed key modified (1 key of 5 watched)} {
-        r set x 30
-        r watch a b x k z
-        r set x 40
-        r multi
-        r ping
-        r exec
-    } {}
-
-    test {EXEC fail on WATCHed key modified by SORT with STORE even if the result is empty} {
-        r flushdb
-        r lpush foo bar
-        r watch foo
-        r sort emptylist store foo
-        r multi
-        r ping
-        r exec
-    } {}
-
-    test {After successful EXEC key is no longer watched} {
-        r set x 30
-        r watch x
-        r multi
-        r ping
-        r exec
-        r set x 40
-        r multi
-        r ping
-        r exec
-    } {PONG}
-
-    test {After failed EXEC key is no longer watched} {
-        r set x 30
-        r watch x
-        r set x 40
-        r multi
-        r ping
-        r exec
-        r set x 40
-        r multi
-        r ping
-        r exec
-    } {PONG}
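The WATCH tests above and below all hinge on the same contract: EXEC returns an empty reply when a watched key was modified, and a normal reply array otherwise. For reference, a minimal sketch of the optimistic-locking retry loop this enables; the `cas_incr` helper is hypothetical and assumes the suite's usual `r` client command:

    # Check-and-set increment: retry until EXEC commits without interference.
    proc cas_incr {key} {
        while 1 {
            r watch $key
            set val [r get $key]
            if {$val eq {}} {set val 0}    ;# treat a missing key as 0
            r multi
            r set $key [expr {$val + 1}]
            # An empty EXEC reply means a watched key changed: retry.
            if {[r exec] ne {}} break
        }
    }
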
-    test {It is possible to UNWATCH} {
-        r set x 30
-        r watch x
-        r set x 40
-        r unwatch
-        r multi
-        r ping
-        r exec
-    } {PONG}
-
-    test {UNWATCH when there is nothing watched works as expected} {
-        r unwatch
-    } {OK}
-
-    test {FLUSHALL is able to touch the watched keys} {
-        r set x 30
-        r watch x
-        r flushall
-        r multi
-        r ping
-        r exec
-    } {}
-
-    test {FLUSHALL does not touch non affected keys} {
-        r del x
-        r watch x
-        r flushall
-        r multi
-        r ping
-        r exec
-    } {PONG}
-
-    test {FLUSHDB is able to touch the watched keys} {
-        r set x 30
-        r watch x
-        r flushdb
-        r multi
-        r ping
-        r exec
-    } {}
-
-    test {FLUSHDB does not touch non affected keys} {
-        r del x
-        r watch x
-        r flushdb
-        r multi
-        r ping
-        r exec
-    } {PONG}
-
-    test {WATCH is able to remember the DB a key belongs to} {
-        r select 5
-        r set x 30
-        r watch x
-        r select 1
-        r set x 10
-        r select 5
-        r multi
-        r ping
-        set res [r exec]
-        # Restore original DB
-        r select 9
-        set res
-    } {PONG}
-
-    test {WATCH will consider touched keys target of EXPIRE} {
-        r del x
-        r set x foo
-        r watch x
-        r expire x 10
-        r multi
-        r ping
-        r exec
-    } {}
-
-    test {WATCH will not consider touched expired keys} {
-        r del x
-        r set x foo
-        r expire x 1
-        r watch x
-        after 1100
-        r multi
-        r ping
-        r exec
-    } {PONG}
-
-    test {DISCARD should clear the WATCH dirty flag on the client} {
-        r watch x
-        r set x 10
-        r multi
-        r discard
-        r multi
-        r incr x
-        r exec
-    } {11}
-
-    test {DISCARD should UNWATCH all the keys} {
-        r watch x
-        r set x 10
-        r multi
-        r discard
-        r set x 10
-        r multi
-        r incr x
-        r exec
-    } {11}
-
-    test {MULTI / EXEC is propagated correctly (single write command)} {
-        set repl [attach_to_replication_stream]
-        r multi
-        r set foo bar
-        r exec
-        assert_replication_stream $repl {
-            {select *}
-            {multi}
-            {set foo bar}
-            {exec}
-        }
-        close_replication_stream $repl
-    }
-
-    test {MULTI / EXEC is propagated correctly (empty transaction)} {
-        set repl [attach_to_replication_stream]
-        r multi
-        r exec
-        r set foo bar
-        assert_replication_stream $repl {
-            {select *}
-            {set foo bar}
-        }
-        close_replication_stream $repl
-    }
-
-    test {MULTI / EXEC is propagated correctly (read-only commands)} {
-        r set foo value1
-        set repl [attach_to_replication_stream]
-        r multi
-        r get foo
-        r exec
-        r set foo value2
-        assert_replication_stream $repl {
-            {select *}
-            {set foo value2}
-        }
-        close_replication_stream $repl
-    }
-
-    test {MULTI / EXEC is propagated correctly (write command, no effect)} {
-        r del foo bar
-        set repl [attach_to_replication_stream]
-        r multi
-        r del foo
-        r exec
-        assert_replication_stream $repl {
-            {select *}
-            {multi}
-            {exec}
-        }
-        close_replication_stream $repl
-    }
-}
diff --git a/tools/pika_migrate/tests/unit/obuf-limits.tcl b/tools/pika_migrate/tests/unit/obuf-limits.tcl
deleted file mode 100644
index 5d625cf453..0000000000
--- a/tools/pika_migrate/tests/unit/obuf-limits.tcl
+++ /dev/null
@@ -1,73 +0,0 @@
-start_server {tags {"obuf-limits"}} {
-    test {Client output buffer hard limit is enforced} {
-        r config set client-output-buffer-limit {pubsub 100000 0 0}
-        set rd1 [redis_deferring_client]
-
-        $rd1 subscribe foo
-        set reply [$rd1 read]
-        assert {$reply eq "subscribe foo 1"}
-
-        set omem 0
-        while 1 {
-            r publish foo bar
-            set clients [split [r client list] "\r\n"]
-            set c [split [lindex $clients 1] " "]
-            if {![regexp {omem=([0-9]+)} $c - omem]} break
-            if {$omem > 200000} break
-        }
-        assert {$omem >= 90000 && $omem < 200000}
-        $rd1 close
-    }
-
-    test {Client output buffer soft limit is not enforced if time is
not overreached} { - r config set client-output-buffer-limit {pubsub 0 100000 10} - set rd1 [redis_deferring_client] - - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} - - set omem 0 - set start_time 0 - set time_elapsed 0 - while 1 { - r publish foo bar - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 100000} { - if {$start_time == 0} {set start_time [clock seconds]} - set time_elapsed [expr {[clock seconds]-$start_time}] - if {$time_elapsed >= 5} break - } - } - assert {$omem >= 100000 && $time_elapsed >= 5 && $time_elapsed <= 10} - $rd1 close - } - - test {Client output buffer soft limit is enforced if time is overreached} { - r config set client-output-buffer-limit {pubsub 0 100000 3} - set rd1 [redis_deferring_client] - - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} - - set omem 0 - set start_time 0 - set time_elapsed 0 - while 1 { - r publish foo bar - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 100000} { - if {$start_time == 0} {set start_time [clock seconds]} - set time_elapsed [expr {[clock seconds]-$start_time}] - if {$time_elapsed >= 10} break - } - } - assert {$omem >= 100000 && $time_elapsed < 6} - $rd1 close - } -} diff --git a/tools/pika_migrate/tests/unit/other.tcl b/tools/pika_migrate/tests/unit/other.tcl deleted file mode 100644 index a53f3f5c81..0000000000 --- a/tools/pika_migrate/tests/unit/other.tcl +++ /dev/null @@ -1,245 +0,0 @@ -start_server {tags {"other"}} { - if {$::force_failure} { - # This is used just for test suite development purposes. - test {Failing test} { - format err - } {ok} - } - - test {SAVE - make sure there are all the types as values} { - # Wait for a background saving in progress to terminate - waitForBgsave r - r lpush mysavelist hello - r lpush mysavelist world - r set myemptykey {} - r set mynormalkey {blablablba} - r zadd mytestzset 10 a - r zadd mytestzset 20 b - r zadd mytestzset 30 c - r save - } {OK} - - tags {slow} { - if {$::accurate} {set iterations 10000} else {set iterations 1000} - foreach fuzztype {binary alpha compr} { - test "FUZZ stresser with data model $fuzztype" { - set err 0 - for {set i 0} {$i < $iterations} {incr i} { - set fuzz [randstring 0 512 $fuzztype] - r set foo $fuzz - set got [r get foo] - if {$got ne $fuzz} { - set err [list $fuzz $got] - break - } - } - set _ $err - } {0} - } - } - - test {BGSAVE} { - waitForBgsave r - r flushdb - r save - r set x 10 - r bgsave - waitForBgsave r - r debug reload - r get x - } {10} - - test {SELECT an out of range DB} { - catch {r select 1000000} err - set _ $err - } {*invalid*} - - tags {consistency} { - if {![catch {package require sha1}]} { - if {$::accurate} {set numops 10000} else {set numops 1000} - test {Check consistency of different data types after a reload} { - r flushdb - createComplexDataset r $numops - set dump [csvdump r] - set sha1 [r debug digest] - r debug reload - set sha1_after [r debug digest] - if {$sha1 eq $sha1_after} { - set _ 1 - } else { - set newdump [csvdump r] - puts "Consistency test failed!" 
- puts "You can inspect the two dumps in /tmp/repldump*.txt" - - set fd [open /tmp/repldump1.txt w] - puts $fd $dump - close $fd - set fd [open /tmp/repldump2.txt w] - puts $fd $newdump - close $fd - - set _ 0 - } - } {1} - - test {Same dataset digest if saving/reloading as AOF?} { - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set sha1_after [r debug digest] - if {$sha1 eq $sha1_after} { - set _ 1 - } else { - set newdump [csvdump r] - puts "Consistency test failed!" - puts "You can inspect the two dumps in /tmp/aofdump*.txt" - - set fd [open /tmp/aofdump1.txt w] - puts $fd $dump - close $fd - set fd [open /tmp/aofdump2.txt w] - puts $fd $newdump - close $fd - - set _ 0 - } - } {1} - } - } - - test {EXPIRES after a reload (snapshot + append only file rewrite)} { - r flushdb - r set x 10 - r expire x 1000 - r save - r debug reload - set ttl [r ttl x] - set e1 [expr {$ttl > 900 && $ttl <= 1000}] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set ttl [r ttl x] - set e2 [expr {$ttl > 900 && $ttl <= 1000}] - list $e1 $e2 - } {1 1} - - test {EXPIRES after AOF reload (without rewrite)} { - r flushdb - r config set appendonly yes - r set x somevalue - r expire x 1000 - r setex y 2000 somevalue - r set z somevalue - r expireat z [expr {[clock seconds]+3000}] - - # Milliseconds variants - r set px somevalue - r pexpire px 1000000 - r psetex py 2000000 somevalue - r set pz somevalue - r pexpireat pz [expr {([clock seconds]+3000)*1000}] - - # Reload and check - waitForBgrewriteaof r - # We need to wait two seconds to avoid false positives here, otherwise - # the DEBUG LOADAOF command may read a partial file. - # Another solution would be to set the fsync policy to no, since this - # prevents write() to be delayed by the completion of fsync(). 
- after 2000 - r debug loadaof - set ttl [r ttl x] - assert {$ttl > 900 && $ttl <= 1000} - set ttl [r ttl y] - assert {$ttl > 1900 && $ttl <= 2000} - set ttl [r ttl z] - assert {$ttl > 2900 && $ttl <= 3000} - set ttl [r ttl px] - assert {$ttl > 900 && $ttl <= 1000} - set ttl [r ttl py] - assert {$ttl > 1900 && $ttl <= 2000} - set ttl [r ttl pz] - assert {$ttl > 2900 && $ttl <= 3000} - r config set appendonly no - } - - tags {protocol} { - test {PIPELINING stresser (also a regression for the old epoll bug)} { - set fd2 [socket $::host $::port] - fconfigure $fd2 -encoding binary -translation binary - puts -nonewline $fd2 "SELECT 9\r\n" - flush $fd2 - gets $fd2 - - for {set i 0} {$i < 100000} {incr i} { - set q {} - set val "0000${i}0000" - append q "SET key:$i $val\r\n" - puts -nonewline $fd2 $q - set q {} - append q "GET key:$i\r\n" - puts -nonewline $fd2 $q - } - flush $fd2 - - for {set i 0} {$i < 100000} {incr i} { - gets $fd2 line - gets $fd2 count - set count [string range $count 1 end] - set val [read $fd2 $count] - read $fd2 2 - } - close $fd2 - set _ 1 - } {1} - } - - test {APPEND basics} { - list [r append foo bar] [r get foo] \ - [r append foo 100] [r get foo] - } {3 bar 6 bar100} - - test {APPEND basics, integer encoded values} { - set res {} - r del foo - r append foo 1 - r append foo 2 - lappend res [r get foo] - r set foo 1 - r append foo 2 - lappend res [r get foo] - } {12 12} - - test {APPEND fuzzing} { - set err {} - foreach type {binary alpha compr} { - set buf {} - r del x - for {set i 0} {$i < 1000} {incr i} { - set bin [randstring 0 10 $type] - append buf $bin - r append x $bin - } - if {$buf != [r get x]} { - set err "Expected '$buf' found '[r get x]'" - break - } - } - set _ $err - } {} - - # Leave the user with a clean DB before to exit - test {FLUSHDB} { - set aux {} - r select 9 - r flushdb - lappend aux [r dbsize] - r select 10 - r flushdb - lappend aux [r dbsize] - } {0 0} - - test {Perform a final SAVE to leave a clean DB on disk} { - waitForBgsave r - r save - } {OK} -} diff --git a/tools/pika_migrate/tests/unit/printver.tcl b/tools/pika_migrate/tests/unit/printver.tcl deleted file mode 100644 index c80f45144d..0000000000 --- a/tools/pika_migrate/tests/unit/printver.tcl +++ /dev/null @@ -1,6 +0,0 @@ -start_server {} { - set i [r info] - regexp {redis_version:(.*?)\r\n} $i - version - regexp {redis_git_sha1:(.*?)\r\n} $i - sha1 - puts "Testing Redis version $version ($sha1)" -} diff --git a/tools/pika_migrate/tests/unit/protocol.tcl b/tools/pika_migrate/tests/unit/protocol.tcl deleted file mode 100644 index ac99c3abb4..0000000000 --- a/tools/pika_migrate/tests/unit/protocol.tcl +++ /dev/null @@ -1,117 +0,0 @@ -start_server {tags {"protocol"}} { - test "Handle an empty query" { - reconnect - r write "\r\n" - r flush - assert_equal "PONG" [r ping] - } - - test "Negative multibulk length" { - reconnect - r write "*-10\r\n" - r flush - assert_equal PONG [r ping] - } - - test "Out of range multibulk length" { - reconnect - r write "*20000000\r\n" - r flush - assert_error "*invalid multibulk length*" {r read} - } - - test "Wrong multibulk payload header" { - reconnect - r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\nfooz\r\n" - r flush - assert_error "*expected '$', got 'f'*" {r read} - } - - test "Negative multibulk payload length" { - reconnect - r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$-10\r\n" - r flush - assert_error "*invalid bulk length*" {r read} - } - - test "Out of range multibulk payload length" { - reconnect - r write 
"*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$2000000000\r\n" - r flush - assert_error "*invalid bulk length*" {r read} - } - - test "Non-number multibulk payload length" { - reconnect - r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$blabla\r\n" - r flush - assert_error "*invalid bulk length*" {r read} - } - - test "Multi bulk request not followed by bulk arguments" { - reconnect - r write "*1\r\nfoo\r\n" - r flush - assert_error "*expected '$', got 'f'*" {r read} - } - - test "Generic wrong number of args" { - reconnect - assert_error "*wrong*arguments*ping*" {r ping x y z} - } - - test "Unbalanced number of quotes" { - reconnect - r write "set \"\"\"test-key\"\"\" test-value\r\n" - r write "ping\r\n" - r flush - assert_error "*unbalanced*" {r read} - } - - set c 0 - foreach seq [list "\x00" "*\x00" "$\x00"] { - incr c - test "Protocol desync regression test #$c" { - set s [socket [srv 0 host] [srv 0 port]] - puts -nonewline $s $seq - set payload [string repeat A 1024]"\n" - set test_start [clock seconds] - set test_time_limit 30 - while 1 { - if {[catch { - puts -nonewline $s payload - flush $s - incr payload_size [string length $payload] - }]} { - set retval [gets $s] - close $s - break - } else { - set elapsed [expr {[clock seconds]-$test_start}] - if {$elapsed > $test_time_limit} { - close $s - error "assertion:Redis did not closed connection after protocol desync" - } - } - } - set retval - } {*Protocol error*} - } - unset c -} - -start_server {tags {"regression"}} { - test "Regression for a crash with blocking ops and pipelining" { - set rd [redis_deferring_client] - set fd [r channel] - set proto "*3\r\n\$5\r\nBLPOP\r\n\$6\r\nnolist\r\n\$1\r\n0\r\n" - puts -nonewline $fd $proto$proto - flush $fd - set res {} - - $rd rpush nolist a - $rd read - $rd rpush nolist a - $rd read - } -} diff --git a/tools/pika_migrate/tests/unit/pubsub.tcl b/tools/pika_migrate/tests/unit/pubsub.tcl deleted file mode 100644 index 16c8c6a5f7..0000000000 --- a/tools/pika_migrate/tests/unit/pubsub.tcl +++ /dev/null @@ -1,399 +0,0 @@ -start_server {tags {"pubsub"}} { - proc __consume_subscribe_messages {client type channels} { - set numsub -1 - set counts {} - - for {set i [llength $channels]} {$i > 0} {incr i -1} { - set msg [$client read] - assert_equal $type [lindex $msg 0] - - # when receiving subscribe messages the channels names - # are ordered. when receiving unsubscribe messages - # they are unordered - set idx [lsearch -exact $channels [lindex $msg 1]] - if {[string match "*unsubscribe" $type]} { - assert {$idx >= 0} - } else { - assert {$idx == 0} - } - set channels [lreplace $channels $idx $idx] - - # aggregate the subscription count to return to the caller - lappend counts [lindex $msg 2] - } - - # we should have received messages for channels - assert {[llength $channels] == 0} - return $counts - } - - proc subscribe {client channels} { - $client subscribe {*}$channels - __consume_subscribe_messages $client subscribe $channels - } - - proc unsubscribe {client {channels {}}} { - $client unsubscribe {*}$channels - __consume_subscribe_messages $client unsubscribe $channels - } - - proc psubscribe {client channels} { - $client psubscribe {*}$channels - __consume_subscribe_messages $client psubscribe $channels - } - - proc punsubscribe {client {channels {}}} { - $client punsubscribe {*}$channels - __consume_subscribe_messages $client punsubscribe $channels - } - - test "Pub/Sub PING" { - set rd1 [redis_deferring_client] - subscribe $rd1 somechannel - # While subscribed to non-zero channels PING works in Pub/Sub mode. 
- $rd1 ping - set reply1 [$rd1 read] - unsubscribe $rd1 somechannel - # Now we are unsubscribed, PING should just return PONG. - $rd1 ping - set reply2 [$rd1 read] - $rd1 close - list $reply1 $reply2 - } {PONG PONG} - - test "PUBLISH/SUBSCRIBE basics" { - set rd1 [redis_deferring_client] - - # subscribe to two channels - assert_equal {1 2} [subscribe $rd1 {chan1 chan2}] - assert_equal 1 [r publish chan1 hello] - assert_equal 1 [r publish chan2 world] - assert_equal {message chan1 hello} [$rd1 read] - assert_equal {message chan2 world} [$rd1 read] - - # unsubscribe from one of the channels - unsubscribe $rd1 {chan1} - assert_equal 0 [r publish chan1 hello] - assert_equal 1 [r publish chan2 world] - assert_equal {message chan2 world} [$rd1 read] - - # unsubscribe from the remaining channel - unsubscribe $rd1 {chan2} - assert_equal 0 [r publish chan1 hello] - assert_equal 0 [r publish chan2 world] - - # clean up clients - $rd1 close - } - - test "PUBLISH/SUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - assert_equal {1} [subscribe $rd1 {chan1}] - assert_equal {1} [subscribe $rd2 {chan1}] - assert_equal 2 [r publish chan1 hello] - assert_equal {message chan1 hello} [$rd1 read] - assert_equal {message chan1 hello} [$rd2 read] - - # clean up clients - $rd1 close - $rd2 close - } - - test "PUBLISH/SUBSCRIBE after UNSUBSCRIBE without arguments" { - set rd1 [redis_deferring_client] - assert_equal {1 2 3} [subscribe $rd1 {chan1 chan2 chan3}] - unsubscribe $rd1 - assert_equal 0 [r publish chan1 hello] - assert_equal 0 [r publish chan2 hello] - assert_equal 0 [r publish chan3 hello] - - # clean up clients - $rd1 close - } - - test "SUBSCRIBE to one channel more than once" { - set rd1 [redis_deferring_client] - assert_equal {1 1 1} [subscribe $rd1 {chan1 chan1 chan1}] - assert_equal 1 [r publish chan1 hello] - assert_equal {message chan1 hello} [$rd1 read] - - # clean up clients - $rd1 close - } - - test "UNSUBSCRIBE from non-subscribed channels" { - set rd1 [redis_deferring_client] - assert_equal {0 0 0} [unsubscribe $rd1 {foo bar quux}] - - # clean up clients - $rd1 close - } - - test "PUBLISH/PSUBSCRIBE basics" { - set rd1 [redis_deferring_client] - - # subscribe to two patterns - assert_equal {1 2} [psubscribe $rd1 {foo.* bar.*}] - assert_equal 1 [r publish foo.1 hello] - assert_equal 1 [r publish bar.1 hello] - assert_equal 0 [r publish foo1 hello] - assert_equal 0 [r publish barfoo.1 hello] - assert_equal 0 [r publish qux.1 hello] - assert_equal {pmessage foo.* foo.1 hello} [$rd1 read] - assert_equal {pmessage bar.* bar.1 hello} [$rd1 read] - - # unsubscribe from one of the patterns - assert_equal {1} [punsubscribe $rd1 {foo.*}] - assert_equal 0 [r publish foo.1 hello] - assert_equal 1 [r publish bar.1 hello] - assert_equal {pmessage bar.* bar.1 hello} [$rd1 read] - - # unsubscribe from the remaining pattern - assert_equal {0} [punsubscribe $rd1 {bar.*}] - assert_equal 0 [r publish foo.1 hello] - assert_equal 0 [r publish bar.1 hello] - - # clean up clients - $rd1 close - } - - test "PUBLISH/PSUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - assert_equal {1} [psubscribe $rd1 {chan.*}] - assert_equal {1} [psubscribe $rd2 {chan.*}] - assert_equal 2 [r publish chan.foo hello] - assert_equal {pmessage chan.* chan.foo hello} [$rd1 read] - assert_equal {pmessage chan.* chan.foo hello} [$rd2 read] - - # clean up clients - $rd1 close - $rd2 close - } - - test "PUBLISH/PSUBSCRIBE after PUNSUBSCRIBE 
without arguments" { - set rd1 [redis_deferring_client] - assert_equal {1 2 3} [psubscribe $rd1 {chan1.* chan2.* chan3.*}] - punsubscribe $rd1 - assert_equal 0 [r publish chan1.hi hello] - assert_equal 0 [r publish chan2.hi hello] - assert_equal 0 [r publish chan3.hi hello] - - # clean up clients - $rd1 close - } - - test "PUNSUBSCRIBE from non-subscribed channels" { - set rd1 [redis_deferring_client] - assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}] - - # clean up clients - $rd1 close - } - - test "NUMSUB returns numbers, not strings (#1561)" { - r pubsub numsub abc def - } {abc 0 def 0} - - test "PubSub return value" { - set rd1 [redis_deferring_client] - assert_equal {1} [subscribe $rd1 {foo.bar}] - assert_equal {2} [psubscribe $rd1 {foo.*}] - assert_equal {foo.bar} [r pubsub channels] - assert_equal {1} [r pubsub numpat] - assert_equal {foo.bar 1} [r pubsub numsub foo.bar] - - $rd1 close - } - - test "Mix SUBSCRIBE and PSUBSCRIBE" { - set rd1 [redis_deferring_client] - assert_equal {1} [subscribe $rd1 {foo.bar}] - assert_equal {2} [psubscribe $rd1 {foo.*}] - - assert_equal 2 [r publish foo.bar hello] - assert_equal {message foo.bar hello} [$rd1 read] - assert_equal {pmessage foo.* foo.bar hello} [$rd1 read] - - # clean up clients - $rd1 close - } - - test "PUNSUBSCRIBE and UNSUBSCRIBE should always reply" { - # Make sure we are not subscribed to any channel at all. - r punsubscribe - r unsubscribe - # Now check if the commands still reply correctly. - set reply1 [r punsubscribe] - set reply2 [r unsubscribe] - concat $reply1 $reply2 - } {punsubscribe {} 0 unsubscribe {} 0} - - ### Keyspace events notification tests - -# test "Keyspace notifications: we receive keyspace notifications" { -# r config set notify-keyspace-events KA -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# assert_equal {pmessage * __keyspace@9__:foo set} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: we receive keyevent notifications" { -# r config set notify-keyspace-events EA -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# assert_equal {pmessage * __keyevent@9__:set foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: we can receive both kind of events" { -# r config set notify-keyspace-events KEA -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# assert_equal {pmessage * __keyspace@9__:foo set} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:set foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: we are able to mask events" { -# r config set notify-keyspace-events KEl -# r del mylist -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# r lpush mylist a -# # No notification for set, because only list commands are enabled. 
-# assert_equal {pmessage * __keyspace@9__:mylist lpush} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:lpush mylist} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: general events test" { -# r config set notify-keyspace-events KEg -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# r expire foo 1 -# r del foo -# assert_equal {pmessage * __keyspace@9__:foo expire} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:expire foo} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:foo del} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:del foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: list events test" { -# r config set notify-keyspace-events KEl -# r del mylist -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r lpush mylist a -# r rpush mylist a -# r rpop mylist -# assert_equal {pmessage * __keyspace@9__:mylist lpush} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:lpush mylist} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:mylist rpush} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:rpush mylist} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:mylist rpop} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:rpop mylist} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: set events test" { -# r config set notify-keyspace-events Ks -# r del myset -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r sadd myset a b c d -# r srem myset x -# r sadd myset x y z -# r srem myset x -# assert_equal {pmessage * __keyspace@9__:myset sadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myset sadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myset srem} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: zset events test" { -# r config set notify-keyspace-events Kz -# r del myzset -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r zadd myzset 1 a 2 b -# r zrem myzset x -# r zadd myzset 3 x 4 y 5 z -# r zrem myzset x -# assert_equal {pmessage * __keyspace@9__:myzset zadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myzset zadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myzset zrem} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: hash events test" { -# r config set notify-keyspace-events Kh -# r del myhash -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r hmset myhash yes 1 no 0 -# r hincrby myhash yes 10 -# assert_equal {pmessage * __keyspace@9__:myhash hset} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myhash hincrby} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: expired events (triggered expire)" { -# r config set notify-keyspace-events Ex -# r del foo -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r psetex foo 100 1 -# wait_for_condition 50 100 { -# [r exists foo] == 0 -# } else { -# fail "Key does not expire?!" 
-# } -# assert_equal {pmessage * __keyevent@9__:expired foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: expired events (background expire)" { -# r config set notify-keyspace-events Ex -# r del foo -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r psetex foo 100 1 -# assert_equal {pmessage * __keyevent@9__:expired foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: evicted events" { -# r config set notify-keyspace-events Ee -# r config set maxmemory-policy allkeys-lru -# r flushdb -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# r config set maxmemory 1 -# assert_equal {pmessage * __keyevent@9__:evicted foo} [$rd1 read] -# r config set maxmemory 0 -# $rd1 close -# } -# -# test "Keyspace notifications: test CONFIG GET/SET of event flags" { -# r config set notify-keyspace-events gKE -# assert_equal {gKE} [lindex [r config get notify-keyspace-events] 1] -# r config set notify-keyspace-events {$lshzxeKE} -# assert_equal {$lshzxeKE} [lindex [r config get notify-keyspace-events] 1] -# r config set notify-keyspace-events KA -# assert_equal {AK} [lindex [r config get notify-keyspace-events] 1] -# r config set notify-keyspace-events EA -# assert_equal {AE} [lindex [r config get notify-keyspace-events] 1] -# } -#} diff --git a/tools/pika_migrate/tests/unit/quit.tcl b/tools/pika_migrate/tests/unit/quit.tcl deleted file mode 100644 index 4cf440abf1..0000000000 --- a/tools/pika_migrate/tests/unit/quit.tcl +++ /dev/null @@ -1,40 +0,0 @@ -start_server {tags {"quit"}} { - proc format_command {args} { - set cmd "*[llength $args]\r\n" - foreach a $args { - append cmd "$[string length $a]\r\n$a\r\n" - } - set _ $cmd - } - - test "QUIT returns OK" { - reconnect - assert_equal OK [r quit] - assert_error * {r ping} - } - - test "Pipelined commands after QUIT must not be executed" { - reconnect - r write [format_command quit] - r write [format_command set foo bar] - r flush - assert_equal OK [r read] - assert_error * {r read} - - reconnect - assert_equal {} [r get foo] - } - - test "Pipelined commands after QUIT that exceed read buffer size" { - reconnect - r write [format_command quit] - r write [format_command set foo [string repeat "x" 1024]] - r flush - assert_equal OK [r read] - assert_error * {r read} - - reconnect - assert_equal {} [r get foo] - - } -} diff --git a/tools/pika_migrate/tests/unit/scan.tcl b/tools/pika_migrate/tests/unit/scan.tcl deleted file mode 100644 index 1d84f128da..0000000000 --- a/tools/pika_migrate/tests/unit/scan.tcl +++ /dev/null @@ -1,239 +0,0 @@ -start_server {tags {"scan"}} { - test "SCAN basic" { - r flushdb - r debug populate 1000 - - set cur 0 - set keys {} - while 1 { - set res [r scan $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 1000 [llength $keys] - } - - test "SCAN COUNT" { - r flushdb - r debug populate 1000 - - set cur 0 - set keys {} - while 1 { - set res [r scan $cur count 5] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 1000 [llength $keys] - } - - test "SCAN MATCH" { - r flushdb - r debug populate 1000 - - set cur 0 - set keys {} - while 1 { - set res [r scan $cur match "key:1??"] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 100 [llength 
$keys]
-    }
-
-    foreach enc {intset hashtable} {
-        test "SSCAN with encoding $enc" {
-            # Create the Set
-            r del set
-            if {$enc eq {intset}} {
-                set prefix ""
-            } else {
-                set prefix "ele:"
-            }
-            set elements {}
-            for {set j 0} {$j < 100} {incr j} {
-                lappend elements ${prefix}${j}
-            }
-            r sadd set {*}$elements
-
-            # Verify that the encoding matches.
-            assert {[r object encoding set] eq $enc}
-
-            # Test SSCAN
-            set cur 0
-            set keys {}
-            while 1 {
-                set res [r sscan set $cur]
-                set cur [lindex $res 0]
-                set k [lindex $res 1]
-                lappend keys {*}$k
-                if {$cur == 0} break
-            }
-
-            set keys [lsort -unique $keys]
-            assert_equal 100 [llength $keys]
-        }
-    }
-
-    foreach enc {ziplist hashtable} {
-        test "HSCAN with encoding $enc" {
-            # Create the Hash
-            r del hash
-            if {$enc eq {ziplist}} {
-                set count 30
-            } else {
-                set count 1000
-            }
-            set elements {}
-            for {set j 0} {$j < $count} {incr j} {
-                lappend elements key:$j $j
-            }
-            r hmset hash {*}$elements
-
-            # Verify that the encoding matches.
-            assert {[r object encoding hash] eq $enc}
-
-            # Test HSCAN
-            set cur 0
-            set keys {}
-            while 1 {
-                set res [r hscan hash $cur]
-                set cur [lindex $res 0]
-                set k [lindex $res 1]
-                lappend keys {*}$k
-                if {$cur == 0} break
-            }
-
-            set keys2 {}
-            foreach {k v} $keys {
-                assert {$k eq "key:$v"}
-                lappend keys2 $k
-            }
-
-            set keys2 [lsort -unique $keys2]
-            assert_equal $count [llength $keys2]
-        }
-    }
-
-    foreach enc {ziplist skiplist} {
-        test "ZSCAN with encoding $enc" {
-            # Create the Sorted Set
-            r del zset
-            if {$enc eq {ziplist}} {
-                set count 30
-            } else {
-                set count 1000
-            }
-            set elements {}
-            for {set j 0} {$j < $count} {incr j} {
-                lappend elements $j key:$j
-            }
-            r zadd zset {*}$elements
-
-            # Verify that the encoding matches.
-            assert {[r object encoding zset] eq $enc}
-
-            # Test ZSCAN
-            set cur 0
-            set keys {}
-            while 1 {
-                set res [r zscan zset $cur]
-                set cur [lindex $res 0]
-                set k [lindex $res 1]
-                lappend keys {*}$k
-                if {$cur == 0} break
-            }
-
-            set keys2 {}
-            foreach {k v} $keys {
-                assert {$k eq "key:$v"}
-                lappend keys2 $k
-            }
-
-            set keys2 [lsort -unique $keys2]
-            assert_equal $count [llength $keys2]
-        }
-    }
-
-    test "SCAN guarantees check under write load" {
-        r flushdb
-        r debug populate 100
-
-        # We start scanning here, so keys from 0 to 99 should all be
-        # reported at the end of the iteration.
-        set cur 0
-        set keys {}
-        while 1 {
-            set res [r scan $cur]
-            set cur [lindex $res 0]
-            set k [lindex $res 1]
-            lappend keys {*}$k
-            if {$cur == 0} break
-            # Write 10 random keys at every SCAN iteration.
-            for {set j 0} {$j < 10} {incr j} {
-                r set addedkey:[randomInt 1000] foo
-            }
-        }
-
-        set keys2 {}
-        foreach k $keys {
-            if {[string length $k] > 6} continue
-            lappend keys2 $k
-        }
-
-        set keys2 [lsort -unique $keys2]
-        assert_equal 100 [llength $keys2]
-    }
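Every test in this file repeats the same cursor loop, which also encodes the SCAN contract the write-load test above verifies: keep calling with the returned cursor until it comes back as 0, and de-duplicate the result, since a key may be reported more than once while the table is rehashing. A sketch of the loop factored into a reusable helper; `scan_all_keys` is hypothetical and assumes the suite's usual `r` client command:

    proc scan_all_keys {{pattern *}} {
        set cur 0
        set keys {}
        while 1 {
            set res [r scan $cur match $pattern count 100]
            set cur [lindex $res 0]
            lappend keys {*}[lindex $res 1]
            if {$cur == 0} break
        }
        # SCAN may report duplicates, so return a de-duplicated list.
        lsort -unique $keys
    }
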
- for {set j 0} {$j < 10} {incr j} { - r set addedkey:[randomInt 1000] foo - } - } - - set keys2 {} - foreach k $keys { - if {[string length $k] > 6} continue - lappend keys2 $k - } - - set keys2 [lsort -unique $keys2] - assert_equal 100 [llength $keys2] - } - - test "SSCAN with integer encoded object (issue #1345)" { - set objects {1 a} - r del set - r sadd set {*}$objects - set res [r sscan set 0 MATCH *a* COUNT 100] - assert_equal [lsort -unique [lindex $res 1]] {a} - set res [r sscan set 0 MATCH *1* COUNT 100] - assert_equal [lsort -unique [lindex $res 1]] {1} - } - - test "SSCAN with PATTERN" { - r del mykey - r sadd mykey foo fab fiz foobar 1 2 3 4 - set res [r sscan mykey 0 MATCH foo* COUNT 10000] - lsort -unique [lindex $res 1] - } {foo foobar} - - test "HSCAN with PATTERN" { - r del mykey - r hmset mykey foo 1 fab 2 fiz 3 foobar 10 1 a 2 b 3 c 4 d - set res [r hscan mykey 0 MATCH foo* COUNT 10000] - lsort -unique [lindex $res 1] - } {1 10 foo foobar} - - test "ZSCAN with PATTERN" { - r del mykey - r zadd mykey 1 foo 2 fab 3 fiz 10 foobar - set res [r zscan mykey 0 MATCH foo* COUNT 10000] - lsort -unique [lindex $res 1] - } - - test "ZSCAN scores: regression test for issue #2175" { - r del mykey - for {set j 0} {$j < 500} {incr j} { - r zadd mykey 9.8813129168249309e-323 $j - } - set res [lindex [r zscan mykey 0] 1] - set first_score [lindex $res 1] - assert {$first_score != 0} - } -} diff --git a/tools/pika_migrate/tests/unit/scripting.tcl b/tools/pika_migrate/tests/unit/scripting.tcl deleted file mode 100644 index e1cd2174ba..0000000000 --- a/tools/pika_migrate/tests/unit/scripting.tcl +++ /dev/null @@ -1,606 +0,0 @@ -start_server {tags {"scripting"}} { - test {EVAL - Does Lua interpreter replies to our requests?} { - r eval {return 'hello'} 0 - } {hello} - - test {EVAL - Lua integer -> Redis protocol type conversion} { - r eval {return 100.5} 0 - } {100} - - test {EVAL - Lua string -> Redis protocol type conversion} { - r eval {return 'hello world'} 0 - } {hello world} - - test {EVAL - Lua true boolean -> Redis protocol type conversion} { - r eval {return true} 0 - } {1} - - test {EVAL - Lua false boolean -> Redis protocol type conversion} { - r eval {return false} 0 - } {} - - test {EVAL - Lua status code reply -> Redis protocol type conversion} { - r eval {return {ok='fine'}} 0 - } {fine} - - test {EVAL - Lua error reply -> Redis protocol type conversion} { - catch { - r eval {return {err='this is an error'}} 0 - } e - set _ $e - } {this is an error} - - test {EVAL - Lua table -> Redis protocol type conversion} { - r eval {return {1,2,3,'ciao',{1,2}}} 0 - } {1 2 3 ciao {1 2}} - - test {EVAL - Are the KEYS and ARGV arrays populated correctly?} { - r eval {return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}} 2 a b c d - } {a b c d} - - test {EVAL - is Lua able to call Redis API?} { - r set mykey myval - r eval {return redis.call('get',KEYS[1])} 1 mykey - } {myval} - - test {EVALSHA - Can we call a SHA1 if already defined?} { - r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey - } {myval} - - test {EVALSHA - Can we call a SHA1 in uppercase?} { - r evalsha FD758D1589D044DD850A6F05D52F2EEFD27F033F 1 mykey - } {myval} - - test {EVALSHA - Do we get an error on invalid SHA1?} { - catch {r evalsha NotValidShaSUM 0} e - set _ $e - } {NOSCRIPT*} - - test {EVALSHA - Do we get an error on non defined SHA1?} { - catch {r evalsha ffd632c7d33e571e9f24556ebed26c3479a87130 0} e - set _ $e - } {NOSCRIPT*} - - test {EVAL - Redis integer -> Lua type conversion} { - r eval { - local foo = 
redis.pcall('incr','x') - return {type(foo),foo} - } 0 - } {number 1} - - test {EVAL - Redis bulk -> Lua type conversion} { - r set mykey myval - r eval { - local foo = redis.pcall('get','mykey') - return {type(foo),foo} - } 0 - } {string myval} - - test {EVAL - Redis multi bulk -> Lua type conversion} { - r del mylist - r rpush mylist a - r rpush mylist b - r rpush mylist c - r eval { - local foo = redis.pcall('lrange','mylist',0,-1) - return {type(foo),foo[1],foo[2],foo[3],# foo} - } 0 - } {table a b c 3} - - test {EVAL - Redis status reply -> Lua type conversion} { - r eval { - local foo = redis.pcall('set','mykey','myval') - return {type(foo),foo['ok']} - } 0 - } {table OK} - - test {EVAL - Redis error reply -> Lua type conversion} { - r set mykey myval - r eval { - local foo = redis.pcall('incr','mykey') - return {type(foo),foo['err']} - } 0 - } {table {ERR value is not an integer or out of range}} - - test {EVAL - Redis nil bulk reply -> Lua type conversion} { - r del mykey - r eval { - local foo = redis.pcall('get','mykey') - return {type(foo),foo == false} - } 0 - } {boolean 1} - - test {EVAL - Is the Lua client using the currently selected DB?} { - r set mykey "this is DB 9" - r select 10 - r set mykey "this is DB 10" - r eval {return redis.pcall('get','mykey')} 0 - } {this is DB 10} - - test {EVAL - SELECT inside Lua should not affect the caller} { - # here we DB 10 is selected - r set mykey "original value" - r eval {return redis.pcall('select','9')} 0 - set res [r get mykey] - r select 9 - set res - } {original value} - - if 0 { - test {EVAL - Script can't run more than configured time limit} { - r config set lua-time-limit 1 - catch { - r eval { - local i = 0 - while true do i=i+1 end - } 0 - } e - set _ $e - } {*execution time*} - } - - test {EVAL - Scripts can't run certain commands} { - set e {} - catch {r eval {return redis.pcall('spop','x')} 0} e - set e - } {*not allowed*} - - test {EVAL - Scripts can't run certain commands} { - set e {} - catch { - r eval "redis.pcall('randomkey'); return redis.pcall('set','x','ciao')" 0 - } e - set e - } {*not allowed after*} - - test {EVAL - No arguments to redis.call/pcall is considered an error} { - set e {} - catch {r eval {return redis.call()} 0} e - set e - } {*one argument*} - - test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { - set e {} - catch { - r eval "redis.call('nosuchcommand')" 0 - } e - set e - } {*Unknown Redis*} - - test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { - set e {} - catch { - r eval "redis.call('get','a','b','c')" 0 - } e - set e - } {*number of args*} - - test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { - set e {} - r set foo bar - catch { - r eval {redis.call('lpush',KEYS[1],'val')} 1 foo - } e - set e - } {*against a key*} - - test {EVAL - JSON numeric decoding} { - # We must return the table as a string because otherwise - # Redis converts floats to ints and we get 0 and 1023 instead - # of 0.0003 and 1023.2 as the parsed output. - r eval {return - table.concat( - cjson.decode( - "[0.0, -5e3, -1, 0.3e-3, 1023.2, 0e10]"), " ") - } 0 - } {0 -5000 -1 0.0003 1023.2 0} - - test {EVAL - JSON string decoding} { - r eval {local decoded = cjson.decode('{"keya": "a", "keyb": "b"}') - return {decoded.keya, decoded.keyb} - } 0 - } {a b} - - test {EVAL - cmsgpack can pack double?} { - r eval {local encoded = cmsgpack.pack(0.1) - local h = "" - for i = 1, #encoded do - h = h .. 
string.format("%02x",string.byte(encoded,i)) - end - return h - } 0 - } {cb3fb999999999999a} - - test {EVAL - cmsgpack can pack negative int64?} { - r eval {local encoded = cmsgpack.pack(-1099511627776) - local h = "" - for i = 1, #encoded do - h = h .. string.format("%02x",string.byte(encoded,i)) - end - return h - } 0 - } {d3ffffff0000000000} - - test {EVAL - cmsgpack can pack and unpack circular references?} { - r eval {local a = {x=nil,y=5} - local b = {x=a} - a['x'] = b - local encoded = cmsgpack.pack(a) - local h = "" - -- cmsgpack encodes to a depth of 16, but can't encode - -- references, so the encoded object has a deep copy recusive - -- depth of 16. - for i = 1, #encoded do - h = h .. string.format("%02x",string.byte(encoded,i)) - end - -- when unpacked, re.x.x != re because the unpack creates - -- individual tables down to a depth of 16. - -- (that's why the encoded output is so large) - local re = cmsgpack.unpack(encoded) - assert(re) - assert(re.x) - assert(re.x.x.y == re.y) - assert(re.x.x.x.x.y == re.y) - assert(re.x.x.x.x.x.x.y == re.y) - assert(re.x.x.x.x.x.x.x.x.x.x.y == re.y) - -- maximum working depth: - assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.y == re.y) - -- now the last x would be b above and has no y - assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x) - -- so, the final x.x is at the depth limit and was assigned nil - assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x == nil) - return {h, re.x.x.x.x.x.x.x.x.y == re.y, re.y == 5} - } 0 - } {82a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a178c0 1 1} - - test {EVAL - Numerical sanity check from bitop} { - r eval {assert(0x7fffffff == 2147483647, "broken hex literals"); - assert(0xffffffff == -1 or 0xffffffff == 2^32-1, - "broken hex literals"); - assert(tostring(-1) == "-1", "broken tostring()"); - assert(tostring(0xffffffff) == "-1" or - tostring(0xffffffff) == "4294967295", - "broken tostring()") - } 0 - } {} - - test {EVAL - Verify minimal bitop functionality} { - r eval {assert(bit.tobit(1) == 1); - assert(bit.band(1) == 1); - assert(bit.bxor(1,2) == 3); - assert(bit.bor(1,2,4,8,16,32,64,128) == 255) - } 0 - } {} - - test {SCRIPTING FLUSH - is able to clear the scripts cache?} { - r set mykey myval - set v [r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey] - assert_equal $v myval - set e "" - r script flush - catch {r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey} e - set e - } {NOSCRIPT*} - - test {SCRIPT EXISTS - can detect already defined scripts?} { - r eval "return 1+1" 0 - r script exists a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bd9 a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bda - } {1 0} - - test {SCRIPT LOAD - is able to register scripts in the scripting cache} { - list \ - [r script load "return 'loaded'"] \ - [r evalsha b534286061d4b9e4026607613b95c06c06015ae8 0] - } {b534286061d4b9e4026607613b95c06c06015ae8 loaded} - - test "In the context of Lua the output of random commands gets ordered" { - r del myset - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - r eval {return redis.call('smembers',KEYS[1])} 1 myset - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT is normally not alpha re-ordered for the scripting engine" { - r del myset - r sadd myset 1 2 3 4 10 - r eval {return redis.call('sort',KEYS[1],'desc')} 1 myset - } {10 4 3 2 1} - - test "SORT BY output gets ordered for scripting" { - r del myset - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - r eval 
{return redis.call('sort',KEYS[1],'by','_')} 1 myset - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT BY with GET gets ordered for scripting" { - r del myset - r sadd myset a b c - r eval {return redis.call('sort',KEYS[1],'by','_','get','#','get','_:*')} 1 myset - } {a {} b {} c {}} - - test "redis.sha1hex() implementation" { - list [r eval {return redis.sha1hex('')} 0] \ - [r eval {return redis.sha1hex('Pizza & Mandolino')} 0] - } {da39a3ee5e6b4b0d3255bfef95601890afd80709 74822d82031af7493c20eefa13bd07ec4fada82f} - - test {Globals protection reading an undeclared global variable} { - catch {r eval {return a} 0} e - set e - } {*ERR*attempted to access unexisting global*} - - test {Globals protection setting an undeclared global*} { - catch {r eval {a=10} 0} e - set e - } {*ERR*attempted to create global*} - - test {Test an example script DECR_IF_GT} { - set decr_if_gt { - local current - - current = redis.call('get',KEYS[1]) - if not current then return nil end - if current > ARGV[1] then - return redis.call('decr',KEYS[1]) - else - return redis.call('get',KEYS[1]) - end - } - r set foo 5 - set res {} - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - set res - } {4 3 2 2 2} - - test {Scripting engine resets PRNG at every script execution} { - set rand1 [r eval {return tostring(math.random())} 0] - set rand2 [r eval {return tostring(math.random())} 0] - assert_equal $rand1 $rand2 - } - - test {Scripting engine PRNG can be seeded correctly} { - set rand1 [r eval { - math.randomseed(ARGV[1]); return tostring(math.random()) - } 0 10] - set rand2 [r eval { - math.randomseed(ARGV[1]); return tostring(math.random()) - } 0 10] - set rand3 [r eval { - math.randomseed(ARGV[1]); return tostring(math.random()) - } 0 20] - assert_equal $rand1 $rand2 - assert {$rand2 ne $rand3} - } - - test {EVAL does not leak in the Lua stack} { - r set x 0 - # Use a non blocking client to speedup the loop. - set rd [redis_deferring_client] - for {set j 0} {$j < 10000} {incr j} { - $rd eval {return redis.call("incr",KEYS[1])} 1 x - } - for {set j 0} {$j < 10000} {incr j} { - $rd read - } - assert {[s used_memory_lua] < 1024*100} - $rd close - r get x - } {10000} - - test {EVAL processes writes from AOF in read-only slaves} { - r flushall - r config set appendonly yes - r eval {redis.call("set",KEYS[1],"100")} 1 foo - r eval {redis.call("incr",KEYS[1])} 1 foo - r eval {redis.call("incr",KEYS[1])} 1 foo - wait_for_condition 50 100 { - [s aof_rewrite_in_progress] == 0 - } else { - fail "AOF rewrite can't complete after CONFIG SET appendonly yes." 
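# (wait_for_condition above polls the condition up to 50 times with a
# 100 ms pause between tries; if the AOF rewrite still has not finished
# by then, the test aborts with the failure message.)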
- } - r config set slave-read-only yes - r slaveof 127.0.0.1 0 - r debug loadaof - set res [r get foo] - r slaveof no one - set res - } {102} - - test {We can call scripts rewriting client->argv from Lua} { - r del myset - r sadd myset a b c - r mset a 1 b 2 c 3 d 4 - assert {[r spop myset] ne {}} - assert {[r spop myset] ne {}} - assert {[r spop myset] ne {}} - assert {[r mget a b c d] eq {1 2 3 4}} - assert {[r spop myset] eq {}} - } - - test {Call Redis command with many args from Lua (issue #1764)} { - r eval { - local i - local x={} - redis.call('del','mylist') - for i=1,100 do - table.insert(x,i) - end - redis.call('rpush','mylist',unpack(x)) - return redis.call('lrange','mylist',0,-1) - } 0 - } {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100} - - test {Number conversion precision test (issue #1118)} { - r eval { - local value = 9007199254740991 - redis.call("set","foo",value) - return redis.call("get","foo") - } 0 - } {9007199254740991} - - test {String containing number precision test (regression of issue #1118)} { - r eval { - redis.call("set", "key", "12039611435714932082") - return redis.call("get", "key") - } 0 - } {12039611435714932082} - - test {Verify negative arg count is error instead of crash (issue #1842)} { - catch { r eval { return "hello" } -12 } e - set e - } {ERR Number of keys can't be negative} - - test {Correct handling of reused argv (issue #1939)} { - r eval { - for i = 0, 10 do - redis.call('SET', 'a', '1') - redis.call('MGET', 'a', 'b', 'c') - redis.call('EXPIRE', 'a', 0) - redis.call('GET', 'a') - redis.call('MGET', 'a', 'b', 'c') - end - } 0 - } -} - -# Start a new server since the last test in this stanza will kill the -# instance. -start_server {tags {"scripting"}} { - test {Timedout read-only scripts can be killed by SCRIPT KILL} { - set rd [redis_deferring_client] - r config set lua-time-limit 10 - $rd eval {while true do end} 0 - after 200 - catch {r ping} e - assert_match {BUSY*} $e - r script kill - after 200 ; # Give some time to Lua to call the hook again... - assert_equal [r ping] "PONG" - } - - test {Timedout script link is still usable after Lua returns} { - r config set lua-time-limit 10 - r eval {for i=1,100000 do redis.call('ping') end return 'ok'} 0 - r ping - } {PONG} - - test {Timedout scripts that modified data can't be killed by SCRIPT KILL} { - set rd [redis_deferring_client] - r config set lua-time-limit 10 - $rd eval {redis.call('set',KEYS[1],'y'); while true do end} 1 x - after 200 - catch {r ping} e - assert_match {BUSY*} $e - catch {r script kill} e - assert_match {UNKILLABLE*} $e - catch {r ping} e - assert_match {BUSY*} $e - } - - # Note: keep this test at the end of this server stanza because it - # kills the server. - test {SHUTDOWN NOSAVE can kill a timedout script anyway} { - # The server should still be unresponsive to normal commands. - catch {r ping} e - assert_match {BUSY*} $e - catch {r shutdown nosave} - # Make sure the server was killed - catch {set rd [redis_deferring_client]} e - assert_match {*connection refused*} $e - } -} - -start_server {tags {"scripting repl"}} { - start_server {} { - test {Before the slave connects we issue two EVAL commands} { - # One with an error, but still executing a command.
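# The SCRIPT KILL stanza above always detects a stuck script the same
# way: normal commands start failing with -BUSY once lua-time-limit is
# exceeded, and SCRIPT KILL succeeds only while the script has not yet
# written anything. A rough sketch of that probe (the proc name
# kill_if_busy is illustrative, not part of this suite):
proc kill_if_busy {} {
    # Returns 1 if a busy read-only script was killed, 0 if the server
    # was responsive; SCRIPT KILL itself raises UNKILLABLE* when the
    # script has already performed a write.
    if {[catch {r ping} e] && [string match {BUSY*} $e]} {
        r script kill
        return 1
    }
    return 0
}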
- # SHA is: 67164fc43fa971f76fd1aaeeaf60c1c178d25876 - catch { - r eval {redis.call('incr',KEYS[1]); redis.call('nonexisting')} 1 x - } - # One command is correct: - # SHA is: 6f5ade10a69975e903c6d07b10ea44c6382381a5 - r eval {return redis.call('incr',KEYS[1])} 1 x - } {2} - - test {Connect a slave to the main instance} { - r -1 slaveof [srv 0 host] [srv 0 port] - wait_for_condition 50 100 { - [s -1 role] eq {slave} && - [string match {*master_link_status:up*} [r -1 info replication]] - } else { - fail "Can't turn the instance into a slave" - } - } - - test {Now use EVALSHA against the master, with both SHAs} { - # The server should replicate successful and unsuccessful - # commands as EVAL instead of EVALSHA. - catch { - r evalsha 67164fc43fa971f76fd1aaeeaf60c1c178d25876 1 x - } - r evalsha 6f5ade10a69975e903c6d07b10ea44c6382381a5 1 x - } {4} - - test {If EVALSHA was replicated as EVAL, 'x' should be '4'} { - wait_for_condition 50 100 { - [r -1 get x] eq {4} - } else { - fail "Expected 4 in x, but value is '[r -1 get x]'" - } - } - - test {Replication of script multiple pushes to list with BLPOP} { - set rd [redis_deferring_client] - $rd brpop a 0 - r eval { - redis.call("lpush",KEYS[1],"1"); - redis.call("lpush",KEYS[1],"2"); - } 1 a - set res [$rd read] - $rd close - wait_for_condition 50 100 { - [r -1 lrange a 0 -1] eq [r lrange a 0 -1] - } else { - fail "Expected list 'a' in slave and master to be the same, but they are respectively '[r -1 lrange a 0 -1]' and '[r lrange a 0 -1]'" - } - set res - } {a 1} - - test {EVALSHA replication when first call is readonly} { - r del x - r eval {if tonumber(ARGV[1]) > 0 then redis.call('incr', KEYS[1]) end} 1 x 0 - r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 0 - r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 1 - wait_for_condition 50 100 { - [r -1 get x] eq {1} - } else { - fail "Expected 1 in x, but value is '[r -1 get x]'" - } - } - - test {Lua scripts using SELECT are replicated correctly} { - r eval { - redis.call("set","foo1","bar1") - redis.call("select","10") - redis.call("incr","x") - redis.call("select","11") - redis.call("incr","z") - } 0 - r eval { - redis.call("set","foo1","bar1") - redis.call("select","10") - redis.call("incr","x") - redis.call("select","11") - redis.call("incr","z") - } 0 - wait_for_condition 50 100 { - [r -1 debug digest] eq [r debug digest] - } else { - fail "Master-Slave desync after Lua script using SELECT." 
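# (DEBUG DIGEST computes a digest over the entire dataset, so master
# and slave converging to the same digest is what proves the
# SELECT-switching script replicated deterministically.)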
- } - } - } -} diff --git a/tools/pika_migrate/tests/unit/slowlog.tcl b/tools/pika_migrate/tests/unit/slowlog.tcl deleted file mode 100644 index b25b91e2ce..0000000000 --- a/tools/pika_migrate/tests/unit/slowlog.tcl +++ /dev/null @@ -1,70 +0,0 @@ -start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { - test {SLOWLOG - check that it starts with an empty log} { - r slowlog len - } {0} - - test {SLOWLOG - only logs commands taking more time than specified} { - r config set slowlog-log-slower-than 100000 - r ping - assert_equal [r slowlog len] 0 - r debug sleep 0.2 - assert_equal [r slowlog len] 1 - } - - test {SLOWLOG - max entries is correctly handled} { - r config set slowlog-log-slower-than 0 - r config set slowlog-max-len 10 - for {set i 0} {$i < 100} {incr i} { - r ping - } - r slowlog len - } {10} - - test {SLOWLOG - GET optional argument to limit output len works} { - llength [r slowlog get 5] - } {5} - - test {SLOWLOG - RESET subcommand works} { - r config set slowlog-log-slower-than 100000 - r slowlog reset - r slowlog len - } {0} - - test {SLOWLOG - logged entry sanity check} { - r debug sleep 0.2 - set e [lindex [r slowlog get] 0] - assert_equal [llength $e] 4 - assert_equal [lindex $e 0] 105 - assert_equal [expr {[lindex $e 2] > 100000}] 1 - assert_equal [lindex $e 3] {debug sleep 0.2} - } - - test {SLOWLOG - commands with too many arguments are trimmed} { - r config set slowlog-log-slower-than 0 - r slowlog reset - r sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 - set e [lindex [r slowlog get] 0] - lindex $e 3 - } {sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 {... (2 more arguments)}} - - test {SLOWLOG - too long arguments are trimmed} { - r config set slowlog-log-slower-than 0 - r slowlog reset - set arg [string repeat A 129] - r sadd set foo $arg - set e [lindex [r slowlog get] 0] - lindex $e 3 - } {sadd set foo {AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA... (1 more bytes)}} - - test {SLOWLOG - EXEC is not logged, just executed commands} { - r config set slowlog-log-slower-than 100000 - r slowlog reset - assert_equal [r slowlog len] 0 - r multi - r debug sleep 0.2 - r exec - assert_equal [r slowlog len] 1 - set e [lindex [r slowlog get] 0] - assert_equal [lindex $e 3] {debug sleep 0.2} - } -} diff --git a/tools/pika_migrate/tests/unit/sort.tcl b/tools/pika_migrate/tests/unit/sort.tcl deleted file mode 100644 index a25ffeb5ce..0000000000 --- a/tools/pika_migrate/tests/unit/sort.tcl +++ /dev/null @@ -1,311 +0,0 @@ -start_server { - tags {"sort"} - overrides { - "list-max-ziplist-value" 16 - "list-max-ziplist-entries" 32 - "set-max-intset-entries" 32 - } -} { - proc create_random_dataset {num cmd} { - set tosort {} - set result {} - array set seenrand {} - r del tosort - for {set i 0} {$i < $num} {incr i} { - # Make sure all the weights are different because - # Redis does not use a stable sort but Tcl does. 
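# (randpath below draws each weight either as a large random integer or
# as a random float, so both numeric forms get exercised; the retry loop
# keeps every weight unique so the client-side lsort and the server's
# SORT can only agree on one valid ordering.)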
- while 1 { - randpath { - set rint [expr int(rand()*1000000)] - } { - set rint [expr rand()] - } - if {![info exists seenrand($rint)]} break - } - set seenrand($rint) x - r $cmd tosort $i - r set weight_$i $rint - r hset wobj_$i weight $rint - lappend tosort [list $i $rint] - } - set sorted [lsort -index 1 -real $tosort] - for {set i 0} {$i < $num} {incr i} { - lappend result [lindex $sorted $i 0] - } - set _ $result - } - - foreach {num cmd enc title} { - 16 lpush ziplist "Ziplist" - 1000 lpush linkedlist "Linked list" - 10000 lpush linkedlist "Big Linked list" - 16 sadd intset "Intset" - 1000 sadd hashtable "Hash table" - 10000 sadd hashtable "Big Hash table" - } { - set result [create_random_dataset $num $cmd] - assert_encoding $enc tosort - - test "$title: SORT BY key" { - assert_equal $result [r sort tosort BY weight_*] - } - - test "$title: SORT BY key with limit" { - assert_equal [lrange $result 5 9] [r sort tosort BY weight_* LIMIT 5 5] - } - - test "$title: SORT BY hash field" { - assert_equal $result [r sort tosort BY wobj_*->weight] - } - } - - set result [create_random_dataset 16 lpush] - test "SORT GET #" { - assert_equal [lsort -integer $result] [r sort tosort GET #] - } - - test "SORT GET " { - r del foo - set res [r sort tosort GET foo] - assert_equal 16 [llength $res] - foreach item $res { assert_equal {} $item } - } - - test "SORT GET (key and hash) with sanity check" { - set l1 [r sort tosort GET # GET weight_*] - set l2 [r sort tosort GET # GET wobj_*->weight] - foreach {id1 w1} $l1 {id2 w2} $l2 { - assert_equal $id1 $id2 - assert_equal $w1 [r get weight_$id1] - assert_equal $w2 [r get weight_$id1] - } - } - - test "SORT BY key STORE" { - r sort tosort BY weight_* store sort-res - assert_equal $result [r lrange sort-res 0 -1] - assert_equal 16 [r llen sort-res] - assert_encoding ziplist sort-res - } - - test "SORT BY hash field STORE" { - r sort tosort BY wobj_*->weight store sort-res - assert_equal $result [r lrange sort-res 0 -1] - assert_equal 16 [r llen sort-res] - assert_encoding ziplist sort-res - } - - test "SORT DESC" { - assert_equal [lsort -decreasing -integer $result] [r sort tosort DESC] - } - - test "SORT ALPHA against integer encoded strings" { - r del mylist - r lpush mylist 2 - r lpush mylist 1 - r lpush mylist 3 - r lpush mylist 10 - r sort mylist alpha - } {1 10 2 3} - - test "SORT sorted set" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - r sort zset alpha desc - } {e d c b a} - - test "SORT sorted set BY nosort should retain ordering" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - r multi - r sort zset by nosort asc - r sort zset by nosort desc - r exec - } {{a c e b d} {d b e c a}} - - test "SORT sorted set BY nosort + LIMIT" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - assert_equal [r sort zset by nosort asc limit 0 1] {a} - assert_equal [r sort zset by nosort desc limit 0 1] {d} - assert_equal [r sort zset by nosort asc limit 0 2] {a c} - assert_equal [r sort zset by nosort desc limit 0 2] {d b} - assert_equal [r sort zset by nosort limit 5 10] {} - assert_equal [r sort zset by nosort limit -10 100] {a c e b d} - } - - test "SORT sorted set BY nosort works as expected from scripts" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - r eval { - return {redis.call('sort',KEYS[1],'by','nosort','asc'), - 
redis.call('sort',KEYS[1],'by','nosort','desc')} - } 1 zset - } {{a c e b d} {d b e c a}} - - test "SORT sorted set: +inf and -inf handling" { - r del zset - r zadd zset -100 a - r zadd zset 200 b - r zadd zset -300 c - r zadd zset 1000000 d - r zadd zset +inf max - r zadd zset -inf min - r zrange zset 0 -1 - } {min c a b d max} - - test "SORT regression for issue #19, sorting floats" { - r flushdb - set floats {1.1 5.10 3.10 7.44 2.1 5.75 6.12 0.25 1.15} - foreach x $floats { - r lpush mylist $x - } - assert_equal [lsort -real $floats] [r sort mylist] - } - - test "SORT with STORE returns zero if result is empty (github issue 224)" { - r flushdb - r sort foo store bar - } {0} - - test "SORT with STORE does not create empty lists (github issue 224)" { - r flushdb - r lpush foo bar - r sort foo alpha limit 10 10 store zap - r exists zap - } {0} - - test "SORT with STORE removes key if result is empty (github issue 227)" { - r flushdb - r lpush foo bar - r sort emptylist store foo - r exists foo - } {0} - - test "SORT with BY and STORE should still order output" { - r del myset mylist - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - r sort myset alpha by _ store mylist - r lrange mylist 0 -1 - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT will complain with numerical sorting and bad doubles (1)" { - r del myset - r sadd myset 1 2 3 4 not-a-double - set e {} - catch {r sort myset} e - set e - } {*ERR*double*} - - test "SORT will complain with numerical sorting and bad doubles (2)" { - r del myset - r sadd myset 1 2 3 4 - r mset score:1 10 score:2 20 score:3 30 score:4 not-a-double - set e {} - catch {r sort myset by score:*} e - set e - } {*ERR*double*} - - test "SORT BY sub-sorts lexicographically if score is the same" { - r del myset - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - foreach ele {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} { - set score:$ele 100 - } - r sort myset by score:* - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT GET with pattern ending with just -> does not get hash field" { - r del mylist - r lpush mylist a - r set x:a-> 100 - r sort mylist by num get x:*-> - } {100} - - test "SORT by nosort retains native order for lists" { - r del testa - r lpush testa 2 1 4 3 5 - r sort testa by nosort - } {5 3 4 1 2} - - test "SORT by nosort plus store retains native order for lists" { - r del testa - r lpush testa 2 1 4 3 5 - r sort testa by nosort store testb - r lrange testb 0 -1 - } {5 3 4 1 2} - - test "SORT by nosort with limit returns based on original list order" { - r sort testa by nosort limit 0 3 store testb - r lrange testb 0 -1 - } {5 3 4} - - tags {"slow"} { - set num 100 - set res [create_random_dataset $num lpush] - - test "SORT speed, $num element list BY key, 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort BY weight_* LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - flush stdout - } - } - - test "SORT speed, $num element list BY hash field, 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort BY wobj_*->weight LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - 
flush stdout - } - } - - test "SORT speed, $num element list directly, 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - flush stdout - } - } - - test "SORT speed, $num element list BY , 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort BY nokey LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - flush stdout - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/type/hash.tcl b/tools/pika_migrate/tests/unit/type/hash.tcl deleted file mode 100644 index 55441bd33a..0000000000 --- a/tools/pika_migrate/tests/unit/type/hash.tcl +++ /dev/null @@ -1,470 +0,0 @@ -start_server {tags {"hash"}} { - test {HSET/HLEN - Small hash creation} { - array set smallhash {} - for {set i 0} {$i < 8} {incr i} { - set key [randstring 0 8 alpha] - set val [randstring 0 8 alpha] - if {[info exists smallhash($key)]} { - incr i -1 - continue - } - r hset smallhash $key $val - set smallhash($key) $val - } - list [r hlen smallhash] - } {8} - -# test {Is the small hash encoded with a ziplist?} { -# assert_encoding ziplist smallhash -# } - - test {HSET/HLEN - Big hash creation} { - array set bighash {} - for {set i 0} {$i < 1024} {incr i} { - set key [randstring 0 8 alpha] - set val [randstring 0 8 alpha] - if {[info exists bighash($key)]} { - incr i -1 - continue - } - r hset bighash $key $val - set bighash($key) $val - } - list [r hlen bighash] - } {1024} - -# test {Is the big hash encoded with a ziplist?} { -# assert_encoding hashtable bighash -# } - - test {HGET against the small hash} { - set err {} - foreach k [array names smallhash *] { - if {$smallhash($k) ne [r hget smallhash $k]} { - set err "$smallhash($k) != [r hget smallhash $k]" - break - } - } - set _ $err - } {} - - test {HGET against the big hash} { - set err {} - foreach k [array names bighash *] { - if {$bighash($k) ne [r hget bighash $k]} { - set err "$bighash($k) != [r hget bighash $k]" - break - } - } - set _ $err - } {} - - test {HGET against non existing key} { - set rv {} - lappend rv [r hget smallhash __123123123__] - lappend rv [r hget bighash __123123123__] - set _ $rv - } {{} {}} - - test {HSET in update and insert mode} { - set rv {} - set k [lindex [array names smallhash *] 0] - lappend rv [r hset smallhash $k newval1] - set smallhash($k) newval1 - lappend rv [r hget smallhash $k] - lappend rv [r hset smallhash __foobar123__ newval] - set k [lindex [array names bighash *] 0] - lappend rv [r hset bighash $k newval2] - set bighash($k) newval2 - lappend rv [r hget bighash $k] - lappend rv [r hset bighash __foobar123__ newval] - lappend rv [r hdel smallhash __foobar123__] - lappend rv [r hdel bighash __foobar123__] - set _ $rv - } {0 newval1 1 0 newval2 1 1 1} - - test {HSETNX target key missing - small hash} { - r hsetnx smallhash __123123123__ foo - r hget smallhash __123123123__ - } {foo} - - test {HSETNX target key exists - small hash} { - r hsetnx smallhash __123123123__ bar - set result [r hget smallhash __123123123__] - r hdel smallhash __123123123__ - set _ $result - } {foo} - - test {HSETNX target key missing - big hash} { - r hsetnx bighash __123123123__ foo - r hget bighash __123123123__ 
- } {foo} - - test {HSETNX target key exists - big hash} { - r hsetnx bighash __123123123__ bar - set result [r hget bighash __123123123__] - r hdel bighash __123123123__ - set _ $result - } {foo} - - test {HMSET wrong number of args} { - catch {r hmset smallhash key1 val1 key2} err - format $err - } {*wrong number*} - - test {HMSET - small hash} { - set args {} - foreach {k v} [array get smallhash] { - set newval [randstring 0 8 alpha] - set smallhash($k) $newval - lappend args $k $newval - } - r hmset smallhash {*}$args - } {OK} - - test {HMSET - big hash} { - set args {} - foreach {k v} [array get bighash] { - set newval [randstring 0 8 alpha] - set bighash($k) $newval - lappend args $k $newval - } - r hmset bighash {*}$args - } {OK} - - test {HMGET against non existing key and fields} { - set rv {} - lappend rv [r hmget doesntexist __123123123__ __456456456__] - lappend rv [r hmget smallhash __123123123__ __456456456__] - lappend rv [r hmget bighash __123123123__ __456456456__] - set _ $rv - } {{{} {}} {{} {}} {{} {}}} - -# test {HMGET against wrong type} { -# r set wrongtype somevalue -# assert_error "*wrong*" {r hmget wrongtype field1 field2} -# } - - test {HMGET - small hash} { - set keys {} - set vals {} - foreach {k v} [array get smallhash] { - lappend keys $k - lappend vals $v - } - set err {} - set result [r hmget smallhash {*}$keys] - if {$vals ne $result} { - set err "$vals != $result" - break - } - set _ $err - } {} - - test {HMGET - big hash} { - set keys {} - set vals {} - foreach {k v} [array get bighash] { - lappend keys $k - lappend vals $v - } - set err {} - set result [r hmget bighash {*}$keys] - if {$vals ne $result} { - set err "$vals != $result" - break - } - set _ $err - } {} - - test {HKEYS - small hash} { - lsort [r hkeys smallhash] - } [lsort [array names smallhash *]] - - test {HKEYS - big hash} { - lsort [r hkeys bighash] - } [lsort [array names bighash *]] - - test {HVALS - small hash} { - set vals {} - foreach {k v} [array get smallhash] { - lappend vals $v - } - set _ [lsort $vals] - } [lsort [r hvals smallhash]] - - test {HVALS - big hash} { - set vals {} - foreach {k v} [array get bighash] { - lappend vals $v - } - set _ [lsort $vals] - } [lsort [r hvals bighash]] - - test {HGETALL - small hash} { - lsort [r hgetall smallhash] - } [lsort [array get smallhash]] - - test {HGETALL - big hash} { - lsort [r hgetall bighash] - } [lsort [array get bighash]] - - test {HDEL and return value} { - set rv {} - lappend rv [r hdel smallhash nokey] - lappend rv [r hdel bighash nokey] - set k [lindex [array names smallhash *] 0] - lappend rv [r hdel smallhash $k] - lappend rv [r hdel smallhash $k] - lappend rv [r hget smallhash $k] - unset smallhash($k) - set k [lindex [array names bighash *] 0] - lappend rv [r hdel bighash $k] - lappend rv [r hdel bighash $k] - lappend rv [r hget bighash $k] - unset bighash($k) - set _ $rv - } {0 0 1 0 {} 1 0 {}} - - test {HDEL - more than a single value} { - set rv {} - r del myhash - r hmset myhash a 1 b 2 c 3 - assert_equal 0 [r hdel myhash x y] - assert_equal 2 [r hdel myhash a c f] - r hgetall myhash - } {b 2} - - test {HDEL - hash becomes empty before deleting all specified fields} { - r del myhash - r hmset myhash a 1 b 2 c 3 - assert_equal 3 [r hdel myhash a b c d e] - assert_equal 0 [r exists myhash] - } - - test {HEXISTS} { - set rv {} - set k [lindex [array names smallhash *] 0] - lappend rv [r hexists smallhash $k] - lappend rv [r hexists smallhash nokey] - set k [lindex [array names bighash *] 0] - lappend rv [r hexists 
bighash $k] - lappend rv [r hexists bighash nokey] - } {1 0 1 0} - -# test {Is a ziplist encoded Hash promoted on big payload?} { -# r hset smallhash foo [string repeat a 1024] -# r debug object smallhash -# } {*hashtable*} - - test {HINCRBY against non existing database key} { - r del htest - list [r hincrby htest foo 2] - } {2} - - test {HINCRBY against non existing hash key} { - set rv {} - r hdel smallhash tmp - r hdel bighash tmp - lappend rv [r hincrby smallhash tmp 2] - lappend rv [r hget smallhash tmp] - lappend rv [r hincrby bighash tmp 2] - lappend rv [r hget bighash tmp] - } {2 2 2 2} - - test {HINCRBY against hash key created by hincrby itself} { - set rv {} - lappend rv [r hincrby smallhash tmp 3] - lappend rv [r hget smallhash tmp] - lappend rv [r hincrby bighash tmp 3] - lappend rv [r hget bighash tmp] - } {5 5 5 5} - - test {HINCRBY against hash key originally set with HSET} { - r hset smallhash tmp 100 - r hset bighash tmp 100 - list [r hincrby smallhash tmp 2] [r hincrby bighash tmp 2] - } {102 102} - - test {HINCRBY over 32bit value} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrby smallhash tmp 1] [r hincrby bighash tmp 1] - } {17179869185 17179869185} - - test {HINCRBY over 32bit value with over 32bit increment} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrby smallhash tmp 17179869184] [r hincrby bighash tmp 17179869184] - } {34359738368 34359738368} - - test {HINCRBY fails against hash value with spaces (left)} { - r hset smallhash str " 11" - r hset bighash str " 11" - catch {r hincrby smallhash str 1} smallerr - catch {r hincrby smallhash str 1} bigerr - set rv {} - lappend rv [string match "ERR*not an integer*" $smallerr] - lappend rv [string match "ERR*not an integer*" $bigerr] - } {1 1} - - test {HINCRBY fails against hash value with spaces (right)} { - r hset smallhash str "11 " - r hset bighash str "11 " - catch {r hincrby smallhash str 1} smallerr - catch {r hincrby smallhash str 1} bigerr - set rv {} - lappend rv [string match "ERR*not an integer*" $smallerr] - lappend rv [string match "ERR*not an integer*" $bigerr] - } {1 1} - - test {HINCRBY can detect overflows} { - set e {} - r hset hash n -9223372036854775484 - assert {[r hincrby hash n -1] == -9223372036854775485} - catch {r hincrby hash n -10000} e - set e - } {*overflow*} - - test {HINCRBYFLOAT against non existing database key} { - r del htest - list [r hincrbyfloat htest foo 2.5] - } {2.5} - - test {HINCRBYFLOAT against non existing hash key} { - set rv {} - r hdel smallhash tmp - r hdel bighash tmp - lappend rv [roundFloat [r hincrbyfloat smallhash tmp 2.5]] - lappend rv [roundFloat [r hget smallhash tmp]] - lappend rv [roundFloat [r hincrbyfloat bighash tmp 2.5]] - lappend rv [roundFloat [r hget bighash tmp]] - } {2.5 2.5 2.5 2.5} - - test {HINCRBYFLOAT against hash key created by hincrby itself} { - set rv {} - lappend rv [roundFloat [r hincrbyfloat smallhash tmp 3.5]] - lappend rv [roundFloat [r hget smallhash tmp]] - lappend rv [roundFloat [r hincrbyfloat bighash tmp 3.5]] - lappend rv [roundFloat [r hget bighash tmp]] - } {6 6 6 6} - - test {HINCRBYFLOAT against hash key originally set with HSET} { - r hset smallhash tmp 100 - r hset bighash tmp 100 - list [roundFloat [r hincrbyfloat smallhash tmp 2.5]] \ - [roundFloat [r hincrbyfloat bighash tmp 2.5]] - } {102.5 102.5} - - test {HINCRBYFLOAT over 32bit value} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrbyfloat smallhash tmp 
1] \ - [r hincrbyfloat bighash tmp 1] - } {17179869185 17179869185} - - test {HINCRBYFLOAT over 32bit value with over 32bit increment} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrbyfloat smallhash tmp 17179869184] \ - [r hincrbyfloat bighash tmp 17179869184] - } {34359738368 34359738368} - - test {HINCRBYFLOAT fails against hash value with spaces (left)} { - r hset smallhash str " 11" - r hset bighash str " 11" - catch {r hincrbyfloat smallhash str 1} smallerr - catch {r hincrbyfloat smallhash str 1} bigerr - set rv {} - lappend rv [string match "ERR*not*float*" $smallerr] - lappend rv [string match "ERR*not*float*" $bigerr] - } {1 1} - - test {HINCRBYFLOAT fails against hash value with spaces (right)} { - r hset smallhash str "11 " - r hset bighash str "11 " - catch {r hincrbyfloat smallhash str 1} smallerr - catch {r hincrbyfloat smallhash str 1} bigerr - set rv {} - lappend rv [string match "ERR*not*float*" $smallerr] - lappend rv [string match "ERR*not*float*" $bigerr] - } {1 1} - - test {Hash ziplist regression test for large keys} { - r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk a - r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b - r hget hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk - } {b} - - foreach size {10 512} { - test "Hash fuzzing #1 - $size fields" { - for {set times 0} {$times < 10} {incr times} { - catch {unset hash} - array set hash {} - r del hash - - # Create - for {set j 0} {$j < $size} {incr j} { - set field [randomValue] - set value [randomValue] - r hset hash $field $value - set hash($field) $value - } - - # Verify - foreach {k v} [array get hash] { - assert_equal $v [r hget hash $k] - } - assert_equal [array size hash] [r hlen hash] - } - } - - test "Hash fuzzing #2 - $size fields" { - for {set times 0} {$times < 10} {incr times} { - catch {unset hash} - array set hash {} - r del hash - - # Create - for {set j 0} {$j < $size} {incr j} { - randpath { - set field [randomValue] - set value [randomValue] - r hset hash $field $value - set hash($field) $value - } { - set field [randomSignedInt 512] - set value [randomSignedInt 512] - r hset hash $field $value - set hash($field) $value - } { - randpath { - set field [randomValue] - } { - set field [randomSignedInt 512] - } - r hdel hash $field - unset -nocomplain hash($field) - } - } - - # Verify - foreach {k v} [array get hash] { - assert_equal $v [r hget hash $k] - } - assert_equal [array size hash] [r hlen hash] - } - } - } - -# test {Stress test the hash ziplist -> hashtable encoding conversion} { -# r config set hash-max-ziplist-entries 32 -# for {set j 0} {$j < 100} {incr j} { -# r del 
myhash -# for {set i 0} {$i < 64} {incr i} { -# r hset myhash [randomValue] [randomValue] -# } -# assert {[r object encoding myhash] eq {hashtable}} -# } -# } -} diff --git a/tools/pika_migrate/tests/unit/type/list-2.tcl b/tools/pika_migrate/tests/unit/type/list-2.tcl deleted file mode 100644 index bf6a055eba..0000000000 --- a/tools/pika_migrate/tests/unit/type/list-2.tcl +++ /dev/null @@ -1,44 +0,0 @@ -start_server { - tags {"list"} - overrides { - "list-max-ziplist-value" 16 - "list-max-ziplist-entries" 256 - } -} { - source "tests/unit/type/list-common.tcl" - - foreach {type large} [array get largevalue] { - tags {"slow"} { - test "LTRIM stress testing - $type" { - set mylist {} - set startlen 32 - r del mylist - - # Start with the large value to ensure the - # right encoding is used. - r rpush mylist $large - lappend mylist $large - - for {set i 0} {$i < $startlen} {incr i} { - set str [randomInt 9223372036854775807] - r rpush mylist $str - lappend mylist $str - } - - for {set i 0} {$i < 1000} {incr i} { - set min [expr {int(rand()*$startlen)}] - set max [expr {$min+int(rand()*$startlen)}] - set mylist [lrange $mylist $min $max] - r ltrim mylist $min $max - assert_equal $mylist [r lrange mylist 0 -1] - - for {set j [r llen mylist]} {$j < $startlen} {incr j} { - set str [randomInt 9223372036854775807] - r rpush mylist $str - lappend mylist $str - } - } - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/type/list-3.tcl b/tools/pika_migrate/tests/unit/type/list-3.tcl deleted file mode 100644 index 94f9a0b797..0000000000 --- a/tools/pika_migrate/tests/unit/type/list-3.tcl +++ /dev/null @@ -1,79 +0,0 @@ -start_server { - tags {list ziplist} - overrides { - "list-max-ziplist-value" 200000 - "list-max-ziplist-entries" 256 - } -} { - test {Explicit regression for a list bug} { - set mylist {49376042582 {BkG2o\pIC]4YYJa9cJ4GWZalG[4tin;1D2whSkCOW`mX;SFXGyS8sedcff3fQI^tgPCC@^Nu1J6o]meM@Lko]t_jRyotK?tH[\EvWqS]b`o2OCtjg:?nUTwdjpcUm]y:pg5q24q7LlCOwQE^}} - r del l - r rpush l [lindex $mylist 0] - r rpush l [lindex $mylist 1] - assert_equal [r lindex l 0] [lindex $mylist 0] - assert_equal [r lindex l 1] [lindex $mylist 1] - } - - tags {slow} { - test {ziplist implementation: value encoding and backlink} { - if {$::accurate} {set iterations 100} else {set iterations 10} - for {set j 0} {$j < $iterations} {incr j} { - r del l - set l {} - for {set i 0} {$i < 200} {incr i} { - randpath { - set data [string repeat x [randomInt 100000]] - } { - set data [randomInt 65536] - } { - set data [randomInt 4294967296] - } { - set data [randomInt 18446744073709551616] - } { - set data -[randomInt 65536] - if {$data eq {-0}} {set data 0} - } { - set data -[randomInt 4294967296] - if {$data eq {-0}} {set data 0} - } { - set data -[randomInt 18446744073709551616] - if {$data eq {-0}} {set data 0} - } - lappend l $data - r rpush l $data - } - assert_equal [llength $l] [r llen l] - # Traverse backward - for {set i 199} {$i >= 0} {incr i -1} { - if {[lindex $l $i] ne [r lindex l $i]} { - assert_equal [lindex $l $i] [r lindex l $i] - } - } - } - } - - test {ziplist implementation: encoding stress testing} { - for {set j 0} {$j < 200} {incr j} { - r del l - set l {} - set len [randomInt 400] - for {set i 0} {$i < $len} {incr i} { - set rv [randomValue] - randpath { - lappend l $rv - r rpush l $rv - } { - set l [concat [list $rv] $l] - r lpush l $rv - } - } - assert_equal [llength $l] [r llen l] - for {set i 0} {$i < $len} {incr i} { - if {[lindex $l $i] ne [r lindex l $i]} { - assert_equal [lindex $l $i] [r lindex l 
$i] - } - } - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/type/list-common.tcl b/tools/pika_migrate/tests/unit/type/list-common.tcl deleted file mode 100644 index ab45f0b31b..0000000000 --- a/tools/pika_migrate/tests/unit/type/list-common.tcl +++ /dev/null @@ -1,5 +0,0 @@ -# We need a value larger than list-max-ziplist-value to make sure -# the list has the right encoding when it is swapped in again. -array set largevalue {} -set largevalue(ziplist) "hello" -set largevalue(linkedlist) [string repeat "hello" 4] diff --git a/tools/pika_migrate/tests/unit/type/list.tcl b/tools/pika_migrate/tests/unit/type/list.tcl deleted file mode 100644 index 17358ae378..0000000000 --- a/tools/pika_migrate/tests/unit/type/list.tcl +++ /dev/null @@ -1,896 +0,0 @@ -start_server { - tags {"list"} - overrides { - "list-max-ziplist-value" 16 - "list-max-ziplist-entries" 256 - } -} { - source "tests/unit/type/list-common.tcl" - - test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} { - # first lpush then rpush - assert_equal 1 [r lpush myziplist1 a] - assert_equal 2 [r rpush myziplist1 b] - assert_equal 3 [r rpush myziplist1 c] - assert_equal 3 [r llen myziplist1] - assert_equal a [r lindex myziplist1 0] - assert_equal b [r lindex myziplist1 1] - assert_equal c [r lindex myziplist1 2] - assert_equal {} [r lindex myziplist2 3] - assert_equal c [r rpop myziplist1] - assert_equal a [r lpop myziplist1] -# assert_encoding ziplist myziplist1 - - # first rpush then lpush - assert_equal 1 [r rpush myziplist2 a] - assert_equal 2 [r lpush myziplist2 b] - assert_equal 3 [r lpush myziplist2 c] - assert_equal 3 [r llen myziplist2] - assert_equal c [r lindex myziplist2 0] - assert_equal b [r lindex myziplist2 1] - assert_equal a [r lindex myziplist2 2] - assert_equal {} [r lindex myziplist2 3] - assert_equal a [r rpop myziplist2] - assert_equal c [r lpop myziplist2] -# assert_encoding ziplist myziplist2 - } - - test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - regular list} { - # first lpush then rpush - assert_equal 1 [r lpush mylist1 $largevalue(linkedlist)] -# assert_encoding linkedlist mylist1 - assert_equal 2 [r rpush mylist1 b] - assert_equal 3 [r rpush mylist1 c] - assert_equal 3 [r llen mylist1] - assert_equal $largevalue(linkedlist) [r lindex mylist1 0] - assert_equal b [r lindex mylist1 1] - assert_equal c [r lindex mylist1 2] - assert_equal {} [r lindex mylist1 3] - assert_equal c [r rpop mylist1] - assert_equal $largevalue(linkedlist) [r lpop mylist1] - - # first rpush then lpush - assert_equal 1 [r rpush mylist2 $largevalue(linkedlist)] -# assert_encoding linkedlist mylist2 - assert_equal 2 [r lpush mylist2 b] - assert_equal 3 [r lpush mylist2 c] - assert_equal 3 [r llen mylist2] - assert_equal c [r lindex mylist2 0] - assert_equal b [r lindex mylist2 1] - assert_equal $largevalue(linkedlist) [r lindex mylist2 2] - assert_equal {} [r lindex mylist2 3] - assert_equal $largevalue(linkedlist) [r rpop mylist2] - assert_equal c [r lpop mylist2] - } - - test {R/LPOP against empty list} { - r lpop non-existing-list - } {} - - test {Variadic RPUSH/LPUSH} { - r del mylist - assert_equal 4 [r lpush mylist a b c d] - assert_equal 8 [r rpush mylist 0 1 2 3] - assert_equal {d c b a 0 1 2 3} [r lrange mylist 0 -1] - } - - test {DEL a list - ziplist} { - assert_equal 1 [r del myziplist2] - assert_equal 0 [r exists myziplist2] - assert_equal 0 [r llen myziplist2] - } - - test {DEL a list - regular list} { - assert_equal 1 [r del mylist2] - assert_equal 0 [r exists mylist2] - assert_equal 0 [r llen mylist2] - } - - proc 
create_ziplist {key entries} { - r del $key - foreach entry $entries { r rpush $key $entry } -# assert_encoding ziplist $key - } - - proc create_linkedlist {key entries} { - r del $key - foreach entry $entries { r rpush $key $entry } -# assert_encoding linkedlist $key - } - -# foreach {type large} [array get largevalue] { -# test "BLPOP, BRPOP: single existing list - $type" { -# set rd [redis_deferring_client] -# create_$type blist "a b $large c d" -# -# $rd blpop blist 1 -# assert_equal {blist a} [$rd read] -# $rd brpop blist 1 -# assert_equal {blist d} [$rd read] -# -# $rd blpop blist 1 -# assert_equal {blist b} [$rd read] -# $rd brpop blist 1 -# assert_equal {blist c} [$rd read] -# } -# -# test "BLPOP, BRPOP: multiple existing lists - $type" { -# set rd [redis_deferring_client] -# create_$type blist1 "a $large c" -# create_$type blist2 "d $large f" -# -# $rd blpop blist1 blist2 1 -# assert_equal {blist1 a} [$rd read] -# $rd brpop blist1 blist2 1 -# assert_equal {blist1 c} [$rd read] -# assert_equal 1 [r llen blist1] -# assert_equal 3 [r llen blist2] -# -# $rd blpop blist2 blist1 1 -# assert_equal {blist2 d} [$rd read] -# $rd brpop blist2 blist1 1 -# assert_equal {blist2 f} [$rd read] -# assert_equal 1 [r llen blist1] -# assert_equal 1 [r llen blist2] -# } -# -# test "BLPOP, BRPOP: second list has an entry - $type" { -# set rd [redis_deferring_client] -# r del blist1 -# create_$type blist2 "d $large f" -# -# $rd blpop blist1 blist2 1 -# assert_equal {blist2 d} [$rd read] -# $rd brpop blist1 blist2 1 -# assert_equal {blist2 f} [$rd read] -# assert_equal 0 [r llen blist1] -# assert_equal 1 [r llen blist2] -# } -# -# test "BRPOPLPUSH - $type" { -# r del target -# -# set rd [redis_deferring_client] -# create_$type blist "a b $large c d" -# -# $rd brpoplpush blist target 1 -# assert_equal d [$rd read] -# -# assert_equal d [r rpop target] -# assert_equal "a b $large c" [r lrange blist 0 -1] -# } -# } -# -# test "BLPOP, LPUSH + DEL should not awake blocked client" { -# set rd [redis_deferring_client] -# r del list -# -# $rd blpop list 0 -# r multi -# r lpush list a -# r del list -# r exec -# r del list -# r lpush list b -# $rd read -# } {list b} -# -# test "BLPOP, LPUSH + DEL + SET should not awake blocked client" { -# set rd [redis_deferring_client] -# r del list -# -# $rd blpop list 0 -# r multi -# r lpush list a -# r del list -# r set list foo -# r exec -# r del list -# r lpush list b -# $rd read -# } {list b} -# -# test "BLPOP with same key multiple times should work (issue #801)" { -# set rd [redis_deferring_client] -# r del list1 list2 -# -# # Data arriving after the BLPOP. -# $rd blpop list1 list2 list2 list1 0 -# r lpush list1 a -# assert_equal [$rd read] {list1 a} -# $rd blpop list1 list2 list2 list1 0 -# r lpush list2 b -# assert_equal [$rd read] {list2 b} -# -# # Data already there. 
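# (This half of the test exercises BLPOP's non-blocking fast path: when
# one of the listed keys already holds elements, the pop is served
# immediately and no blocking state is installed for the client.)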
-# r lpush list1 a -# r lpush list2 b -# $rd blpop list1 list2 list2 list1 0 -# assert_equal [$rd read] {list1 a} -# $rd blpop list1 list2 list2 list1 0 -# assert_equal [$rd read] {list2 b} -# } -# -# test "MULTI/EXEC is isolated from the point of view of BLPOP" { -# set rd [redis_deferring_client] -# r del list -# $rd blpop list 0 -# r multi -# r lpush list a -# r lpush list b -# r lpush list c -# r exec -# $rd read -# } {list c} -# -# test "BLPOP with variadic LPUSH" { -# set rd [redis_deferring_client] -# r del blist target -# if {$::valgrind} {after 100} -# $rd blpop blist 0 -# if {$::valgrind} {after 100} -# assert_equal 2 [r lpush blist foo bar] -# if {$::valgrind} {after 100} -# assert_equal {blist bar} [$rd read] -# assert_equal foo [lindex [r lrange blist 0 -1] 0] -# } -# -# test "BRPOPLPUSH with zero timeout should block indefinitely" { -# set rd [redis_deferring_client] -# r del blist target -# $rd brpoplpush blist target 0 -# after 1000 -# r rpush blist foo -# assert_equal foo [$rd read] -# assert_equal {foo} [r lrange target 0 -1] -# } -# -# test "BRPOPLPUSH with a client BLPOPing the target list" { -# set rd [redis_deferring_client] -# set rd2 [redis_deferring_client] -# r del blist target -# $rd2 blpop target 0 -# $rd brpoplpush blist target 0 -# after 1000 -# r rpush blist foo -# assert_equal foo [$rd read] -# assert_equal {target foo} [$rd2 read] -# assert_equal 0 [r exists target] -# } -# -# test "BRPOPLPUSH with wrong source type" { -# set rd [redis_deferring_client] -# r del blist target -# r set blist nolist -# $rd brpoplpush blist target 1 -# assert_error "WRONGTYPE*" {$rd read} -# } -# -# test "BRPOPLPUSH with wrong destination type" { -# set rd [redis_deferring_client] -# r del blist target -# r set target nolist -# r lpush blist foo -# $rd brpoplpush blist target 1 -# assert_error "WRONGTYPE*" {$rd read} -# -# set rd [redis_deferring_client] -# r del blist target -# r set target nolist -# $rd brpoplpush blist target 0 -# after 1000 -# r rpush blist foo -# assert_error "WRONGTYPE*" {$rd read} -# assert_equal {foo} [r lrange blist 0 -1] -# } -# -# test "BRPOPLPUSH maintains order of elements after failure" { -# set rd [redis_deferring_client] -# r del blist target -# r set target nolist -# $rd brpoplpush blist target 0 -# r rpush blist a b c -# assert_error "WRONGTYPE*" {$rd read} -# r lrange blist 0 -1 -# } {a b c} -# -# test "BRPOPLPUSH with multiple blocked clients" { -# set rd1 [redis_deferring_client] -# set rd2 [redis_deferring_client] -# r del blist target1 target2 -# r set target1 nolist -# $rd1 brpoplpush blist target1 0 -# $rd2 brpoplpush blist target2 0 -# r lpush blist foo -# -# assert_error "WRONGTYPE*" {$rd1 read} -# assert_equal {foo} [$rd2 read] -# assert_equal {foo} [r lrange target2 0 -1] -# } -# -# test "Linked BRPOPLPUSH" { -# set rd1 [redis_deferring_client] -# set rd2 [redis_deferring_client] -# -# r del list1 list2 list3 -# -# $rd1 brpoplpush list1 list2 0 -# $rd2 brpoplpush list2 list3 0 -# -# r rpush list1 foo -# -# assert_equal {} [r lrange list1 0 -1] -# assert_equal {} [r lrange list2 0 -1] -# assert_equal {foo} [r lrange list3 0 -1] -# } -# -# test "Circular BRPOPLPUSH" { -# set rd1 [redis_deferring_client] -# set rd2 [redis_deferring_client] -# -# r del list1 list2 -# -# $rd1 brpoplpush list1 list2 0 -# $rd2 brpoplpush list2 list1 0 -# -# r rpush list1 foo -# -# assert_equal {foo} [r lrange list1 0 -1] -# assert_equal {} [r lrange list2 0 -1] -# } -# -# test "Self-referential BRPOPLPUSH" { -# set rd [redis_deferring_client] -# -# r del 
blist -# -# $rd brpoplpush blist blist 0 -# -# r rpush blist foo -# -# assert_equal {foo} [r lrange blist 0 -1] -# } -# -# test "BRPOPLPUSH inside a transaction" { -# r del xlist target -# r lpush xlist foo -# r lpush xlist bar -# -# r multi -# r brpoplpush xlist target 0 -# r brpoplpush xlist target 0 -# r brpoplpush xlist target 0 -# r lrange xlist 0 -1 -# r lrange target 0 -1 -# r exec -# } {foo bar {} {} {bar foo}} -# -# test "PUSH resulting from BRPOPLPUSH affect WATCH" { -# set blocked_client [redis_deferring_client] -# set watching_client [redis_deferring_client] -# r del srclist dstlist somekey -# r set somekey somevalue -# $blocked_client brpoplpush srclist dstlist 0 -# $watching_client watch dstlist -# $watching_client read -# $watching_client multi -# $watching_client read -# $watching_client get somekey -# $watching_client read -# r lpush srclist element -# $watching_client exec -# $watching_client read -# } {} -# -# test "BRPOPLPUSH does not affect WATCH while still blocked" { -# set blocked_client [redis_deferring_client] -# set watching_client [redis_deferring_client] -# r del srclist dstlist somekey -# r set somekey somevalue -# $blocked_client brpoplpush srclist dstlist 0 -# $watching_client watch dstlist -# $watching_client read -# $watching_client multi -# $watching_client read -# $watching_client get somekey -# $watching_client read -# $watching_client exec -# # Blocked BLPOPLPUSH may create problems, unblock it. -# r lpush srclist element -# $watching_client read -# } {somevalue} -# -# test {BRPOPLPUSH timeout} { -# set rd [redis_deferring_client] -# -# $rd brpoplpush foo_list bar_list 1 -# after 2000 -# $rd read -# } {} -# -# test "BLPOP when new key is moved into place" { -# set rd [redis_deferring_client] -# -# $rd blpop foo 5 -# r lpush bob abc def hij -# r rename bob foo -# $rd read -# } {foo hij} -# -# test "BLPOP when result key is created by SORT..STORE" { -# set rd [redis_deferring_client] -# -# # zero out list from previous test without explicit delete -# r lpop foo -# r lpop foo -# r lpop foo -# -# $rd blpop foo 5 -# r lpush notfoo hello hola aguacate konichiwa zanzibar -# r sort notfoo ALPHA store foo -# $rd read -# } {foo aguacate} -# -# foreach {pop} {BLPOP BRPOP} { -# test "$pop: with single empty list argument" { -# set rd [redis_deferring_client] -# r del blist1 -# $rd $pop blist1 1 -# r rpush blist1 foo -# assert_equal {blist1 foo} [$rd read] -# assert_equal 0 [r exists blist1] -# } -# -# test "$pop: with negative timeout" { -# set rd [redis_deferring_client] -# $rd $pop blist1 -1 -# assert_error "ERR*is negative*" {$rd read} -# } -# -# test "$pop: with non-integer timeout" { -# set rd [redis_deferring_client] -# $rd $pop blist1 1.1 -# assert_error "ERR*not an integer*" {$rd read} -# } -# -# test "$pop: with zero timeout should block indefinitely" { -# # To test this, use a timeout of 0 and wait a second. -# # The blocking pop should still be waiting for a push. 
-# set rd [redis_deferring_client] -# $rd $pop blist1 0 -# after 1000 -# r rpush blist1 foo -# assert_equal {blist1 foo} [$rd read] -# } -# -# test "$pop: second argument is not a list" { -# set rd [redis_deferring_client] -# r del blist1 blist2 -# r set blist2 nolist -# $rd $pop blist1 blist2 1 -# assert_error "WRONGTYPE*" {$rd read} -# } -# -# test "$pop: timeout" { -# set rd [redis_deferring_client] -# r del blist1 blist2 -# $rd $pop blist1 blist2 1 -# assert_equal {} [$rd read] -# } -# -# test "$pop: arguments are empty" { -# set rd [redis_deferring_client] -# r del blist1 blist2 -# -# $rd $pop blist1 blist2 1 -# r rpush blist1 foo -# assert_equal {blist1 foo} [$rd read] -# assert_equal 0 [r exists blist1] -# assert_equal 0 [r exists blist2] -# -# $rd $pop blist1 blist2 1 -# r rpush blist2 foo -# assert_equal {blist2 foo} [$rd read] -# assert_equal 0 [r exists blist1] -# assert_equal 0 [r exists blist2] -# } -# } -# -# test {BLPOP inside a transaction} { -# r del xlist -# r lpush xlist foo -# r lpush xlist bar -# r multi -# r blpop xlist 0 -# r blpop xlist 0 -# r blpop xlist 0 -# r exec -# } {{xlist bar} {xlist foo} {}} - - test {LPUSHX, RPUSHX - generic} { - r del xlist - assert_equal 0 [r lpushx xlist a] - assert_equal 0 [r llen xlist] - assert_equal 0 [r rpushx xlist a] - assert_equal 0 [r llen xlist] - } - - foreach {type large} [array get largevalue] { - test "LPUSHX, RPUSHX - $type" { - create_$type xlist "$large c" - assert_equal 3 [r rpushx xlist d] - assert_equal 4 [r lpushx xlist a] - assert_equal "a $large c d" [r lrange xlist 0 -1] - } - - test "LINSERT - $type" { - create_$type xlist "a $large c d" - assert_equal 5 [r linsert xlist before c zz] - assert_equal "a $large zz c d" [r lrange xlist 0 10] - assert_equal 6 [r linsert xlist after c yy] - assert_equal "a $large zz c yy d" [r lrange xlist 0 10] - assert_equal 7 [r linsert xlist after d dd] - assert_equal -1 [r linsert xlist after bad ddd] - assert_equal "a $large zz c yy d dd" [r lrange xlist 0 10] - assert_equal 8 [r linsert xlist before a aa] - assert_equal -1 [r linsert xlist before bad aaa] - assert_equal "aa a $large zz c yy d dd" [r lrange xlist 0 10] - - # check inserting integer encoded value - assert_equal 9 [r linsert xlist before aa 42] - assert_equal 42 [r lrange xlist 0 0] - } - } - - test {LINSERT raise error on bad syntax} { - catch {[r linsert xlist aft3r aa 42]} e - set e - } {*ERR*syntax*error*} - -# test {LPUSHX, RPUSHX convert from ziplist to list} { -# set large $largevalue(linkedlist) -# -# # convert when a large value is pushed -# create_ziplist xlist a -# assert_equal 2 [r rpushx xlist $large] -# assert_encoding linkedlist xlist -# create_ziplist xlist a -# assert_equal 2 [r lpushx xlist $large] -# assert_encoding linkedlist xlist -# -# # convert when the length threshold is exceeded -# create_ziplist xlist [lrepeat 256 a] -# assert_equal 257 [r rpushx xlist b] -# assert_encoding linkedlist xlist -# create_ziplist xlist [lrepeat 256 a] -# assert_equal 257 [r lpushx xlist b] -# assert_encoding linkedlist xlist -# } - -# test {LINSERT convert from ziplist to list} { -# set large $largevalue(linkedlist) -# -# # convert when a large value is inserted -# create_ziplist xlist a -# assert_equal 2 [r linsert xlist before a $large] -# assert_encoding linkedlist xlist -# create_ziplist xlist a -# assert_equal 2 [r linsert xlist after a $large] -# assert_encoding linkedlist xlist -# -# # convert when the length threshold is exceeded -# create_ziplist xlist [lrepeat 256 a] -# assert_equal 257 [r linsert 
xlist before a a] -# assert_encoding linkedlist xlist -# create_ziplist xlist [lrepeat 256 a] -# assert_equal 257 [r linsert xlist after a a] -# assert_encoding linkedlist xlist -# -# # don't convert when the value could not be inserted -# create_ziplist xlist [lrepeat 256 a] -# assert_equal -1 [r linsert xlist before foo a] -# assert_encoding ziplist xlist -# create_ziplist xlist [lrepeat 256 a] -# assert_equal -1 [r linsert xlist after foo a] -# assert_encoding ziplist xlist -# } - - foreach {type num} {ziplist 250 linkedlist 500} { - proc check_numbered_list_consistency {key} { - set len [r llen $key] - for {set i 0} {$i < $len} {incr i} { - assert_equal $i [r lindex $key $i] - assert_equal [expr $len-1-$i] [r lindex $key [expr (-$i)-1]] - } - } - - proc check_random_access_consistency {key} { - set len [r llen $key] - for {set i 0} {$i < $len} {incr i} { - set rint [expr int(rand()*$len)] - assert_equal $rint [r lindex $key $rint] - assert_equal [expr $len-1-$rint] [r lindex $key [expr (-$rint)-1]] - } - } - - test "LINDEX consistency test - $type" { - r del mylist - for {set i 0} {$i < $num} {incr i} { - r rpush mylist $i - } -# assert_encoding $type mylist - check_numbered_list_consistency mylist - } - - test "LINDEX random access - $type" { -# assert_encoding $type mylist - check_random_access_consistency mylist - } - -# test "Check if list is still ok after a DEBUG RELOAD - $type" { -# r debug reload -# assert_encoding $type mylist -# check_numbered_list_consistency mylist -# check_random_access_consistency mylist -# } - } - -# test {LLEN against non-list value error} { -# r del mylist -# r set mylist foobar -# assert_error WRONGTYPE* {r llen mylist} -# } - - test {LLEN against non existing key} { - assert_equal 0 [r llen not-a-key] - } - -# test {LINDEX against non-list value error} { -# assert_error WRONGTYPE* {r lindex mylist 0} -# } - - test {LINDEX against non existing key} { - assert_equal "" [r lindex not-a-key 10] - } - -# test {LPUSH against non-list value error} { -# assert_error WRONGTYPE* {r lpush mylist 0} -# } - -# test {RPUSH against non-list value error} { -# assert_error WRONGTYPE* {r rpush mylist 0} -# } - - foreach {type large} [array get largevalue] { - test "RPOPLPUSH base case - $type" { - r del mylist1 mylist2 - create_$type mylist1 "a $large c d" - assert_equal d [r rpoplpush mylist1 mylist2] - assert_equal c [r rpoplpush mylist1 mylist2] - assert_equal "a $large" [r lrange mylist1 0 -1] - assert_equal "c d" [r lrange mylist2 0 -1] -# assert_encoding ziplist mylist2 - } - - test "RPOPLPUSH with the same list as src and dst - $type" { - create_$type mylist "a $large c" - assert_equal "a $large c" [r lrange mylist 0 -1] - assert_equal c [r rpoplpush mylist mylist] - assert_equal "c a $large" [r lrange mylist 0 -1] - } - - foreach {othertype otherlarge} [array get largevalue] { - test "RPOPLPUSH with $type source and existing target $othertype" { - create_$type srclist "a b c $large" - create_$othertype dstlist "$otherlarge" - assert_equal $large [r rpoplpush srclist dstlist] - assert_equal c [r rpoplpush srclist dstlist] - assert_equal "a b" [r lrange srclist 0 -1] - assert_equal "c $large $otherlarge" [r lrange dstlist 0 -1] - - # When we rpoplpush'ed a large value, dstlist should be - # converted to the same encoding as srclist. 
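# (Hedged note: on upstream redis the commented assertion below would hold
# because a value longer than list-max-ziplist-value cannot be stored in a
# ziplist, so receiving $large forces dstlist over to the linkedlist
# encoding; the check presumably stays disabled here because encodings are
# not observable through this backend.)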
-# if {$type eq "linkedlist"} { -# assert_encoding linkedlist dstlist -# } - } - } - } - - test {RPOPLPUSH against non existing key} { - r del srclist dstlist - assert_equal {} [r rpoplpush srclist dstlist] - assert_equal 0 [r exists srclist] - assert_equal 0 [r exists dstlist] - } - - test {RPOPLPUSH against non list src key} { - r del srclist dstlist - r set srclist x -# assert_error WRONGTYPE* {r rpoplpush srclist dstlist} -# assert_type string srclist - assert_equal 0 [r exists newlist] - } - - test {RPOPLPUSH against non list dst key} { - create_ziplist srclist {a b c d} - r set dstlist x -# assert_error WRONGTYPE* {r rpoplpush srclist dstlist} -# assert_type string dstlist - assert_equal {a b c d} [r lrange srclist 0 -1] - } - - test {RPOPLPUSH against non existing src key} { - r del srclist dstlist - assert_equal {} [r rpoplpush srclist dstlist] - } {} - - foreach {type large} [array get largevalue] { - test "Basic LPOP/RPOP - $type" { - create_$type mylist "$large 1 2" - assert_equal $large [r lpop mylist] - assert_equal 2 [r rpop mylist] - assert_equal 1 [r lpop mylist] - assert_equal 0 [r llen mylist] - - # pop on empty list - assert_equal {} [r lpop mylist] - assert_equal {} [r rpop mylist] - } - } - -# test {LPOP/RPOP against non list value} { -# r set notalist foo -# assert_error WRONGTYPE* {r lpop notalist} -# assert_error WRONGTYPE* {r rpop notalist} -# } - - foreach {type num} {ziplist 250 linkedlist 500} { - test "Mass RPOP/LPOP - $type" { - r del mylist - set sum1 0 - for {set i 0} {$i < $num} {incr i} { - r lpush mylist $i - incr sum1 $i - } -# assert_encoding $type mylist - set sum2 0 - for {set i 0} {$i < [expr $num/2]} {incr i} { - incr sum2 [r lpop mylist] - incr sum2 [r rpop mylist] - } - assert_equal $sum1 $sum2 - } - } - - foreach {type large} [array get largevalue] { - test "LRANGE basics - $type" { - create_$type mylist "$large 1 2 3 4 5 6 7 8 9" - assert_equal {1 2 3 4 5 6 7 8} [r lrange mylist 1 -2] - assert_equal {7 8 9} [r lrange mylist -3 -1] - assert_equal {4} [r lrange mylist 4 4] - } - - test "LRANGE inverted indexes - $type" { - create_$type mylist "$large 1 2 3 4 5 6 7 8 9" - assert_equal {} [r lrange mylist 6 2] - } - - test "LRANGE out of range indexes including the full list - $type" { - create_$type mylist "$large 1 2 3" - assert_equal "$large 1 2 3" [r lrange mylist -1000 1000] - } - - test "LRANGE out of range negative end index - $type" { - create_$type mylist "$large 1 2 3" - assert_equal $large [r lrange mylist 0 -4] - assert_equal {} [r lrange mylist 0 -5] - } - } - - test {LRANGE against non existing key} { - assert_equal {} [r lrange nosuchkey 0 1] - } - - foreach {type large} [array get largevalue] { - proc trim_list {type min max} { - upvar 1 large large - r del mylist - create_$type mylist "1 2 3 4 $large" - r ltrim mylist $min $max - r lrange mylist 0 -1 - } - - test "LTRIM basics - $type" { - assert_equal "1" [trim_list $type 0 0] - assert_equal "1 2" [trim_list $type 0 1] - assert_equal "1 2 3" [trim_list $type 0 2] - assert_equal "2 3" [trim_list $type 1 2] - assert_equal "2 3 4 $large" [trim_list $type 1 -1] - assert_equal "2 3 4" [trim_list $type 1 -2] - assert_equal "4 $large" [trim_list $type -2 -1] - assert_equal "$large" [trim_list $type -1 -1] - assert_equal "1 2 3 4 $large" [trim_list $type -5 -1] - assert_equal "1 2 3 4 $large" [trim_list $type -10 10] - assert_equal "1 2 3 4 $large" [trim_list $type 0 5] - assert_equal "1 2 3 4 $large" [trim_list $type 0 10] - } - - test "LTRIM out of range negative end index - $type" { - 
assert_equal {1} [trim_list $type 0 -5] - assert_equal {} [trim_list $type 0 -6] - } - - } - - foreach {type large} [array get largevalue] { - test "LSET - $type" { - create_$type mylist "99 98 $large 96 95" - r lset mylist 1 foo - r lset mylist -1 bar - assert_equal "99 foo $large 96 bar" [r lrange mylist 0 -1] - } - - test "LSET out of range index - $type" { - assert_error ERR*range* {r lset mylist 10 foo} - } - } - - test {LSET against non existing key} { - assert_error ERR*key* {r lset nosuchkey 10 foo} - } - -# test {LSET against non list value} { -# r set nolist foobar -# assert_error WRONGTYPE* {r lset nolist 0 foo} -# } - - foreach {type e} [array get largevalue] { - test "LREM remove all the occurrences - $type" { - create_$type mylist "$e foo bar foobar foobared zap bar test foo" - assert_equal 2 [r lrem mylist 0 bar] - assert_equal "$e foo foobar foobared zap test foo" [r lrange mylist 0 -1] - } - - test "LREM remove the first occurrence - $type" { - assert_equal 1 [r lrem mylist 1 foo] - assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1] - } - - test "LREM remove non existing element - $type" { - assert_equal 0 [r lrem mylist 1 nosuchelement] - assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1] - } - - test "LREM starting from tail with negative count - $type" { - create_$type mylist "$e foo bar foobar foobared zap bar test foo foo" - assert_equal 1 [r lrem mylist -1 bar] - assert_equal "$e foo bar foobar foobared zap test foo foo" [r lrange mylist 0 -1] - } - - test "LREM starting from tail with negative count (2) - $type" { - assert_equal 2 [r lrem mylist -2 foo] - assert_equal "$e foo bar foobar foobared zap test" [r lrange mylist 0 -1] - } - - test "LREM deleting objects that may be int encoded - $type" { - create_$type myotherlist "$e 1 2 3" - assert_equal 1 [r lrem myotherlist 1 2] - assert_equal 3 [r llen myotherlist] - } - } - - test "Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - $rd1 brpoplpush a b 0 - $rd1 brpoplpush a b 0 - $rd2 brpoplpush b c 0 - after 1000 - r lpush a data - $rd1 close - $rd2 close - r ping - } {PONG} -} diff --git a/tools/pika_migrate/tests/unit/type/set.tcl b/tools/pika_migrate/tests/unit/type/set.tcl deleted file mode 100644 index de3c493a9c..0000000000 --- a/tools/pika_migrate/tests/unit/type/set.tcl +++ /dev/null @@ -1,531 +0,0 @@ -start_server { - tags {"set"} - overrides { - "set-max-intset-entries" 512 - } -} { - proc create_set {key entries} { - r del $key - foreach entry $entries { r sadd $key $entry } - } - - test {SADD, SCARD, SISMEMBER, SMEMBERS basics - regular set} { - create_set myset {foo} -# assert_encoding hashtable myset - assert_equal 1 [r sadd myset bar] - assert_equal 0 [r sadd myset bar] - assert_equal 2 [r scard myset] - assert_equal 1 [r sismember myset foo] - assert_equal 1 [r sismember myset bar] - assert_equal 0 [r sismember myset bla] - assert_equal {bar foo} [lsort [r smembers myset]] - } - - test {SADD, SCARD, SISMEMBER, SMEMBERS basics - intset} { - create_set myset {17} -# assert_encoding intset myset - assert_equal 1 [r sadd myset 16] - assert_equal 0 [r sadd myset 16] - assert_equal 2 [r scard myset] - assert_equal 1 [r sismember myset 16] - assert_equal 1 [r sismember myset 17] - assert_equal 0 [r sismember myset 18] - assert_equal {16 17} [lsort [r smembers myset]] - } - -# test {SADD against non set} { -# r lpush mylist foo -# assert_error WRONGTYPE* {r sadd mylist bar} -# 
} - - test "SADD a non-integer against an intset" { - create_set myset {1 2 3} -# assert_encoding intset myset - assert_equal 1 [r sadd myset a] -# assert_encoding hashtable myset - } - - test "SADD an integer larger than 64 bits" { - create_set myset {213244124402402314402033402} -# assert_encoding hashtable myset - assert_equal 1 [r sismember myset 213244124402402314402033402] - } - - test "SADD overflows the maximum allowed integers in an intset" { - r del myset - for {set i 0} {$i < 512} {incr i} { r sadd myset $i } -# assert_encoding intset myset - assert_equal 1 [r sadd myset 512] -# assert_encoding hashtable myset - } - - test {Variadic SADD} { - r del myset - assert_equal 3 [r sadd myset a b c] - assert_equal 2 [r sadd myset A a b c B] - assert_equal [lsort {A a b c B}] [lsort [r smembers myset]] - } - -# test "Set encoding after DEBUG RELOAD" { -# r del myintset myhashset mylargeintset -# for {set i 0} {$i < 100} {incr i} { r sadd myintset $i } -# for {set i 0} {$i < 1280} {incr i} { r sadd mylargeintset $i } -# for {set i 0} {$i < 256} {incr i} { r sadd myhashset [format "i%03d" $i] } -# assert_encoding intset myintset -# assert_encoding hashtable mylargeintset -# assert_encoding hashtable myhashset -# -# r debug reload -# assert_encoding intset myintset -# assert_encoding hashtable mylargeintset -# assert_encoding hashtable myhashset -# } - - test {SREM basics - regular set} { - create_set myset {foo bar ciao} -# assert_encoding hashtable myset - assert_equal 0 [r srem myset qux] - assert_equal 1 [r srem myset foo] - assert_equal {bar ciao} [lsort [r smembers myset]] - } - - test {SREM basics - intset} { - create_set myset {3 4 5} -# assert_encoding intset myset - assert_equal 0 [r srem myset 6] - assert_equal 1 [r srem myset 4] - assert_equal {3 5} [lsort [r smembers myset]] - } - - test {SREM with multiple arguments} { - r del myset - r sadd myset a b c d - assert_equal 0 [r srem myset k k k] - assert_equal 2 [r srem myset b d x y] - lsort [r smembers myset] - } {a c} - - test {SREM variadic version with more args needed to destroy the key} { - r del myset - r sadd myset 1 2 3 - r srem myset 1 2 3 4 5 6 7 8 - } {3} - - foreach {type} {hashtable intset} { - for {set i 1} {$i <= 5} {incr i} { - r del [format "set%d" $i] - } - for {set i 0} {$i < 200} {incr i} { - r sadd set1 $i - r sadd set2 [expr $i+195] - } - foreach i {199 195 1000 2000} { - r sadd set3 $i - } - for {set i 5} {$i < 200} {incr i} { - r sadd set4 $i - } - r sadd set5 0 - - # To make sure the sets are encoded as the type we are testing -- also - # when the VM is enabled and the values may be swapped in and out - # while the tests are running -- an extra element is added to every - # set that determines its encoding. 
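# (Hedged sketch, hypothetical key demoset, of the conversion rule the
# sentinel element below relies on in upstream redis: all-integer members
# keep the intset encoding, a single non-integer member flips the set to
# hashtable for good.)
r del demoset
r sadd demoset 1 2 3          ;# still representable as an intset
r sadd demoset sentinel       ;# non-integer member => hashtable from now on
# r object encoding demoset   ;# upstream redis would answer: hashtable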
- set large 200 - if {$type eq "hashtable"} { - set large foo - } - - for {set i 1} {$i <= 5} {incr i} { - r sadd [format "set%d" $i] $large - } - -# test "Generated sets must be encoded as $type" { -# for {set i 1} {$i <= 5} {incr i} { -# assert_encoding $type [format "set%d" $i] -# } -# } - - test "SINTER with two sets - $type" { - assert_equal [list 195 196 197 198 199 $large] [lsort [r sinter set1 set2]] - } - - test "SINTERSTORE with two sets - $type" { - r sinterstore setres set1 set2 -# assert_encoding $type setres - assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres]] - } - -# test "SINTERSTORE with two sets, after a DEBUG RELOAD - $type" { -# r debug reload -# r sinterstore setres set1 set2 -# assert_encoding $type setres -# assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres]] -# } - - test "SUNION with two sets - $type" { - set expected [lsort -uniq "[r smembers set1] [r smembers set2]"] - assert_equal $expected [lsort [r sunion set1 set2]] - } - - test "SUNIONSTORE with two sets - $type" { - r sunionstore setres set1 set2 -# assert_encoding $type setres - set expected [lsort -uniq "[r smembers set1] [r smembers set2]"] - assert_equal $expected [lsort [r smembers setres]] - } - - test "SINTER against three sets - $type" { - assert_equal [list 195 199 $large] [lsort [r sinter set1 set2 set3]] - } - - test "SINTERSTORE with three sets - $type" { - r sinterstore setres set1 set2 set3 - assert_equal [list 195 199 $large] [lsort [r smembers setres]] - } - - test "SUNION with non existing keys - $type" { - set expected [lsort -uniq "[r smembers set1] [r smembers set2]"] - assert_equal $expected [lsort [r sunion nokey1 set1 set2 nokey2]] - } - - test "SDIFF with two sets - $type" { - assert_equal {0 1 2 3 4} [lsort [r sdiff set1 set4]] - } - - test "SDIFF with three sets - $type" { - assert_equal {1 2 3 4} [lsort [r sdiff set1 set4 set5]] - } - - test "SDIFFSTORE with three sets - $type" { - r sdiffstore setres set1 set4 set5 - # When we start with intsets, we should always end with intsets. 
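# (Hedged aside: the preservation claim holds on upstream redis because
# SDIFF over pure-integer sets can never introduce a non-integer member;
# the assertion below presumably stays commented out because encodings are
# not observable through this backend.)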
-# if {$type eq {intset}} { -# assert_encoding intset setres -# } - assert_equal {1 2 3 4} [lsort [r smembers setres]] - } - } - - test "SDIFF with first set empty" { - r del set1 set2 set3 - r sadd set2 1 2 3 4 - r sadd set3 a b c d - r sdiff set1 set2 set3 - } {} - - test "SDIFF with same set two times" { - r del set1 - r sadd set1 a b c 1 2 3 4 5 6 - r sdiff set1 set1 - } {} - - test "SDIFF fuzzing" { - for {set j 0} {$j < 100} {incr j} { - unset -nocomplain s - array set s {} - set args {} - set num_sets [expr {[randomInt 10]+1}] - for {set i 0} {$i < $num_sets} {incr i} { - set num_elements [randomInt 100] - r del set_$i - lappend args set_$i - while {$num_elements} { - set ele [randomValue] - r sadd set_$i $ele - if {$i == 0} { - set s($ele) x - } else { - unset -nocomplain s($ele) - } - incr num_elements -1 - } - } - set result [lsort [r sdiff {*}$args]] - assert_equal $result [lsort [array names s]] - } - } - -# test "SINTER against non-set should throw error" { -# r set key1 x -# assert_error "WRONGTYPE*" {r sinter key1 noset} -# } - -# test "SUNION against non-set should throw error" { -# r set key1 x -# assert_error "WRONGTYPE*" {r sunion key1 noset} -# } - - test "SINTER should handle non existing key as empty" { - r del set1 set2 set3 - r sadd set1 a b c - r sadd set2 b c d - r sinter set1 set2 set3 - } {} - - test "SINTER with same integer elements but different encoding" { - r del set1 set2 - r sadd set1 1 2 3 - r sadd set2 1 2 3 a - r srem set2 a -# assert_encoding intset set1 -# assert_encoding hashtable set2 - lsort [r sinter set1 set2] - } {1 2 3} - - test "SINTERSTORE against non existing keys should delete dstkey" { - r set setres xxx - assert_equal 0 [r sinterstore setres foo111 bar222] -# assert_equal 0 [r exists setres] - } - - test "SUNIONSTORE against non existing keys should delete dstkey" { - r set setres xxx - assert_equal 0 [r sunionstore setres foo111 bar222] -# assert_equal 0 [r exists setres] - } - - foreach {type contents} {hashtable {a b c} intset {1 2 3}} { - test "SPOP basics - $type" { - create_set myset $contents -# assert_encoding $type myset - assert_equal $contents [lsort [list [r spop myset] [r spop myset] [r spop myset]]] - assert_equal 0 [r scard myset] - } - - test "SRANDMEMBER - $type" { - create_set myset $contents - unset -nocomplain myset - array set myset {} - for {set i 0} {$i < 100} {incr i} { - set myset([r srandmember myset]) 1 - } - assert_equal $contents [lsort [array names myset]] - } - } - - test "SRANDMEMBER with <count> against non existing key" { - r srandmember nonexisting_key 100 - } {} - - foreach {type contents} { - hashtable { - 1 5 10 50 125 50000 33959417 4775547 65434162 - 12098459 427716 483706 2726473884 72615637475 - MARY PATRICIA LINDA BARBARA ELIZABETH JENNIFER MARIA - SUSAN MARGARET DOROTHY LISA NANCY KAREN BETTY HELEN - SANDRA DONNA CAROL RUTH SHARON MICHELLE LAURA SARAH - KIMBERLY DEBORAH JESSICA SHIRLEY CYNTHIA ANGELA MELISSA - BRENDA AMY ANNA REBECCA VIRGINIA KATHLEEN - } - intset { - 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 - 20 21 22 23 24 25 26 27 28 29 - 30 31 32 33 34 35 36 37 38 39 - 40 41 42 43 44 45 46 47 48 49 - } - } { - test "SRANDMEMBER with <count> - $type" { - create_set myset $contents - unset -nocomplain myset - array set myset {} - foreach ele [r smembers myset] { - set myset($ele) 1 - } - assert_equal [lsort $contents] [lsort [array names myset]] - - # Make sure that a count of 0 is handled correctly. 
- assert_equal [r srandmember myset 0] {} - - # We'll stress different parts of the code, see the implementation - # of SRANDMEMBER for more information, but basically there are - # four different code paths. - # - # PATH 1: Use negative count. - # - # 1) Check that it returns repeated elements. - set res [r srandmember myset -100] - assert_equal [llength $res] 100 - - # 2) Check that all the elements actually belong to the - # original set. - foreach ele $res { - assert {[info exists myset($ele)]} - } - - # 3) Check that eventually all the elements are returned. - unset -nocomplain auxset - set iterations 1000 - while {$iterations != 0} { - incr iterations -1 - set res [r srandmember myset -10] - foreach ele $res { - set auxset($ele) 1 - } - if {[lsort [array names myset]] eq - [lsort [array names auxset]]} { - break; - } - } - assert {$iterations != 0} - - # PATH 2: positive count (unique behavior) with requested size - # equal or greater than set size. - foreach size {50 100} { - set res [r srandmember myset $size] - assert_equal [llength $res] 50 - assert_equal [lsort $res] [lsort [array names myset]] - } - - # PATH 3: Ask almost as elements as there are in the set. - # In this case the implementation will duplicate the original - # set and will remove random elements up to the requested size. - # - # PATH 4: Ask a number of elements definitely smaller than - # the set size. - # - # We can test both the code paths just changing the size but - # using the same code. - - foreach size {45 5} { - set res [r srandmember myset $size] - assert_equal [llength $res] $size - - # 1) Check that all the elements actually belong to the - # original set. - foreach ele $res { - assert {[info exists myset($ele)]} - } - - # 2) Check that eventually all the elements are returned. 
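# (Hedged aside: this is a coupon-collector style check -- with 50 members,
# batches of 10 random picks, and up to 1000 rounds, missing any member
# under uniform sampling is astronomically unlikely, so an exhausted loop
# signals biased sampling rather than bad luck.)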
- unset -nocomplain auxset - set iterations 1000 - while {$iterations != 0} { - incr iterations -1 - set res [r srandmember myset -10] - foreach ele $res { - set auxset($ele) 1 - } - if {[lsort [array names myset]] eq - [lsort [array names auxset]]} { - break; - } - } - assert {$iterations != 0} - } - } - } - - proc setup_move {} { - r del myset3 myset4 - create_set myset1 {1 a b} - create_set myset2 {2 3 4} -# assert_encoding hashtable myset1 -# assert_encoding intset myset2 - } - - test "SMOVE basics - from regular set to intset" { - # move a non-integer element to an intset should convert encoding - setup_move - assert_equal 1 [r smove myset1 myset2 a] - assert_equal {1 b} [lsort [r smembers myset1]] - assert_equal {2 3 4 a} [lsort [r smembers myset2]] -# assert_encoding hashtable myset2 - - # move an integer element should not convert the encoding - setup_move - assert_equal 1 [r smove myset1 myset2 1] - assert_equal {a b} [lsort [r smembers myset1]] - assert_equal {1 2 3 4} [lsort [r smembers myset2]] -# assert_encoding intset myset2 - } - - test "SMOVE basics - from intset to regular set" { - setup_move - assert_equal 1 [r smove myset2 myset1 2] - assert_equal {1 2 a b} [lsort [r smembers myset1]] - assert_equal {3 4} [lsort [r smembers myset2]] - } - - test "SMOVE non existing key" { - setup_move - assert_equal 0 [r smove myset1 myset2 foo] - assert_equal {1 a b} [lsort [r smembers myset1]] - assert_equal {2 3 4} [lsort [r smembers myset2]] - } - - test "SMOVE non existing src set" { - setup_move - assert_equal 0 [r smove noset myset2 foo] - assert_equal {2 3 4} [lsort [r smembers myset2]] - } - - test "SMOVE from regular set to non existing destination set" { - setup_move - assert_equal 1 [r smove myset1 myset3 a] - assert_equal {1 b} [lsort [r smembers myset1]] - assert_equal {a} [lsort [r smembers myset3]] -# assert_encoding hashtable myset3 - } - - test "SMOVE from intset to non existing destination set" { - setup_move - assert_equal 1 [r smove myset2 myset3 2] - assert_equal {3 4} [lsort [r smembers myset2]] - assert_equal {2} [lsort [r smembers myset3]] -# assert_encoding intset myset3 - } - -# test "SMOVE wrong src key type" { -# r set x 10 -# assert_error "WRONGTYPE*" {r smove x myset2 foo} -# } - -# test "SMOVE wrong dst key type" { -# r set x 10 -# assert_error "WRONGTYPE*" {r smove myset2 x foo} -# } - - test "SMOVE with identical source and destination" { - r del set - r sadd set a b c - r smove set set b - lsort [r smembers set] - } {a b c} - - tags {slow} { - test {intsets implementation stress testing} { - for {set j 0} {$j < 20} {incr j} { - unset -nocomplain s - array set s {} - r del s - set len [randomInt 1024] - for {set i 0} {$i < $len} {incr i} { - randpath { - set data [randomInt 65536] - } { - set data [randomInt 4294967296] - } { - set data [randomInt 18446744073709551616] - } - set s($data) {} - r sadd s $data - } - assert_equal [lsort [r smembers s]] [lsort [array names s]] - set len [array size s] - for {set i 0} {$i < $len} {incr i} { - set e [r spop s] - if {![info exists s($e)]} { - puts "Can't find '$e' on local array" - puts "Local array: [lsort [r smembers s]]" - puts "Remote array: [lsort [array names s]]" - error "exception" - } - array unset s $e - } - assert_equal [r scard s] 0 - assert_equal [array size s] 0 - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/type/zset.tcl b/tools/pika_migrate/tests/unit/type/zset.tcl deleted file mode 100644 index 626156c572..0000000000 --- a/tools/pika_migrate/tests/unit/type/zset.tcl +++ /dev/null @@ 
-1,944 +0,0 @@ -start_server {tags {"zset"}} { - proc create_zset {key items} { - r del $key - foreach {score entry} $items { - r zadd $key $score $entry - } - } - - proc basics {encoding} { - #if {$encoding == "ziplist"} { - # r config set zset-max-ziplist-entries 128 - # r config set zset-max-ziplist-value 64 - #} elseif {$encoding == "skiplist"} { - # r config set zset-max-ziplist-entries 0 - # r config set zset-max-ziplist-value 0 - #} else { - # puts "Unknown sorted set encoding" - # exit - #} - - test "Check encoding - $encoding" { - r del ztmp - r zadd ztmp 10 x - #assert_encoding $encoding ztmp - } - - test "ZSET basic ZADD and score update - $encoding" { - r del ztmp - r zadd ztmp 10 x - r zadd ztmp 20 y - r zadd ztmp 30 z - assert_equal {x y z} [r zrange ztmp 0 -1] - - r zadd ztmp 1 y - assert_equal {y x z} [r zrange ztmp 0 -1] - } - - test "ZSET element can't be set to NaN with ZADD - $encoding" { - assert_error "*not*float*" {r zadd myzset abcde abc} - } - - test "ZSET element can't be set to NaN with ZINCRBY" { - assert_error "*not*float*" {r zadd myzset abcde abc} - } - - test "ZINCRBY calls leading to NaN result in error" { - r zincrby myzset 999999999 abc - assert_error "*not*float*" {r zincrby myzset abcde abc} - } - - test {ZADD - Variadic version base case} { - r del myzset - list [r zadd myzset 10 a 20 b 30 c] [r zrange myzset 0 -1 withscores] - } {3 {a 10 b 20 c 30}} - - test {ZADD - Return value is the number of actually added items} { - list [r zadd myzset 5 x 20 b 30 c] [r zrange myzset 0 -1 withscores] - } {1 {x 5 a 10 b 20 c 30}} - - test {ZADD - Variadic version does not add nothing on single parsing err} { - r del myzset - catch {r zadd myzset 10 a 20 b 30.badscore c} e - assert_match {*ERR*not*float*} $e - #r exists myzset - } - - test {ZADD - Variadic version will raise error on missing arg} { - r del myzset - catch {r zadd myzset 10 a 20 b 30 c 40} e - assert_match {*ERR*syntax*} $e - } - - test {ZINCRBY does not work variadic even if shares ZADD implementation} { - r del myzset - catch {r zincrby myzset 10 a 20 b 30 c} e - assert_match {*ERR*wrong*number*arg*} $e - } - - test "ZCARD basics - $encoding" { - assert_equal 3 [r zcard ztmp] - assert_equal 0 [r zcard zdoesntexist] - } - - test "ZREM removes key after last element is removed" { - r del ztmp - r zadd ztmp 10 x - r zadd ztmp 20 y - - #assert_equal 1 [r exists ztmp] - assert_equal 0 [r zrem ztmp z] - assert_equal 1 [r zrem ztmp y] - assert_equal 1 [r zrem ztmp x] - #assert_equal 0 [r exists ztmp] - } - - test "ZREM variadic version" { - r del ztmp - r zadd ztmp 10 a 20 b 30 c - assert_equal 2 [r zrem ztmp x y a b k] - assert_equal 0 [r zrem ztmp foo bar] - assert_equal 1 [r zrem ztmp c] - #assert_equal 0 [r exists ztmp] - } - - test "ZREM variadic version -- remove elements after key deletion" { - r del ztmp - r zadd ztmp 10 a 20 b 30 c - r zrem ztmp a b c d e f g - } {3} - - test "ZRANGE basics - $encoding" { - r del ztmp - r zadd ztmp 1 a - r zadd ztmp 2 b - r zadd ztmp 3 c - r zadd ztmp 4 d - - assert_equal {a b c d} [r zrange ztmp 0 -1] - assert_equal {a b c} [r zrange ztmp 0 -2] - assert_equal {b c d} [r zrange ztmp 1 -1] - assert_equal {b c} [r zrange ztmp 1 -2] - assert_equal {c d} [r zrange ztmp -2 -1] - assert_equal {c} [r zrange ztmp -2 -2] - - # out of range start index - assert_equal {a b c} [r zrange ztmp -5 2] - assert_equal {a b} [r zrange ztmp -5 1] - assert_equal {} [r zrange ztmp 5 -1] - assert_equal {} [r zrange ztmp 5 -2] - - # out of range end index - assert_equal {a b c d} [r 
zrange ztmp 0 5] - assert_equal {b c d} [r zrange ztmp 1 5] - assert_equal {} [r zrange ztmp 0 -5] - assert_equal {} [r zrange ztmp 1 -5] - - # withscores - assert_equal {a 1 b 2 c 3 d 4} [r zrange ztmp 0 -1 withscores] - } - - test "ZREVRANGE basics - $encoding" { - r del ztmp - r zadd ztmp 1 a - r zadd ztmp 2 b - r zadd ztmp 3 c - r zadd ztmp 4 d - - assert_equal {d c b a} [r zrevrange ztmp 0 -1] - assert_equal {d c b} [r zrevrange ztmp 0 -2] - assert_equal {c b a} [r zrevrange ztmp 1 -1] - assert_equal {c b} [r zrevrange ztmp 1 -2] - assert_equal {b a} [r zrevrange ztmp -2 -1] - assert_equal {b} [r zrevrange ztmp -2 -2] - - # out of range start index - assert_equal {d c b} [r zrevrange ztmp -5 2] - assert_equal {d c} [r zrevrange ztmp -5 1] - assert_equal {} [r zrevrange ztmp 5 -1] - assert_equal {} [r zrevrange ztmp 5 -2] - - # out of range end index - assert_equal {d c b a} [r zrevrange ztmp 0 5] - assert_equal {c b a} [r zrevrange ztmp 1 5] - assert_equal {} [r zrevrange ztmp 0 -5] - assert_equal {} [r zrevrange ztmp 1 -5] - - ## withscores - assert_equal {d 4 c 3 b 2 a 1} [r zrevrange ztmp 0 -1 withscores] - } - - test "ZRANK/ZREVRANK basics - $encoding" { - r del zranktmp - r zadd zranktmp 10 x - r zadd zranktmp 20 y - r zadd zranktmp 30 z - assert_equal 0 [r zrank zranktmp x] - assert_equal 1 [r zrank zranktmp y] - assert_equal 2 [r zrank zranktmp z] - assert_equal "" [r zrank zranktmp foo] - assert_equal 2 [r zrevrank zranktmp x] - assert_equal 1 [r zrevrank zranktmp y] - assert_equal 0 [r zrevrank zranktmp z] - assert_equal "" [r zrevrank zranktmp foo] - } - - test "ZRANK - after deletion - $encoding" { - r zrem zranktmp y - assert_equal 0 [r zrank zranktmp x] - assert_equal 1 [r zrank zranktmp z] - } - - test "ZINCRBY - can create a new sorted set - $encoding" { - r del zset - r zincrby zset 1 foo - assert_equal {foo} [r zrange zset 0 -1] - assert_equal 1 [r zscore zset foo] - } - - test "ZINCRBY - increment and decrement - $encoding" { - r zincrby zset 2 foo - r zincrby zset 1 bar - assert_equal {bar foo} [r zrange zset 0 -1] - - r zincrby zset 10 bar - r zincrby zset -5 foo - r zincrby zset -5 bar - assert_equal {foo bar} [r zrange zset 0 -1] - - assert_equal -2 [r zscore zset foo] - assert_equal 6 [r zscore zset bar] - } - - proc create_default_zset {} { - create_zset zset {-999999999 a 1 b 2 c 3 d 4 e 5 f 999999999 g} - } - - test "ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics" { - create_default_zset - - # inclusive range - assert_equal {a b c} [r zrangebyscore zset -999999999 2] - assert_equal {b c d} [r zrangebyscore zset 0 3] - assert_equal {d e f} [r zrangebyscore zset 3 6] - assert_equal {e f g} [r zrangebyscore zset 4 999999999] - assert_equal {c b a} [r zrevrangebyscore zset 2 -999999999] - assert_equal {d c b} [r zrevrangebyscore zset 3 0] - assert_equal {f e d} [r zrevrangebyscore zset 6 3] - assert_equal {g f e} [r zrevrangebyscore zset 999999999 4] - assert_equal 3 [r zcount zset 0 3] - - # exclusive range - assert_equal {b} [r zrangebyscore zset (-999999999 (2] - assert_equal {b c} [r zrangebyscore zset (0 (3] - assert_equal {e f} [r zrangebyscore zset (3 (6] - assert_equal {f} [r zrangebyscore zset (4 (999999999] - assert_equal {b} [r zrevrangebyscore zset (2 (-999999999] - assert_equal {c b} [r zrevrangebyscore zset (3 (0] - assert_equal {f e} [r zrevrangebyscore zset (6 (3] - assert_equal {f} [r zrevrangebyscore zset (999999999 (4] - assert_equal 2 [r zcount zset (0 (3] - - # test empty ranges - r zrem zset a - r zrem zset g - - # inclusive - assert_equal {} 
[r zrangebyscore zset 4 2] - assert_equal {} [r zrangebyscore zset 6 999999999] - assert_equal {} [r zrangebyscore zset -999999999 -6] - assert_equal {} [r zrevrangebyscore zset 999999999 6] - assert_equal {} [r zrevrangebyscore zset -6 -999999999] - - # exclusive - assert_equal {} [r zrangebyscore zset (4 (2] - assert_equal {} [r zrangebyscore zset 2 (2] - assert_equal {} [r zrangebyscore zset (2 2] - assert_equal {} [r zrangebyscore zset (6 (999999999] - assert_equal {} [r zrangebyscore zset (-999999999 (-6] - assert_equal {} [r zrevrangebyscore zset (999999999 (6] - assert_equal {} [r zrevrangebyscore zset (-6 (-999999999] - - # empty inner range - assert_equal {} [r zrangebyscore zset 2.4 2.6] - assert_equal {} [r zrangebyscore zset (2.4 2.6] - assert_equal {} [r zrangebyscore zset 2.4 (2.6] - assert_equal {} [r zrangebyscore zset (2.4 (2.6] - } - - test "ZRANGEBYSCORE with WITHSCORES" { - create_default_zset - assert_equal {b 1 c 2 d 3} [r zrangebyscore zset 0 3 withscores] - assert_equal {d 3 c 2 b 1} [r zrevrangebyscore zset 3 0 withscores] - } - - test "ZRANGEBYSCORE with LIMIT" { - create_default_zset - assert_equal {b c} [r zrangebyscore zset 0 10 LIMIT 0 2] - assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 3] - assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 10] - assert_equal {} [r zrangebyscore zset 0 10 LIMIT 20 10] - assert_equal {f e} [r zrevrangebyscore zset 10 0 LIMIT 0 2] - assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 3] - assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 10] - assert_equal {} [r zrevrangebyscore zset 10 0 LIMIT 20 10] - } - - test "ZRANGEBYSCORE with LIMIT and WITHSCORES" { - create_default_zset - assert_equal {e 4 f 5} [r zrangebyscore zset 2 5 LIMIT 2 3 WITHSCORES] - assert_equal {d 3 c 2} [r zrevrangebyscore zset 5 2 LIMIT 2 3 WITHSCORES] - } - - test "ZRANGEBYSCORE with non-value min or max" { - assert_error "*not*float*" {r zrangebyscore fooz str 1} - assert_error "*not*float*" {r zrangebyscore fooz 1 str} - assert_error "*not*float*" {r zrangebyscore fooz 1 abcde} - } - - proc create_default_lex_zset {} { - create_zset zset {0 alpha 0 bar 0 cool 0 down - 0 elephant 0 foo 0 great 0 hill - 0 omega} - } - - test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZCOUNT basics" { - create_default_lex_zset - - # inclusive range - assert_equal {alpha bar cool} [r zrangebylex zset - \[cool] - assert_equal {bar cool down} [r zrangebylex zset \[bar \[down] - assert_equal {great hill omega} [r zrangebylex zset \[g +] - assert_equal {cool bar alpha} [r zrevrangebylex zset \[cool -] - assert_equal {down cool bar} [r zrevrangebylex zset \[down \[bar] - assert_equal {omega hill great foo elephant down} [r zrevrangebylex zset + \[d] - assert_equal 3 [r zlexcount zset \[ele \[h] - - # exclusive range - assert_equal {alpha bar} [r zrangebylex zset - (cool] - assert_equal {cool} [r zrangebylex zset (bar (down] - assert_equal {hill omega} [r zrangebylex zset (great +] - assert_equal {bar alpha} [r zrevrangebylex zset (cool -] - assert_equal {cool} [r zrevrangebylex zset (down (bar] - assert_equal {omega hill} [r zrevrangebylex zset + (great] - assert_equal 2 [r zlexcount zset (ele (great] - - # inclusive and exclusive - assert_equal {} [r zrangebylex zset (az (b] - assert_equal {} [r zrangebylex zset (z +] - assert_equal {} [r zrangebylex zset - \[aaaa] - assert_equal {} [r zrevrangebylex zset \[elez \[elex] - assert_equal {} [r zrevrangebylex zset (hill (omega] - } - - test "ZRANGEBYSLEX with LIMIT" { - create_default_lex_zset - assert_equal {alpha 
bar} [r zrangebylex zset - \[cool LIMIT 0 2] - assert_equal {bar cool} [r zrangebylex zset - \[cool LIMIT 1 2] - assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 0 0] - assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 2 0] - assert_equal {bar} [r zrangebylex zset \[bar \[down LIMIT 0 1] - assert_equal {cool} [r zrangebylex zset \[bar \[down LIMIT 1 1] - assert_equal {bar cool down} [r zrangebylex zset \[bar \[down LIMIT 0 100] - assert_equal {omega hill great foo elephant} [r zrevrangebylex zset + \[d LIMIT 0 5] - assert_equal {omega hill great foo} [r zrevrangebylex zset + \[d LIMIT 0 4] - } - - test "ZRANGEBYLEX with invalid lex range specifiers" { - assert_error "*not*string*" {r zrangebylex fooz foo bar} - assert_error "*not*string*" {r zrangebylex fooz \[foo bar} - assert_error "*not*string*" {r zrangebylex fooz foo \[bar} - assert_error "*not*string*" {r zrangebylex fooz +x \[bar} - assert_error "*not*string*" {r zrangebylex fooz -x \[bar} - } - - test "ZREMRANGEBYSCORE basics" { - proc remrangebyscore {min max} { - create_zset zset {1 a 2 b 3 c 4 d 5 e} - #assert_equal 1 [r exists zset] - r zremrangebyscore zset $min $max - } - - # inner range - assert_equal 3 [remrangebyscore 2 4] - assert_equal {a e} [r zrange zset 0 -1] - - # start underflow - assert_equal 1 [remrangebyscore -10 1] - assert_equal {b c d e} [r zrange zset 0 -1] - - # end overflow - assert_equal 1 [remrangebyscore 5 10] - assert_equal {a b c d} [r zrange zset 0 -1] - - # switch min and max - assert_equal 0 [remrangebyscore 4 2] - assert_equal {a b c d e} [r zrange zset 0 -1] - - # -999999999 to mid - assert_equal 3 [remrangebyscore -999999999 3] - assert_equal {d e} [r zrange zset 0 -1] - - # mid to 999999999 - assert_equal 3 [remrangebyscore 3 999999999] - assert_equal {a b} [r zrange zset 0 -1] - - # -999999999 to 999999999 - assert_equal 5 [remrangebyscore -999999999 999999999] - assert_equal {} [r zrange zset 0 -1] - - # exclusive min - assert_equal 4 [remrangebyscore (1 5] - assert_equal {a} [r zrange zset 0 -1] - assert_equal 3 [remrangebyscore (2 5] - assert_equal {a b} [r zrange zset 0 -1] - - # exclusive max - assert_equal 4 [remrangebyscore 1 (5] - assert_equal {e} [r zrange zset 0 -1] - assert_equal 3 [remrangebyscore 1 (4] - assert_equal {d e} [r zrange zset 0 -1] - - # exclusive min and max - assert_equal 3 [remrangebyscore (1 (5] - assert_equal {a e} [r zrange zset 0 -1] - - # destroy when empty - assert_equal 5 [remrangebyscore 1 5] - # assert_equal 0 [r exists zset] - } - - test "ZREMRANGEBYSCORE with non-value min or max" { - assert_error "*not*float*" {r zremrangebyscore fooz str 1} - assert_error "*not*float*" {r zremrangebyscore fooz 1 str} - assert_error "*not*float*" {r zremrangebyscore fooz 1 abcde} - } - - test "ZREMRANGEBYRANK basics" { - proc remrangebyrank {min max} { - create_zset zset {1 a 2 b 3 c 4 d 5 e} - #assert_equal 1 [r exists zset] - r zremrangebyrank zset $min $max - } - - # inner range - assert_equal 3 [remrangebyrank 1 3] - assert_equal {a e} [r zrange zset 0 -1] - - # start underflow - assert_equal 1 [remrangebyrank -10 0] - assert_equal {b c d e} [r zrange zset 0 -1] - - # start overflow - assert_equal 0 [remrangebyrank 10 -1] - assert_equal {a b c d e} [r zrange zset 0 -1] - - # end underflow - assert_equal 0 [remrangebyrank 0 -10] - assert_equal {a b c d e} [r zrange zset 0 -1] - - # end overflow - assert_equal 5 [remrangebyrank 0 10] - assert_equal {} [r zrange zset 0 -1] - - # destroy when empty - assert_equal 5 [remrangebyrank 0 4] - #assert_equal 0 [r 
exists zset] - } - - test "ZUNIONSTORE against non-existing key doesn't set destination - $encoding" { - r del zseta - assert_equal 0 [r zunionstore dst_key 1 zseta] - #assert_equal 0 [r exists dst_key] - } - - test "ZUNIONSTORE with empty set - $encoding" { - r del zseta zsetb - r zadd zseta 1 a - r zadd zseta 2 b - r zunionstore zsetc 2 zseta zsetb - r zrange zsetc 0 -1 withscores - } {a 1 b 2} - - test "ZUNIONSTORE basics - $encoding" { - r del zseta zsetb zsetc - r zadd zseta 1 a - r zadd zseta 2 b - r zadd zseta 3 c - r zadd zsetb 1 b - r zadd zsetb 2 c - r zadd zsetb 3 d - - assert_equal 4 [r zunionstore zsetc 2 zseta zsetb] - assert_equal {a 1 b 3 d 3 c 5} [r zrange zsetc 0 -1 withscores] - } - - test "ZUNIONSTORE with weights - $encoding" { - assert_equal 4 [r zunionstore zsetc 2 zseta zsetb weights 2 3] - assert_equal {a 2 b 7 d 9 c 12} [r zrange zsetc 0 -1 withscores] - } - - test "ZUNIONSTORE with a regular set and weights - $encoding" { - r del seta - r sadd seta a - r sadd seta b - r sadd seta c - - # assert_equal 4 [r zunionstore zsetc 2 seta zsetb weights 2 3] - # assert_equal {a 2 b 5 c 8 d 9} [r zrange zsetc 0 -1 withscores] - } - - test "ZUNIONSTORE with AGGREGATE MIN - $encoding" { - assert_equal 4 [r zunionstore zsetc 2 zseta zsetb aggregate min] - assert_equal {a 1 b 1 c 2 d 3} [r zrange zsetc 0 -1 withscores] - } - - test "ZUNIONSTORE with AGGREGATE MAX - $encoding" { - assert_equal 4 [r zunionstore zsetc 2 zseta zsetb aggregate max] - assert_equal {a 1 b 2 c 3 d 3} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE basics - $encoding" { - assert_equal 2 [r zinterstore zsetc 2 zseta zsetb] - assert_equal {b 3 c 5} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE with weights - $encoding" { - assert_equal 2 [r zinterstore zsetc 2 zseta zsetb weights 2 3] - assert_equal {b 7 c 12} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE with a regular set and weights - $encoding" { - r del seta - r sadd seta a - r sadd seta b - r sadd seta c - # assert_equal 2 [r zinterstore zsetc 2 seta zsetb weights 2 3] - # assert_equal {b 5 c 8} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE with AGGREGATE MIN - $encoding" { - assert_equal 2 [r zinterstore zsetc 2 zseta zsetb aggregate min] - assert_equal {b 1 c 2} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE with AGGREGATE MAX - $encoding" { - assert_equal 2 [r zinterstore zsetc 2 zseta zsetb aggregate max] - assert_equal {b 2 c 3} [r zrange zsetc 0 -1 withscores] - } - - foreach cmd {ZUNIONSTORE ZINTERSTORE} { - # test "$cmd with 999999999/-999999999 scores - $encoding" { - # r del zsetinf1 zsetinf2 - - # r zadd zsetinf1 999999999 key - # r zadd zsetinf2 999999999 key - # r $cmd zsetinf3 2 zsetinf1 zsetinf2 - # assert_equal 999999999 [r zscore zsetinf3 key] - - # r zadd zsetinf1 -999999999 key - # r zadd zsetinf2 999999999 key - # r $cmd zsetinf3 2 zsetinf1 zsetinf2 - # assert_equal 0 [r zscore zsetinf3 key] - - # r zadd zsetinf1 999999999 key - # r zadd zsetinf2 -999999999 key - # r $cmd zsetinf3 2 zsetinf1 zsetinf2 - # assert_equal 0 [r zscore zsetinf3 key] - - # r zadd zsetinf1 -999999999 key - # r zadd zsetinf2 -999999999 key - # r $cmd zsetinf3 2 zsetinf1 zsetinf2 - # assert_equal -999999999 [r zscore zsetinf3 key] - # } - - test "$cmd with NaN weights $encoding" { - r del zsetinf1 zsetinf2 - - r zadd zsetinf1 1.0 key - r zadd zsetinf2 1.0 key - assert_error "*weight*not*float*" { - r $cmd zsetinf3 2 zsetinf1 zsetinf2 weights abcde abcde - } - } - } - } - - basics ziplist - basics 
skiplist - - test {ZINTERSTORE regression with two sets, intset+hashtable} { - r del seta setb setc - r sadd set1 a - r sadd set2 10 - r zinterstore set3 2 set1 set2 - } {0} - - test {ZUNIONSTORE regression, should not create NaN in scores} { - r zadd z -999999999 neginf - r zunionstore out 1 z weights 0 - r zrange out 0 -1 withscores - } {neginf 0} - - # test {ZINTERSTORE #516 regression, mixed sets and ziplist zsets} { - # r sadd one 100 101 102 103 - # r sadd two 100 200 201 202 - # r zadd three 1 500 1 501 1 502 1 503 1 100 - # r zinterstore to_here 3 one two three WEIGHTS 0 0 1 - # r zrange to_here 0 -1 - # } {100} - - test {ZUNIONSTORE result is sorted} { - # Create two sets with common and not common elements, perform - # the UNION, check that elements are still sorted. - r del one two dest - set cmd1 [list r zadd one] - set cmd2 [list r zadd two] - for {set j 0} {$j < 1000} {incr j} { - lappend cmd1 [expr rand()] [randomInt 1000] - lappend cmd2 [expr rand()] [randomInt 1000] - } - {*}$cmd1 - {*}$cmd2 - assert {[r zcard one] > 100} - assert {[r zcard two] > 100} - r zunionstore dest 2 one two - set oldscore 0 - foreach {ele score} [r zrange dest 0 -1 withscores] { - assert {$score >= $oldscore} - set oldscore $score - } - } - - proc stressers {encoding} { - if {$encoding == "ziplist"} { - # Little extra to allow proper fuzzing in the sorting stresser - #r config set zset-max-ziplist-entries 256 - #r config set zset-max-ziplist-value 64 - set elements 128 - } elseif {$encoding == "skiplist"} { - #r config set zset-max-ziplist-entries 0 - #r config set zset-max-ziplist-value 0 - if {$::accurate} {set elements 1000} else {set elements 100} - } else { - puts "Unknown sorted set encoding" - exit - } - - test "ZSCORE - $encoding" { - r del zscoretest - set aux {} - for {set i 0} {$i < $elements} {incr i} { - set score [expr rand()] - lappend aux $score - r zadd zscoretest $score $i - } - - #assert_encoding $encoding zscoretest - for {set i 0} {$i < $elements} {incr i} { - assert_equal [lindex $aux $i] [r zscore zscoretest $i] - } - } - - test "ZSCORE after a DEBUG RELOAD - $encoding" { - r del zscoretest - set aux {} - for {set i 0} {$i < $elements} {incr i} { - set score [expr rand()] - lappend aux $score - r zadd zscoretest $score $i - } - - #r debug reload - #assert_encoding $encoding zscoretest - for {set i 0} {$i < $elements} {incr i} { - assert_equal [lindex $aux $i] [r zscore zscoretest $i] - } - } - - test "ZSET sorting stresser - $encoding" { - set delta 0 - for {set test 0} {$test < 2} {incr test} { - unset -nocomplain auxarray - array set auxarray {} - set auxlist {} - r del myzset - for {set i 0} {$i < $elements} {incr i} { - if {$test == 0} { - set score [expr rand()] - } else { - set score [expr int(rand()*10)] - } - set auxarray($i) $score - r zadd myzset $score $i - # Random update - if {[expr rand()] < .2} { - set j [expr int(rand()*1000)] - if {$test == 0} { - set score [expr rand()] - } else { - set score [expr int(rand()*10)] - } - set auxarray($j) $score - r zadd myzset $score $j - } - } - foreach {item score} [array get auxarray] { - lappend auxlist [list $score $item] - } - set sorted [lsort -command zlistAlikeSort $auxlist] - set auxlist {} - foreach x $sorted { - lappend auxlist [lindex $x 1] - } - - #assert_encoding $encoding myzset - set fromredis [r zrange myzset 0 -1] - set delta 0 - for {set i 0} {$i < [llength $fromredis]} {incr i} { - if {[lindex $fromredis $i] != [lindex $auxlist $i]} { - incr delta - } - } - } - assert_equal 0 $delta - } - - test 
"ZRANGEBYSCORE fuzzy test, 100 ranges in $elements element sorted set - $encoding" { - set err {} - r del zset - for {set i 0} {$i < $elements} {incr i} { - r zadd zset [expr rand()] $i - } - - #assert_encoding $encoding zset - for {set i 0} {$i < 100} {incr i} { - set min [expr rand()] - set max [expr rand()] - if {$min > $max} { - set aux $min - set min $max - set max $aux - } - set low [r zrangebyscore zset -999999999 $min] - set ok [r zrangebyscore zset $min $max] - set high [r zrangebyscore zset $max 999999999] - set lowx [r zrangebyscore zset -999999999 ($min] - set okx [r zrangebyscore zset ($min ($max] - set highx [r zrangebyscore zset ($max 999999999] - - if {[r zcount zset -999999999 $min] != [llength $low]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset $min $max] != [llength $ok]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset $max 999999999] != [llength $high]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset -999999999 ($min] != [llength $lowx]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset ($min ($max] != [llength $okx]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset ($max 999999999] != [llength $highx]} { - append err "Error, len does not match zcount\n" - } - - foreach x $low { - set score [r zscore zset $x] - if {$score > $min} { - append err "Error, score for $x is $score > $min\n" - } - } - foreach x $lowx { - set score [r zscore zset $x] - if {$score >= $min} { - append err "Error, score for $x is $score >= $min\n" - } - } - foreach x $ok { - set score [r zscore zset $x] - if {$score < $min || $score > $max} { - append err "Error, score for $x is $score outside $min-$max range\n" - } - } - foreach x $okx { - set score [r zscore zset $x] - if {$score <= $min || $score >= $max} { - append err "Error, score for $x is $score outside $min-$max open range\n" - } - } - foreach x $high { - set score [r zscore zset $x] - if {$score < $max} { - append err "Error, score for $x is $score < $max\n" - } - } - foreach x $highx { - set score [r zscore zset $x] - if {$score <= $max} { - append err "Error, score for $x is $score <= $max\n" - } - } - } - assert_equal {} $err - } - - test "ZRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" { - set lexset {} - r del zset - for {set j 0} {$j < $elements} {incr j} { - set e [randstring 1 30 alpha] - lappend lexset $e - r zadd zset 0 $e - } - set lexset [lsort -unique $lexset] - for {set j 0} {$j < 100} {incr j} { - set min [randstring 1 30 alpha] - set max [randstring 1 30 alpha] - set mininc [randomInt 2] - set maxinc [randomInt 2] - if {$mininc} {set cmin "\[$min"} else {set cmin "($min"} - if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"} - set rev [randomInt 2] - if {$rev} { - break - set cmd zrevrangebylex - } else { - set cmd zrangebylex - } - - # Make sure data is the same in both sides - assert {[r zrange zset 0 -1] eq $lexset} - - # Get the Redis output - set output [r $cmd zset $cmin $cmax] - if {$rev} { - set outlen [r zlexcount zset $cmax $cmin] - } else { - set outlen [r zlexcount zset $cmin $cmax] - } - - # Compute the same output via Tcl - set o {} - set copy $lexset - if {(!$rev && [string compare $min $max] > 0) || - ($rev && [string compare $max $min] > 0)} { - # Empty output when ranges are inverted. - } else { - if {$rev} { - # Invert the Tcl array using Redis itself. 
- set copy [r zrevrange zset 0 -1] - # Invert min / max as well - lassign [list $min $max $mininc $maxinc] \ - max min maxinc mininc - } - foreach e $copy { - set mincmp [string compare $e $min] - set maxcmp [string compare $e $max] - if { - ($mininc && $mincmp >= 0 || !$mininc && $mincmp > 0) - && - ($maxinc && $maxcmp <= 0 || !$maxinc && $maxcmp < 0) - } { - lappend o $e - } - } - } - assert {$o eq $output} - assert {$outlen eq [llength $output]} - } - } - - test "ZREMRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" { - set lexset {} - r del zset zsetcopy - for {set j 0} {$j < $elements} {incr j} { - set e [randstring 1 30 alpha] - lappend lexset $e - r zadd zset 0 $e - } - set lexset [lsort -unique $lexset] - for {set j 0} {$j < 100} {incr j} { - # Copy... - r zunionstore zsetcopy 1 zset - set lexsetcopy $lexset - - set min [randstring 1 30 alpha] - set max [randstring 1 30 alpha] - set mininc [randomInt 2] - set maxinc [randomInt 2] - if {$mininc} {set cmin "\[$min"} else {set cmin "($min"} - if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"} - - # Make sure data is the same in both sides - assert {[r zrange zset 0 -1] eq $lexset} - - # Get the range we are going to remove - set torem [r zrangebylex zset $cmin $cmax] - set toremlen [r zlexcount zset $cmin $cmax] - r zremrangebylex zsetcopy $cmin $cmax - set output [r zrange zsetcopy 0 -1] - # Remove the range with Tcl from the original list - if {$toremlen} { - set first [lsearch -exact $lexsetcopy [lindex $torem 0]] - set last [expr {$first+$toremlen-1}] - set lexsetcopy [lreplace $lexsetcopy $first $last] - } - assert {$lexsetcopy eq $output} - } - } - - test "ZSETs skiplist implementation backlink consistency test - $encoding" { - set diff 0 - for {set j 0} {$j < $elements} {incr j} { - r zadd myzset [expr rand()] "Element-$j" - r zrem myzset "Element-[expr int(rand()*$elements)]" - } - - #assert_encoding $encoding myzset - set l1 [r zrange myzset 0 -1] - set l2 [r zrevrange myzset 0 -1] - for {set j 0} {$j < [llength $l1]} {incr j} { - if {[lindex $l1 $j] ne [lindex $l2 end-$j]} { - incr diff - } - } - assert_equal 0 $diff - } - - test "ZSETs ZRANK augmented skip list stress testing - $encoding" { - set err {} - r del myzset - for {set k 0} {$k < 2000} {incr k} { - set i [expr {$k % $elements}] - if {[expr rand()] < .2} { - r zrem myzset $i - } else { - set score [expr rand()] - r zadd myzset $score $i - #assert_encoding $encoding myzset - } - - set card [r zcard myzset] - if {$card > 0} { - set index [randomInt $card] - set ele [lindex [r zrange myzset $index $index] 0] - set rank [r zrank myzset $ele] - if {$rank != $index} { - set err "$ele RANK is wrong! ($rank != $index)" - break - } - } - } - assert_equal {} $err - } - } - - tags {"slow"} { - stressers ziplist - stressers skiplist - } -}
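# (Condensed, hedged restatement of the invariant the ZRANK stress test
# above checks, with a hypothetical key demo: every member's rank must
# equal its position in ZRANGE order.)
set elems [r zrange demo 0 -1]
for {set i 0} {$i < [llength $elems]} {incr i} {
    assert_equal $i [r zrank demo [lindex $elems $i]]
}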