diff --git a/.ci/env/apt.sh b/.ci/env/apt.sh
index 53e99fa89e6..d0fbf5ad399 100755
--- a/.ci/env/apt.sh
+++ b/.ci/env/apt.sh
@@ -31,16 +31,21 @@ function add_repo {
 }
 
 function install_dpcpp {
-    sudo apt-get install -y intel-oneapi-compiler-dpcpp-cpp-2025.0 intel-oneapi-runtime-libs=2025.0.0-406
+    sudo apt-get install -y intel-oneapi-compiler-dpcpp-cpp-2025.0 intel-oneapi-runtime-libs
 }
 
 function install_tbb {
     sudo apt-get install -y intel-oneapi-tbb-devel-2022.0
 }
 
+function install_dpl {
+    sudo apt-get install -y intel-oneapi-libdpstd-devel
+}
+
 function install_mkl {
     sudo apt-get install -y intel-oneapi-mkl-devel-2025.0
     install_tbb
+    install_dpl
 }
 
 function install_clang-format {
@@ -129,6 +134,9 @@ elif [ "${component}" == "tbb" ]; then
 elif [ "${component}" == "mkl" ]; then
     add_repo
     install_mkl
+elif [ "${component}" == "dpl" ]; then
+    add_repo
+    install_dpl
 elif [ "${component}" == "gnu-cross-compilers" ]; then
     update
     install_gnu-cross-compilers "$2"
@@ -160,6 +168,6 @@ elif [ "${component}" == "miniforge" ] ; then
     install_dev-base-conda
 else
     echo "Usage:"
-    echo " $0 [dpcpp|tbb|mkl|gnu-cross-compilers|clang-format|dev-base|qemu-apt|qemu-deb|llvm-version|build-sysroot|miniforge]"
+    echo " $0 [dpcpp|tbb|mkl|dpl|gnu-cross-compilers|clang-format|dev-base|qemu-apt|qemu-deb|llvm-version|build-sysroot|miniforge]"
     exit 1
 fi
diff --git a/.ci/pipeline/ci.yml b/.ci/pipeline/ci.yml
index 8b1f494149f..fbd71714600 100755
--- a/.ci/pipeline/ci.yml
+++ b/.ci/pipeline/ci.yml
@@ -29,7 +29,7 @@ variables:
   VM_IMAGE : 'ubuntu-24.04'
   SYSROOT_OS: 'noble'
   WINDOWS_BASEKIT_URL: 'https://registrationcenter-download.intel.com/akdlm/IRC_NAS/b380d914-366b-4b77-a74a-05e3c38b3514/intel-oneapi-base-toolkit-2025.0.0.882_offline.exe'
-  WINDOWS_DPCPP_COMPONENTS: 'intel.oneapi.win.mkl.devel:intel.oneapi.win.tbb.devel'
+  WINDOWS_DPCPP_COMPONENTS: 'intel.oneapi.win.mkl.devel:intel.oneapi.win.tbb.devel:intel.oneapi.win.dpl'
 
 resources:
   repositories:
diff --git a/INSTALL.md b/INSTALL.md
index efff2765a04..292eb41c40e 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -23,6 +23,7 @@ Required Software:
 * BLAS and LAPACK libraries - both provided by oneMKL
 * Python version 3.9 or higher
 * oneTBB library (repository contains script to download it)
+* oneDPL library
 * Microsoft Visual Studio\* (Windows\* only)
 * [MSYS2](http://msys2.github.io) (Windows\* only)
 * `make` and `dos2unix` tools; install these packages using MSYS2 on Windows\* as follows:
@@ -105,9 +106,24 @@ is available as an alternative to the manual setup.
 
         ./dev/download_tbb.sh
 
-6. Download and install Python (version 3.9 or higher).
+6. Set up oneDPL
+   _Note: if you used the general oneAPI setvars script from a Base Toolkit installation, this step will not be necessary as oneDPL will already have been set up._
 
-7. Build oneDAL via command-line interface. Choose the appropriate commands based on the interface, platform, and the compiler you use. Interface and platform are required arguments of makefile while others are optional. Below you can find the set of examples for building oneDAL. You may use a combination of them to get the desired build configuration:
+   Download and install [Intel(R) oneDPL](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-library.html).
+   Set the environment variables for Intel(R) oneDPL. For example:
+
+   - oneDPL (Windows\*):
+
+         call "C:\Program Files (x86)\Intel\oneAPI\dpl\latest\env\vars.bat" intel64
+
+   - oneDPL (Linux\*):
+
+         source /opt/intel/oneapi/dpl/latest/env/vars.sh intel64
+
+
+7. 
Download and install Python (version 3.9 or higher). + +8. Build oneDAL via command-line interface. Choose the appropriate commands based on the interface, platform, and the compiler you use. Interface and platform are required arguments of makefile while others are optional. Below you can find the set of examples for building oneDAL. You may use a combination of them to get the desired build configuration: - DAAL interfaces on **Linux\*** using **Intel(R) C++ Compiler**: diff --git a/MODULE.bazel b/MODULE.bazel index 3117d8f0df7..9294996a4ab 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -90,6 +90,21 @@ ccl_repo( ] ) +dpl_repo = use_repo_rule("@onedal//dev/bazel/deps:dpl.bzl", "dpl_repo") +dpl_repo( + name = "dpl", + root_env_var = "DPL_ROOT", + urls = [ + "https://files.pythonhosted.org/packages/95/f6/18f78cb933e01ecd9e99d37a10da4971a795fcfdd1d24640799b4050fdbb/onedpl_devel-2022.7.1-py2.py3-none-manylinux_2_28_x86_64.whl", + ], + sha256s = [ + "3b270999d2464c5151aa0e7995dda9e896d072c75069ccee1efae9dc56bdc417", + ], + strip_prefixes = [ + "onedpl_devel-2022.7.1.data/data", + ], +) + mkl_repo = use_repo_rule("@onedal//dev/bazel/deps:mkl.bzl", "mkl_repo") mkl_repo( name = "mkl", diff --git a/cpp/oneapi/dal.hpp b/cpp/oneapi/dal.hpp index cd9c1e74a32..781d3ba8a99 100644 --- a/cpp/oneapi/dal.hpp +++ b/cpp/oneapi/dal.hpp @@ -24,6 +24,7 @@ #include "oneapi/dal/exceptions.hpp" #include "oneapi/dal/infer.hpp" #include "oneapi/dal/read.hpp" +#include "oneapi/dal/rng.hpp" #include "oneapi/dal/train.hpp" #include "oneapi/dal/partial_compute.hpp" #include "oneapi/dal/finalize_compute.hpp" diff --git a/cpp/oneapi/dal/BUILD b/cpp/oneapi/dal/BUILD index 7a3ba863105..0a3d18bf3fd 100644 --- a/cpp/oneapi/dal/BUILD +++ b/cpp/oneapi/dal/BUILD @@ -31,6 +31,7 @@ dal_module( ], dpc_deps = [ "@mkl//:mkl_dpc", + "@dpl//:headers", ], ) diff --git a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_feature_type_dpc.cpp b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_feature_type_dpc.cpp index 7306533ed50..1a2a3ac7e3d 100644 --- a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_feature_type_dpc.cpp +++ b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_feature_type_dpc.cpp @@ -20,6 +20,7 @@ #include "oneapi/dal/table/row_accessor.hpp" #include "oneapi/dal/backend/memory.hpp" #include "oneapi/dal/detail/profiler.hpp" +#include #ifdef ONEDAL_DATA_PARALLEL @@ -34,8 +35,15 @@ inline sycl::event sort_inplace(sycl::queue& queue_, pr::ndarray& src, const bk::event_vector& deps = {}) { ONEDAL_ASSERT(src.get_count() > 0); + auto device = queue_.get_device(); + std::string device_name = device.get_info(); auto src_ind = pr::ndarray::empty(queue_, { src.get_count() }); - return pr::radix_sort_indices_inplace{ queue_ }(src, src_ind, deps); + if (device_name.find("Data Center GPU Max") != std::string::npos) { + return pr::radix_sort_indices_inplace_dpl(queue_, src, src_ind, deps); + } + else { + return pr::radix_sort_indices_inplace{ queue_ }(src, src_ind, deps); + } } template @@ -429,15 +437,36 @@ sycl::event indexed_features::operator()(const table& tbl, pr::ndarray::empty(queue_, { row_count_ }, sycl::usm::alloc::device); } - pr::radix_sort_indices_inplace sort{ queue_ }; - sycl::event last_event; - - for (Index i = 0; i < column_count_; i++) { - last_event = extract_column(data_nd_, values_nd, indices_nd, i, { last_event }); - last_event = sort(values_nd, indices_nd, { last_event }); - last_event = - compute_bins(values_nd, indices_nd, column_bin_vec_[i], entries_[i], i, { last_event }); + auto device 
= queue_.get_device(); + std::string device_name = device.get_info(); + if (device_name.find("Data Center GPU Max") != std::string::npos) { + for (Index i = 0; i < column_count_; i++) { + last_event = extract_column(data_nd_, values_nd, indices_nd, i, { last_event }); + last_event = pr::radix_sort_indices_inplace_dpl(queue_, + values_nd, + indices_nd, + { last_event }); + last_event = compute_bins(values_nd, + indices_nd, + column_bin_vec_[i], + entries_[i], + i, + { last_event }); + } + } + else { + pr::radix_sort_indices_inplace sort{ queue_ }; + for (Index i = 0; i < column_count_; i++) { + last_event = extract_column(data_nd_, values_nd, indices_nd, i, { last_event }); + last_event = sort(values_nd, indices_nd, { last_event }); + last_event = compute_bins(values_nd, + indices_nd, + column_bin_vec_[i], + entries_[i], + i, + { last_event }); + } } last_event.wait_and_throw(); diff --git a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_kernel_hist_impl.hpp b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_kernel_hist_impl.hpp index 64dbae4c084..d764bc636cb 100644 --- a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_kernel_hist_impl.hpp +++ b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_kernel_hist_impl.hpp @@ -21,7 +21,7 @@ #include "oneapi/dal/backend/primitives/utils.hpp" #include "oneapi/dal/algo/decision_forest/train_types.hpp" -#include "oneapi/dal/backend/primitives/rng/host_engine_collection.hpp" +#include "oneapi/dal/backend/primitives/rng/device_engine.hpp" #include "oneapi/dal/algo/decision_forest/backend/gpu/train_misc_structs.hpp" #include "oneapi/dal/algo/decision_forest/backend/gpu/train_impurity_data.hpp" @@ -50,8 +50,7 @@ class train_kernel_hist_impl { using model_manager_t = train_model_manager; using train_context_t = train_context; using imp_data_t = impurity_data; - using rng_engine_t = pr::host_engine; - using rng_engine_list_t = std::vector; + using rng_engine_t = pr::device_engine; using msg = dal::detail::error_messages; using comm_t = bk::communicator; using node_t = node; @@ -79,7 +78,7 @@ class train_kernel_hist_impl { Index class_count) const; sycl::event gen_initial_tree_order(train_context_t& ctx, - rng_engine_list_t& rng_engine_list, + rng_engine_t& rng_engine, pr::ndarray& node_list, pr::ndarray& tree_order_level, Index engine_offset, @@ -103,6 +102,7 @@ class train_kernel_hist_impl { const table& data, const table& labels, const table& weights); + /// Allocates all buffers that are used for training. /// @param[in] ctx a training context structure for a GPU backend void allocate_buffers(const train_context_t& ctx); @@ -115,12 +115,12 @@ class train_kernel_hist_impl { /// @param[in] ctx a training context structure for a GPU backend /// @param[in] node_count number of nodes on the current level /// @param[in] node_vs_tree_map an initial tree order - /// @param[in] rng_engine_list a list of random generator engines + /// @param[in] rng_engine a random generator engine std::tuple, sycl::event> gen_feature_list( const train_context_t& ctx, Index node_count, const pr::ndarray& node_vs_tree_map, - rng_engine_list_t& rng_engine_list); + rng_engine_t& rng_engine); /// Generates random thresholds for each node and for each selected feature for node. /// Thresholds are used for a random splitter kernel to split each node. 
@@ -129,12 +129,12 @@ class train_kernel_hist_impl { /// @param[in] ctx a training context structure for a GPU backend /// @param[in] node_count number of nodes on the current level /// @param[in] node_vs_tree_map an initial tree order - /// @param[in] rng_engine_list a list of random generator engines + /// @param[in] rng_engine a random generator engine std::tuple, sycl::event> gen_random_thresholds( const train_context_t& ctx, Index node_count, const pr::ndarray& node_vs_tree_map, - rng_engine_list_t& rng_engine_list); + rng_engine_t& rng_engine); /// Computes initial impurity for each node. /// @@ -561,7 +561,7 @@ class train_kernel_hist_impl { /// @param[in] oob_per_obs_list an array of OOB values per observation /// @param[in] var_imp variable importance values /// @param[in] var_imp_variance variable importance variance values - /// @param[in] rng_engine_arr a list of random generator engines + /// @param[in] rng_engine a random generator engine /// @param[in] tree_idx a tree index /// @param[in] tree_in_block number of trees in the computational block /// @param[in] built_tree_count number of built trees @@ -575,7 +575,7 @@ class train_kernel_hist_impl { pr::ndarray& oob_per_obs_list, pr::ndarray& var_imp, pr::ndarray& var_imp_variance, - const rng_engine_list_t& rng_engine_arr, + rng_engine_t& rng_engine, Index tree_idx, Index tree_in_block, Index built_tree_count, diff --git a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_kernel_hist_impl_dpc.cpp b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_kernel_hist_impl_dpc.cpp index a396f11c048..aa256b54fa6 100644 --- a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_kernel_hist_impl_dpc.cpp +++ b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_kernel_hist_impl_dpc.cpp @@ -173,6 +173,7 @@ void train_kernel_hist_impl::init_params(train_context_ : ctx.column_count_ / 3 ? 
ctx.column_count_ / 3 : 1; } + ctx.min_observations_in_leaf_node_ = desc.get_min_observations_in_leaf_node(); ctx.impurity_threshold_ = desc.get_impurity_threshold(); @@ -231,9 +232,11 @@ void train_kernel_hist_impl::init_params(train_context_ ctx.float_min_ = de::limits::min(); ctx.index_max_ = de::limits::max(); - // define number of trees which can be built in parallel + // Calculate memory requirements and adjust fraction if needed const std::uint64_t device_global_mem_size = queue_.get_device().get_info(); + + // define number of trees which can be built in parallel const std::uint64_t device_max_mem_alloc_size = queue_.get_device().get_info(); @@ -339,6 +342,7 @@ void train_kernel_hist_impl::allocate_buffers(const tra pr::ndarray::empty(queue_, { ctx.selected_row_total_count_ * ctx.tree_in_block_ }, alloc::device); + tree_order_lev_buf_ = pr::ndarray::empty(queue_, { ctx.selected_row_total_count_ * ctx.tree_in_block_ }, @@ -368,7 +372,7 @@ void train_kernel_hist_impl::allocate_buffers(const tra template sycl::event train_kernel_hist_impl::gen_initial_tree_order( train_context_t& ctx, - rng_engine_list_t& rng_engine_list, + rng_engine_t& rng_engine, pr::ndarray& node_list_host, pr::ndarray& tree_order_level, Index engine_offset, @@ -380,78 +384,71 @@ sycl::event train_kernel_hist_impl::gen_initial_tree_or ctx.tree_in_block_ * ctx.selected_row_total_count_); sycl::event last_event; + Index* const node_list_ptr = node_list_host.get_mutable_data(); + Index* const tree_order_ptr = tree_order_level.get_mutable_data(); if (ctx.bootstrap_) { - auto selected_row_global_host = - pr::ndarray::empty({ ctx.selected_row_total_count_ * ctx.tree_in_block_ }); - pr::ndarray selected_row_host; - if (ctx.distr_mode_) { - selected_row_host = pr::ndarray::empty( - { ctx.selected_row_total_count_ * ctx.tree_in_block_ }); - } + // Generate random indices directly into tree_order_ptr + auto generation_event = pr::uniform(queue_, + ctx.selected_row_total_count_ * node_count, + tree_order_ptr, + rng_engine, + 0, + ctx.row_total_count_); - Index* const selected_row_global_ptr = selected_row_global_host.get_mutable_data(); - Index* const selected_row_ptr = - ctx.distr_mode_ ? 
selected_row_host.get_mutable_data() : nullptr; - Index* const node_list_ptr = node_list_host.get_mutable_data(); + if (ctx.distr_mode_) { + last_event = queue_.submit([&](sycl::handler& cgh) { + cgh.depends_on(generation_event); + cgh.parallel_for(sycl::range<1>(node_count), [=](sycl::id<1> node_idx) { + // Use tree_order_ptr directly for both source and destination + Index* ptr = tree_order_ptr + ctx.selected_row_total_count_ * node_idx; + + Index row_idx = 0; + for (Index i = 0; i < ctx.selected_row_total_count_; i++) { + Index val = ptr[i]; + if (val >= ctx.global_row_offset_ && + val < (ctx.global_row_offset_ + ctx.row_count_)) { + ptr[row_idx++] = val - ctx.global_row_offset_; + } + } + // Store count directly in node_list_host + Index* node_ptr = node_list_ptr + node_idx * impl_const_t::node_prop_count_; + node_ptr[impl_const_t::ind_lrc] = row_idx; - for (Index node_idx = 0; node_idx < node_count; ++node_idx) { - Index* gen_row_idx_global_ptr = - selected_row_global_ptr + ctx.selected_row_total_count_ * node_idx; - pr::uniform(ctx.selected_row_total_count_, - gen_row_idx_global_ptr, - rng_engine_list[engine_offset + node_idx], - 0, - ctx.row_total_count_); - - if (ctx.distr_mode_) { - Index* node_ptr = node_list_ptr + node_idx * impl_const_t::node_prop_count_; - Index* src = gen_row_idx_global_ptr; - - Index* const dst = selected_row_ptr + ctx.selected_row_total_count_ * node_idx; - - Index row_idx = 0; - for (Index i = 0; i < ctx.selected_row_total_count_; ++i) { - dst[i] = 0; - if (src[i] >= ctx.global_row_offset_ && - src[i] < (ctx.global_row_offset_ + ctx.row_count_)) { - dst[row_idx++] = src[i] - ctx.global_row_offset_; + // Zero out remaining elements + for (Index i = row_idx; i < ctx.selected_row_total_count_; i++) { + ptr[i] = 0; } - } - node_ptr[impl_const_t::ind_lrc] = row_idx; - } + }); + }); + } + else { + last_event = generation_event; // Direct assignment is complete } - - last_event = ctx.distr_mode_ - ? tree_order_level.assign_from_host(queue_, selected_row_host) - : tree_order_level.assign_from_host(queue_, selected_row_global_host); } else { Index row_count = ctx.selected_row_count_; - Index stride = ctx.selected_row_total_count_; if (ctx.distr_mode_) { row_count = 0; if (ctx.global_row_offset_ < ctx.selected_row_total_count_) { row_count = std::min(ctx.selected_row_total_count_ - ctx.global_row_offset_, ctx.row_count_); } - // in case of no bootstrap - // it is valid case if this worker's rows set wasn't taken for tree build - // i.e. 
row_count can be eq 0 - - Index* node_list_ptr = node_list_host.get_mutable_data(); - - for (Index node_idx = 0; node_idx < node_count; ++node_idx) { - Index* node_ptr = node_list_ptr + node_idx * impl_const_t::node_prop_count_; - node_ptr[impl_const_t::ind_lrc] = row_count; - } + last_event = queue_.submit([&](sycl::handler& cgh) { + cgh.parallel_for(sycl::range<1>(node_count), [=](sycl::id<1> node_idx) { + // Store count directly in node_list_host + Index* node_ptr = node_list_ptr + node_idx * impl_const_t::node_prop_count_; + node_ptr[impl_const_t::ind_lrc] = row_count; + }); + }); } if (row_count > 0) { - last_event = train_service_kernels_.initialize_tree_order(tree_order_level, - node_count, - row_count, - stride); + last_event = + train_service_kernels_.initialize_tree_order(tree_order_level, + node_count, + row_count, + ctx.selected_row_total_count_); } } @@ -464,32 +461,26 @@ train_kernel_hist_impl::gen_feature_list( const train_context_t& ctx, Index node_count, const pr::ndarray& node_vs_tree_map_list, - rng_engine_list_t& rng_engine_list) { + rng_engine_t& rng_engine) { ONEDAL_PROFILER_TASK(gen_feature_list, queue_); ONEDAL_ASSERT(node_vs_tree_map_list.get_count() == node_count); de::check_mul_overflow((node_count + 1), ctx.selected_ftr_count_); - // first part is used for features indices, +1 block - part for generator - auto selected_features_host = - pr::ndarray::empty({ (node_count + 1) * ctx.selected_ftr_count_ }); + auto selected_features_com = pr::ndarray::empty(queue_, { node_count * ctx.selected_ftr_count_ }, - alloc::device); + alloc::shared); - auto selected_features_host_ptr = selected_features_host.get_mutable_data(); + auto selected_features_host_ptr = selected_features_com.get_mutable_data(); - auto node_vs_tree_map_list_host = node_vs_tree_map_list.to_host(queue_); - - auto tree_map_ptr = node_vs_tree_map_list_host.get_mutable_data(); if (ctx.selected_ftr_count_ != ctx.column_count_) { for (Index node = 0; node < node_count; ++node) { pr::uniform_without_replacement( ctx.selected_ftr_count_, selected_features_host_ptr + node * ctx.selected_ftr_count_, - selected_features_host_ptr + (node + 1) * ctx.selected_ftr_count_, - rng_engine_list[tree_map_ptr[node]], + rng_engine, 0, ctx.column_count_); } @@ -502,11 +493,7 @@ train_kernel_hist_impl::gen_feature_list( } } - auto event = selected_features_com.assign_from_host(queue_, - selected_features_host_ptr, - selected_features_com.get_count()); - - return std::tuple{ selected_features_com, event }; + return std::tuple{ selected_features_com, sycl::event() }; } template @@ -515,35 +502,26 @@ train_kernel_hist_impl::gen_random_thresholds( const train_context_t& ctx, Index node_count, const pr::ndarray& node_vs_tree_map, - rng_engine_list_t& rng_engine_list) { + rng_engine_t& rng_engine) { ONEDAL_PROFILER_TASK(gen_random_thresholds, queue_); ONEDAL_ASSERT(node_vs_tree_map.get_count() == node_count); auto node_vs_tree_map_list_host = node_vs_tree_map.to_host(queue_); - auto tree_map_ptr = node_vs_tree_map_list_host.get_mutable_data(); - - // Create arrays for random generated bins - auto random_bins_host = - pr::ndarray::empty(queue_, { node_count * ctx.selected_ftr_count_ }); auto random_bins_com = pr::ndarray::empty(queue_, { node_count * ctx.selected_ftr_count_ }, - alloc::device); - auto random_bins_host_ptr = random_bins_host.get_mutable_data(); + alloc::shared); + auto random_bins_host_ptr = random_bins_com.get_mutable_data(); // Generate random bins for selected features - for (Index node = 0; node < node_count; 
++node) { - pr::uniform(ctx.selected_ftr_count_, - random_bins_host_ptr + node * ctx.selected_ftr_count_, - rng_engine_list[tree_map_ptr[node]], - 0.0f, - 1.0f); - } - auto event_rnd_generate = - random_bins_com.assign_from_host(queue_, random_bins_host_ptr, random_bins_com.get_count()); + pr::uniform(ctx.selected_ftr_count_ * node_count, + random_bins_host_ptr, + rng_engine, + 0.0f, + 1.0f); - return std::tuple{ random_bins_com, event_rnd_generate }; + return std::tuple{ random_bins_com, sycl::event() }; }; template @@ -1610,7 +1588,7 @@ sycl::event train_kernel_hist_impl::compute_results( pr::ndarray& oob_per_obs_list, pr::ndarray& var_imp, pr::ndarray& var_imp_variance, - const rng_engine_list_t& engine_arr, + rng_engine_t& engine_gpu, Index tree_idx_in_block, Index tree_in_block_count, Index built_tree_count, @@ -1658,9 +1636,7 @@ sycl::event train_kernel_hist_impl::compute_results( const Float div1 = Float(1) / Float(built_tree_count + tree_idx_in_block + 1); for (Index column_idx = 0; column_idx < ctx.column_count_; ++column_idx) { - pr::shuffle(oob_row_count, - permutation_ptr, - engine_arr[built_tree_count + tree_idx_in_block]); + pr::shuffle(oob_row_count, permutation_ptr, engine_gpu); const Float oob_err_perm = compute_oob_error_perm(ctx, model_manager, data_host, @@ -1846,19 +1822,17 @@ train_result train_kernel_hist_impl::operator()( model_manager_t model_manager(ctx, ctx.tree_count_, ctx.column_count_); - /*init engines*/ - auto skip_num = - de::check_mul_overflow(ctx.row_total_count_, (ctx.selected_ftr_count_ + 1)); - skip_num = de::check_mul_overflow(ctx.tree_count_, skip_num); - - de::check_mul_overflow((ctx.tree_count_ - 1), skip_num); - - pr::host_engine_collection collection(ctx.tree_count_, desc.get_seed()); - rng_engine_list_t engine_arr = collection([&](std::size_t i, std::size_t& skip) { - skip = i * skip_num; - }); + auto engine_type = pr::convert_engine_method(desc.get_engine_type()); + rng_engine_t engine_gpu = + ::oneapi::dal::backend::primitives::device_engine(queue_, desc.get_seed(), engine_type); pr::ndarray node_imp_decrease_list; + if (ctx.distr_mode_) { + std::int64_t skip_value = + comm_.get_rank() * ctx.tree_count_ * ctx.selected_row_total_count_; + skip_value += comm_.get_rank() * ctx.selected_ftr_count_ * ctx.tree_count_ * 2; + engine_gpu.skip_ahead(skip_value); + } sycl::event last_event; @@ -1902,16 +1876,16 @@ train_result train_kernel_hist_impl::operator()( 0; // for distr_mode it will be updated during tree_order_gen node_ptr[impl_const_t::ind_fid] = impl_const_t::bad_val_; } - + auto level_node_list_init_gpu = level_node_list_init_host.to_device(queue_); last_event = gen_initial_tree_order(ctx, - engine_arr, - level_node_list_init_host, + engine_gpu, + level_node_list_init_gpu, tree_order_lev_, iter, node_count); auto node_vs_tree_map_list = node_vs_tree_map_list_host.to_device(queue_); - level_node_lists.push_back(level_node_list_init_host.to_device(queue_)); + level_node_lists.push_back(level_node_list_init_gpu); last_event = compute_initial_histogram(ctx, response_nd_, @@ -1939,11 +1913,11 @@ train_result train_kernel_hist_impl::operator()( imp_data_t left_child_imp_data(queue_, ctx, node_count); auto [selected_features_com, event] = - gen_feature_list(ctx, node_count, node_vs_tree_map_list, engine_arr); + gen_feature_list(ctx, node_count, node_vs_tree_map_list, engine_gpu); event.wait_and_throw(); auto [random_bins_com, gen_bins_event] = - gen_random_thresholds(ctx, node_count, node_vs_tree_map_list, engine_arr); + gen_random_thresholds(ctx, 
node_count, node_vs_tree_map_list, engine_gpu); gen_bins_event.wait_and_throw(); if (ctx.mdi_required_) { @@ -2047,18 +2021,14 @@ train_result train_kernel_hist_impl::operator()( node_vs_tree_map_list = node_vs_tree_map_list_new; - last_event = train_service_kernels_.do_level_partition_by_groups( - ctx, - full_data_nd_, - node_list, - tree_order_lev_, - tree_order_lev_buf_, - ctx.row_count_, - ctx.selected_row_total_count_, - ctx.column_count_, - node_count, - ctx.tree_in_block_, - { last_event }); + last_event = + train_service_kernels_.do_level_partition_by_groups(ctx, + full_data_nd_, + node_list, + tree_order_lev_, + tree_order_lev_buf_, + node_count, + { last_event }); } } last_event.wait_and_throw(); @@ -2079,7 +2049,7 @@ train_result train_kernel_hist_impl::operator()( oob_per_obs_list_, res_var_imp_, var_imp_variance_host_, - engine_arr, + engine_gpu, tree_idx, iter_tree_count, iter, diff --git a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_service_kernels.hpp b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_service_kernels.hpp index c832a4fd1ac..6134146f4e3 100644 --- a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_service_kernels.hpp +++ b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_service_kernels.hpp @@ -63,11 +63,7 @@ class train_service_kernels { const pr::ndarray& node_list, pr::ndarray& tree_order, pr::ndarray& tree_order_buf, - Index data_row_count, - Index data_selected_row_count, - Index data_column_count, Index node_count, - Index tree_count, const bk::event_vector& deps = {}); sycl::event initialize_tree_order(pr::ndarray& tree_order, diff --git a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_service_kernels_dpc.cpp b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_service_kernels_dpc.cpp index 11549f3d62d..192adabcf5c 100644 --- a/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_service_kernels_dpc.cpp +++ b/cpp/oneapi/dal/algo/decision_forest/backend/gpu/train_service_kernels_dpc.cpp @@ -236,18 +236,14 @@ sycl::event train_service_kernels::do_level_partition_b const pr::ndarray& node_list, pr::ndarray& tree_order, pr::ndarray& tree_order_buf, - Index data_row_count, - Index data_selected_row_count, - Index data_column_count, Index node_count, - Index tree_count, const bk::event_vector& deps) { ONEDAL_PROFILER_TASK(do_level_partition, queue_); - ONEDAL_ASSERT(data.get_count() == data_row_count * data_column_count); + ONEDAL_ASSERT(data.get_count() == ctx.row_count_ * ctx.column_count_); ONEDAL_ASSERT(node_list.get_count() == node_count * impl_const_t::node_prop_count_); - ONEDAL_ASSERT(tree_order.get_count() == data_selected_row_count * tree_count); - ONEDAL_ASSERT(tree_order_buf.get_count() == data_selected_row_count * tree_count); + ONEDAL_ASSERT(tree_order.get_count() == ctx.selected_row_count_ * ctx.tree_count_); + ONEDAL_ASSERT(tree_order_buf.get_count() == ctx.selected_row_count_ * ctx.tree_count_); const Index total_block_count = de::check_mul_overflow(node_count, partition_max_block_count_); @@ -269,7 +265,7 @@ sycl::event train_service_kernels::do_level_partition_b aux_node_buffer_prop_count_; // num of auxilliary attributes for node const Index max_block_count = partition_max_block_count_; const Index min_block_size = partition_min_block_size_; - + const auto column_count = ctx.column_count_; const Bin* data_ptr = data.get_data(); const Index* node_list_ptr = node_list.get_data(); Index* node_aux_list_ptr = node_aux_list.get_mutable_data(); @@ -346,8 +342,8 @@ sycl::event 
train_service_kernels::do_level_partition_b i += sub_group_size) { const Index id = tree_order_ptr[offset + i]; const Index to_right = - Index(static_cast( - data_ptr[id * data_column_count + feat_id]) > feat_bin); + Index(static_cast(data_ptr[id * column_count + feat_id]) > + feat_bin); group_row_to_right_count += sycl::reduce_over_group(sbg, to_right, plus()); } @@ -367,9 +363,8 @@ sycl::event train_service_kernels::do_level_partition_b for (Index i = ind_start + sub_group_local_id; i < ind_end; i += sub_group_size) { const Index id = tree_order_ptr[offset + i]; - const Index to_right = - Index(static_cast(data_ptr[id * data_column_count + feat_id]) > - feat_bin); + const Index to_right = Index( + static_cast(data_ptr[id * column_count + feat_id]) > feat_bin); const Index boundary = group_row_to_right_count + sycl::exclusive_scan_over_group(sbg, to_right, plus()); diff --git a/cpp/oneapi/dal/algo/decision_forest/common.cpp b/cpp/oneapi/dal/algo/decision_forest/common.cpp index 1b15dd83220..b4344162a3b 100644 --- a/cpp/oneapi/dal/algo/decision_forest/common.cpp +++ b/cpp/oneapi/dal/algo/decision_forest/common.cpp @@ -61,6 +61,10 @@ class descriptor_impl : public base { error_metric_mode error_metric_mode_value = error_metric_mode::none; infer_mode infer_mode_value = infer_mode::class_responses; + // The default engine has been switched from mt2203 to philox for GPU, + // as philox is more efficient in terms of performance on GPU architectures. + // Note: Due to this change, some conformance(not critical) tests might fail as a result. + engine_type df_engine_type = engine_type::philox4x32x10; bool memory_saving_mode = false; bool bootstrap = true; splitter_mode splitter_mode_value = splitter_mode::best; @@ -179,6 +183,11 @@ std::int64_t descriptor_base::get_seed() const { return impl_->seed; } +template +engine_type descriptor_base::get_engine_type() const { + return impl_->df_engine_type; +} + template void descriptor_base::set_observations_per_tree_fraction_impl(double value) { check_domain_cond((value > 0.0 && value <= 1.0), @@ -299,6 +308,11 @@ void descriptor_base::set_seed_impl(std::int64_t value) { impl_->seed = value; } +template +void descriptor_base::set_engine_type_impl(engine_type value) { + impl_->df_engine_type = value; +} + template class ONEDAL_EXPORT descriptor_base; template class ONEDAL_EXPORT descriptor_base; diff --git a/cpp/oneapi/dal/algo/decision_forest/common.hpp b/cpp/oneapi/dal/algo/decision_forest/common.hpp index 0350002c83d..56541e6bfec 100644 --- a/cpp/oneapi/dal/algo/decision_forest/common.hpp +++ b/cpp/oneapi/dal/algo/decision_forest/common.hpp @@ -23,6 +23,8 @@ #include "oneapi/dal/detail/serialization.hpp" #include "oneapi/dal/detail/threading.hpp" +#include "oneapi/dal/rng.hpp" + namespace oneapi::dal::decision_forest { namespace task { @@ -249,6 +251,7 @@ class descriptor_base : public base { return get_voting_mode_impl(); } + engine_type get_engine_type() const; std::int64_t get_seed() const; protected: @@ -277,6 +280,7 @@ class descriptor_base : public base { infer_mode get_infer_mode_impl() const; voting_mode get_voting_mode_impl() const; + void set_engine_type_impl(engine_type value); void set_seed_impl(std::int64_t value); private: @@ -594,6 +598,17 @@ class descriptor : public detail::descriptor_base { return *this; } + /// Engine method for the random numbers generator used by the algorithm + /// @remark default = engine_method::philox4x32x10 + engine_type get_engine_type() const { + return base_t::get_engine_type(); + } + + auto& 
set_engine_type(engine_type value) { + base_t::set_engine_type_impl(value); + return *this; + } + /// Seed for the random numbers generator used by the algorithm /// @invariant :expr:`tree_count > 0` std::int64_t get_seed() const { diff --git a/cpp/oneapi/dal/algo/decision_forest/test/spmd.cpp b/cpp/oneapi/dal/algo/decision_forest/test/spmd.cpp index 534acddb04a..18f855e03c2 100644 --- a/cpp/oneapi/dal/algo/decision_forest/test/spmd.cpp +++ b/cpp/oneapi/dal/algo/decision_forest/test/spmd.cpp @@ -122,7 +122,7 @@ class df_spmd_test : public df_test> { train_result_t train_spmd_weighted_base_checks(const descriptor_t& desc, const te::dataframe& data, const te::table_id& data_table_id) { - const auto x = data.get_table(data_table_id, range(0, -1)); + const auto x = data.get_table(data_table_id, range(0, -2)); const auto y = data.get_table(data_table_id, range(data.get_column_count() - 2, data.get_column_count() - 1)); @@ -400,6 +400,24 @@ DF_SPMD_CLS_TEST("df cls base check with default params") { this->infer_base_checks(desc, data_test, this->get_homogen_table_id(), model, checker_list); } +DF_SPMD_CLS_TEST("df cls base check with default params bootstrap disabled") { + SKIP_IF(this->get_policy().is_cpu()); + SKIP_IF(this->not_available_on_device()); + SKIP_IF(this->not_float64_friendly()); + + const auto [data, data_test, class_count, checker_list] = this->get_cls_dataframe_base(); + + auto desc = this->get_default_descriptor(); + + desc.set_class_count(class_count); + desc.set_bootstrap(false); + this->set_rank_count(2); + const auto train_result = + this->train_spmd_base_checks(desc, data, this->get_homogen_table_id()); + const auto model = train_result.get_model(); + this->infer_base_checks(desc, data_test, this->get_homogen_table_id(), model, checker_list); +} + DF_SPMD_CLS_TEST("df cls base check with default params and train weights") { SKIP_IF(this->get_policy().is_cpu()); SKIP_IF(this->not_available_on_device()); @@ -469,6 +487,23 @@ DF_SPMD_REG_TEST("df reg base check with default params") { this->infer_base_checks(desc, data_test, this->get_homogen_table_id(), model, checker_list); } +DF_SPMD_REG_TEST("df reg base check with default params without bootstrap") { + SKIP_IF(this->get_policy().is_cpu()); + SKIP_IF(this->not_available_on_device()); + SKIP_IF(this->not_float64_friendly()); + + const auto [data, data_test, checker_list] = this->get_reg_dataframe_base(); + + auto desc = this->get_default_descriptor(); + desc.set_bootstrap(false); + + this->set_rank_count(2); + const auto train_result = + this->train_spmd_base_checks(desc, data, this->get_homogen_table_id()); + const auto model = train_result.get_model(); + this->infer_base_checks(desc, data_test, this->get_homogen_table_id(), model, checker_list); +} + DF_SPMD_REG_TEST("df reg base check with default params and train weights") { SKIP_IF(this->get_policy().is_cpu()); SKIP_IF(this->not_available_on_device()); diff --git a/cpp/oneapi/dal/backend/primitives/rng/device_engine.hpp b/cpp/oneapi/dal/backend/primitives/rng/device_engine.hpp index 7acb09812e5..a49faaa3803 100644 --- a/cpp/oneapi/dal/backend/primitives/rng/device_engine.hpp +++ b/cpp/oneapi/dal/backend/primitives/rng/device_engine.hpp @@ -42,8 +42,8 @@ class gen_base { virtual ~gen_base() = default; /// Method to retrieve the engine method. - /// @return The engine method as an enum value of `engine_type`. - virtual engine_type get_engine_type() const = 0; + /// @return The engine method as an enum value of `engine_type_internal`. 
+ virtual engine_type_internal get_engine_type() const = 0; /// Method to skip ahead in the random number sequence. /// @param[in] nSkip The number of steps to skip in the generator sequence. @@ -64,9 +64,9 @@ class gen_mt2203 : public gen_base { : _gen(queue, seed, engine_idx) {} /// Returns the engine method for mt2203. - /// @return The `mt2203` engine method as an enum value of `engine_type`. - engine_type get_engine_type() const override { - return engine_type::mt2203; + /// @return The `mt2203` engine method as an enum value of `engine_type_internal`. + engine_type_internal get_engine_type() const override { + return engine_type_internal::mt2203; } /// Skips ahead in the random number sequence for mt2203 on the GPU. @@ -99,9 +99,9 @@ class gen_philox : public gen_base { gen_philox(sycl::queue queue, std::int64_t seed) : _gen(queue, seed) {} /// Returns the engine method for philox4x32x10. - /// @return The `philox4x32x10` engine method as an enum value of `engine_type`. - engine_type get_engine_type() const override { - return engine_type::philox4x32x10; + /// @return The `philox4x32x10` engine method as an enum value of `engine_type_internal`. + engine_type_internal get_engine_type() const override { + return engine_type_internal::philox4x32x10; } /// Skips ahead in the random number sequence for philox4x32x10 on the GPU. @@ -133,9 +133,9 @@ class gen_mrg32k : public gen_base { gen_mrg32k(sycl::queue queue, std::int64_t seed) : _gen(queue, seed) {} /// Returns the engine method for mrg32k3a. - /// @return The `mrg32k3a` engine method as an enum value of `engine_type`. - engine_type get_engine_type() const override { - return engine_type::mrg32k3a; + /// @return The `mrg32k3a` engine method as an enum value of `engine_type_internal`. + engine_type_internal get_engine_type() const override { + return engine_type_internal::mrg32k3a; } /// Skips ahead in the random number sequence for mrg32k3a on the GPU. @@ -167,9 +167,9 @@ class gen_mt19937 : public gen_base { gen_mt19937(sycl::queue queue, std::int64_t seed) : _gen(queue, seed) {} /// Returns the engine method for mt19937. - /// @return The `mt19937` engine method as an enum value of `engine_type`. - engine_type get_engine_type() const override { - return engine_type::mt19937; + /// @return The `mt19937` engine method as an enum value of `engine_type_internal`. + engine_type_internal get_engine_type() const override { + return engine_type_internal::mt19937; } /// Skips ahead in the random number sequence for mt19937 on the GPU. @@ -201,9 +201,9 @@ class gen_mcg59 : public gen_base { gen_mcg59(sycl::queue queue, std::int64_t seed) : _gen(queue, seed) {} /// Returns the engine method for mcg59. - /// @return The `mcg59` engine method as an enum value of `engine_type`. - engine_type get_engine_type() const override { - return engine_type::mcg59; + /// @return The `mcg59` engine method as an enum value of `engine_type_internal`. + engine_type_internal get_engine_type() const override { + return engine_type_internal::mcg59; } /// Skips ahead in the random number sequence for mcg59 on the GPU. @@ -236,30 +236,30 @@ class device_engine { public: /// @param[in] queue The SYCL queue used to manage device operations. /// @param[in] seed The initial seed for the random number generator. Defaults to `777`. - /// @param[in] method The engine method. Defaults to `engine_type::mt2203`. + /// @param[in] method The engine method. Defaults to `engine_type_internal::mt2203`. 
device_engine(sycl::queue& queue, std::int64_t seed = 777, - engine_type method = engine_type::mt2203, + engine_type_internal method = engine_type_internal::mt2203, std::int64_t idx = 0) : q(queue) { switch (method) { - case engine_type::mt2203: + case engine_type_internal::mt2203: host_engine_ = daal::algorithms::engines::mt2203::Batch<>::create(seed); dpc_engine_ = std::make_shared(queue, seed, idx); break; - case engine_type::mcg59: + case engine_type_internal::mcg59: host_engine_ = daal::algorithms::engines::mcg59::Batch<>::create(seed); dpc_engine_ = std::make_shared(queue, seed); break; - case engine_type::mrg32k3a: + case engine_type_internal::mrg32k3a: host_engine_ = daal::algorithms::engines::mrg32k3a::Batch<>::create(seed); dpc_engine_ = std::make_shared(queue, seed); break; - case engine_type::philox4x32x10: + case engine_type_internal::philox4x32x10: host_engine_ = daal::algorithms::engines::philox4x32x10::Batch<>::create(seed); dpc_engine_ = std::make_shared(queue, seed); break; - case engine_type::mt19937: + case engine_type_internal::mt19937: host_engine_ = daal::algorithms::engines::mt19937::Batch<>::create(seed); dpc_engine_ = std::make_shared(queue, seed); break; @@ -360,7 +360,6 @@ void uniform(std::int64_t count, Type* dst, device_engine& engine_, Type a, Type template void uniform_without_replacement(std::int64_t count, Type* dst, - Type* buffer, device_engine& engine_, Type a, Type b) { @@ -369,7 +368,7 @@ void uniform_without_replacement(std::int64_t count, throw domain_error(dal::detail::error_messages::unsupported_data_type()); } void* state = engine_.get_host_engine_state(); - uniform_dispatcher::uniform_without_replacement_by_cpu(count, dst, buffer, state, a, b); + uniform_dispatcher::uniform_without_replacement_by_cpu(count, dst, state, a, b); engine_.skip_ahead_gpu(count); } @@ -425,7 +424,6 @@ template sycl::event uniform_without_replacement(sycl::queue& queue, std::int64_t count, Type* dst, - Type* buffer, device_engine& engine_, Type a, Type b, @@ -454,12 +452,13 @@ sycl::event shuffle(sycl::queue& queue, /// @param[in] method The rng engine type. Defaults to `mt19937`. /// @param[in] deps Dependencies for the SYCL event. 
template -sycl::event partial_fisher_yates_shuffle(sycl::queue& queue_, - ndview& result_array, - std::int64_t top, - std::int64_t seed, - engine_type method = engine_type::mt19937, - const event_vector& deps = {}); +sycl::event partial_fisher_yates_shuffle( + sycl::queue& queue_, + ndview& result_array, + std::int64_t top, + std::int64_t seed, + engine_type_internal method = engine_type_internal::mt19937, + const event_vector& deps = {}); #endif } // namespace oneapi::dal::backend::primitives diff --git a/cpp/oneapi/dal/backend/primitives/rng/device_engine_dpc.cpp b/cpp/oneapi/dal/backend/primitives/rng/device_engine_dpc.cpp index 845d058ca30..88b70de7d06 100644 --- a/cpp/oneapi/dal/backend/primitives/rng/device_engine_dpc.cpp +++ b/cpp/oneapi/dal/backend/primitives/rng/device_engine_dpc.cpp @@ -29,33 +29,35 @@ sycl::event generate_rng(Distribution& distr, std::int64_t count, Type* dst, const event_vector& deps) { - switch (engine_.get_device_engine_base_ptr()->get_engine_type()) { - case engine_type::mt2203: { - auto& device_engine = - *(dynamic_cast(engine_.get_device_engine_base_ptr().get()))->get(); - return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); - } - case engine_type::mcg59: { - auto& device_engine = - *(dynamic_cast(engine_.get_device_engine_base_ptr().get()))->get(); - return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); - } - case engine_type::mrg32k3a: { - auto& device_engine = - *(dynamic_cast(engine_.get_device_engine_base_ptr().get()))->get(); - return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); - } - case engine_type::philox4x32x10: { - auto& device_engine = - *(dynamic_cast(engine_.get_device_engine_base_ptr().get()))->get(); - return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); - } - case engine_type::mt19937: { - auto& device_engine = - *(dynamic_cast(engine_.get_device_engine_base_ptr().get()))->get(); - return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); - } - default: throw std::runtime_error("Unsupported engine type in generate_rng"); + auto engine_type = engine_.get_device_engine_base_ptr()->get_engine_type(); + + if (engine_type == engine_type_internal::philox4x32x10) { + auto& device_engine = + *(static_cast(engine_.get_device_engine_base_ptr().get()))->get(); + return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); + } + else if (engine_type == engine_type_internal::mt19937) { + auto& device_engine = + *(static_cast(engine_.get_device_engine_base_ptr().get()))->get(); + return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); + } + else if (engine_type == engine_type_internal::mrg32k3a) { + auto& device_engine = + *(static_cast(engine_.get_device_engine_base_ptr().get()))->get(); + return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); + } + else if (engine_type == engine_type_internal::mcg59) { + auto& device_engine = + *(static_cast(engine_.get_device_engine_base_ptr().get()))->get(); + return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); + } + else if (engine_type == engine_type_internal::mt2203) { + auto& device_engine = + *(static_cast(engine_.get_device_engine_base_ptr().get()))->get(); + return oneapi::mkl::rng::generate(distr, device_engine, count, dst, deps); + } + else { + throw std::runtime_error("Unsupported engine type in generate_rng"); } } @@ -99,7 +101,6 @@ template sycl::event uniform_without_replacement(sycl::queue& queue, std::int64_t count, Type* 
dst, - Type* buffer, device_engine& engine_, Type a, Type b, @@ -110,7 +111,7 @@ sycl::event uniform_without_replacement(sycl::queue& queue, } void* state = engine_.get_host_engine_state(); engine_.skip_ahead_gpu(count); - uniform_dispatcher::uniform_without_replacement_by_cpu(count, dst, buffer, state, a, b); + uniform_dispatcher::uniform_without_replacement_by_cpu(count, dst, state, a, b); auto event = queue.submit([&](sycl::handler& h) { h.depends_on(deps); }); @@ -161,7 +162,7 @@ sycl::event partial_fisher_yates_shuffle(sycl::queue& queue_, ndview& result_array, std::int64_t top, std::int64_t seed, - engine_type method, + engine_type_internal method, const event_vector& deps) { device_engine eng_ = device_engine(queue_, seed, method); const auto casted_top = dal::detail::integral_cast(top); @@ -209,7 +210,6 @@ INSTANTIATE_UNIFORM(std::int32_t) template ONEDAL_EXPORT sycl::event uniform_without_replacement(sycl::queue& queue, \ std::int64_t count_, \ F* dst, \ - F* buff, \ device_engine& engine_, \ F a, \ F b, \ @@ -228,12 +228,12 @@ INSTANTIATE_UWR(std::int32_t) INSTANTIATE_SHUFFLE(std::int32_t) -#define INSTANTIATE_PARTIAL_SHUFFLE(F) \ - template ONEDAL_EXPORT sycl::event partial_fisher_yates_shuffle(sycl::queue& queue, \ - ndview& a, \ - std::int64_t top, \ - std::int64_t seed, \ - engine_type method, \ +#define INSTANTIATE_PARTIAL_SHUFFLE(F) \ + template ONEDAL_EXPORT sycl::event partial_fisher_yates_shuffle(sycl::queue& queue, \ + ndview& a, \ + std::int64_t top, \ + std::int64_t seed, \ + engine_type_internal method, \ const event_vector& deps); INSTANTIATE_PARTIAL_SHUFFLE(std::int32_t) diff --git a/cpp/oneapi/dal/backend/primitives/rng/host_engine.hpp b/cpp/oneapi/dal/backend/primitives/rng/host_engine.hpp index 7d6356196bd..b4597c11746 100644 --- a/cpp/oneapi/dal/backend/primitives/rng/host_engine.hpp +++ b/cpp/oneapi/dal/backend/primitives/rng/host_engine.hpp @@ -38,22 +38,23 @@ namespace oneapi::dal::backend::primitives { class host_engine { public: /// @param[in] seed The initial seed for the random number generator. Defaults to `777`. - /// @param[in] method The engine method. Defaults to `engine_type::mt2203`. - host_engine(std::int64_t seed = 777, engine_type method = engine_type::mt2203) { + /// @param[in] method The engine method. Defaults to `engine_type_internal::mt2203`. 
+ host_engine(std::int64_t seed = 777, + engine_type_internal method = engine_type_internal::mt2203) { switch (method) { - case engine_type::mt2203: + case engine_type_internal::mt2203: host_engine_ = daal::algorithms::engines::mt2203::Batch<>::create(seed); break; - case engine_type::mcg59: + case engine_type_internal::mcg59: host_engine_ = daal::algorithms::engines::mcg59::Batch<>::create(seed); break; - case engine_type::mrg32k3a: + case engine_type_internal::mrg32k3a: host_engine_ = daal::algorithms::engines::mrg32k3a::Batch<>::create(seed); break; - case engine_type::philox4x32x10: + case engine_type_internal::philox4x32x10: host_engine_ = daal::algorithms::engines::philox4x32x10::Batch<>::create(seed); break; - case engine_type::mt19937: + case engine_type_internal::mt19937: host_engine_ = daal::algorithms::engines::mt19937::Batch<>::create(seed); break; default: throw std::invalid_argument("Unsupported engine type 1"); @@ -165,7 +166,7 @@ template void partial_fisher_yates_shuffle(ndview& result_array, std::int64_t top, std::int64_t seed, - engine_type method = engine_type::mt19937) { + engine_type_internal method = engine_type_internal::mt19937) { host_engine eng_ = host_engine(seed, method); const auto casted_top = dal::detail::integral_cast(top); const std::int64_t count = result_array.get_count(); diff --git a/cpp/oneapi/dal/backend/primitives/rng/host_engine_collection.hpp b/cpp/oneapi/dal/backend/primitives/rng/host_engine_collection.hpp index 9b9c241a1a8..34daa3d8a64 100644 --- a/cpp/oneapi/dal/backend/primitives/rng/host_engine_collection.hpp +++ b/cpp/oneapi/dal/backend/primitives/rng/host_engine_collection.hpp @@ -26,7 +26,7 @@ class host_engine_collection { public: explicit host_engine_collection(std::int64_t count, std::int64_t seed = 777, - engine_type method = engine_type::mt2203) + engine_type_internal method = engine_type_internal::mt2203) : count_(count), engine_(initialize_host_engine(seed, method)), params_(count), @@ -61,16 +61,17 @@ class host_engine_collection { private: daal::algorithms::engines::EnginePtr initialize_host_engine(std::int64_t seed, - engine_type method) { + engine_type_internal method) { switch (method) { - case engine_type::mt2203: + case engine_type_internal::mt2203: return daal::algorithms::engines::mt2203::Batch<>::create(seed); - case engine_type::mcg59: return daal::algorithms::engines::mcg59::Batch<>::create(seed); - case engine_type::mrg32k3a: + case engine_type_internal::mcg59: + return daal::algorithms::engines::mcg59::Batch<>::create(seed); + case engine_type_internal::mrg32k3a: return daal::algorithms::engines::mrg32k3a::Batch<>::create(seed); - case engine_type::philox4x32x10: + case engine_type_internal::philox4x32x10: return daal::algorithms::engines::philox4x32x10::Batch<>::create(seed); - case engine_type::mt19937: + case engine_type_internal::mt19937: return daal::algorithms::engines::mt19937::Batch<>::create(seed); default: throw std::invalid_argument("Unsupported engine type"); } diff --git a/cpp/oneapi/dal/backend/primitives/rng/rng_types.hpp b/cpp/oneapi/dal/backend/primitives/rng/rng_types.hpp index 2007a9bdf43..60dce98bbad 100644 --- a/cpp/oneapi/dal/backend/primitives/rng/rng_types.hpp +++ b/cpp/oneapi/dal/backend/primitives/rng/rng_types.hpp @@ -14,6 +14,7 @@ * limitations under the License. 
*******************************************************************************/ +#include "oneapi/dal/rng.hpp" #pragma once namespace oneapi::dal::backend::primitives { @@ -22,13 +23,28 @@ namespace oneapi::dal::backend::primitives { /// /// This enumeration defines the available RNG engines supported by the library. /// Each engine method corresponds to a specific algorithm for generating random numbers -/// @enum engine_type +/// @enum engine_type_internal /// Enumeration of RNG engine methods: /// - `mt2203`: Mersenne Twister engine with specific optimizations for parallel environments. /// - `mcg59`: Multiplicative congruential generator with a modulus of \(2^{59}\). /// - `mt19937`: Standard Mersenne Twister engine with a period of \(2^{19937} - 1\). /// - `mrg32k3a`: Combined multiple recursive generator with a period of \(2^{191}\). /// - `philox4x32x10`: Counter-based RNG engine optimized for parallel computations. -enum class engine_type { mt2203, mcg59, mt19937, mrg32k3a, philox4x32x10 }; +enum class engine_type_internal { mt2203, mcg59, mt19937, mrg32k3a, philox4x32x10 }; +inline engine_type_internal convert_engine_method(engine_type method) { + switch (method) { + case engine_type::mt2203: + return ::oneapi::dal::backend::primitives::engine_type_internal::mt2203; + case engine_type::mcg59: + return ::oneapi::dal::backend::primitives::engine_type_internal::mcg59; + case engine_type::mrg32k3a: + return ::oneapi::dal::backend::primitives::engine_type_internal::mrg32k3a; + case engine_type::philox4x32x10: + return ::oneapi::dal::backend::primitives::engine_type_internal::philox4x32x10; + case engine_type::mt19937: + return ::oneapi::dal::backend::primitives::engine_type_internal::mt19937; + default: throw std::runtime_error("Unsupported engine type in generate_rng"); + } +} } // namespace oneapi::dal::backend::primitives diff --git a/cpp/oneapi/dal/backend/primitives/rng/test/rng_dpc.cpp b/cpp/oneapi/dal/backend/primitives/rng/test/rng_dpc.cpp index 49476e9bc65..f1e08226b9a 100644 --- a/cpp/oneapi/dal/backend/primitives/rng/test/rng_dpc.cpp +++ b/cpp/oneapi/dal/backend/primitives/rng/test/rng_dpc.cpp @@ -37,36 +37,36 @@ class mrg32k3a {}; class mt19937 {}; class philox4x32x10 {}; -template +template struct engine_map {}; template <> struct engine_map { - constexpr static auto value = engine_type::mt2203; + constexpr static auto value = engine_type_internal::mt2203; }; template <> struct engine_map { - constexpr static auto value = engine_type::mcg59; + constexpr static auto value = engine_type_internal::mcg59; }; template <> struct engine_map { - constexpr static auto value = engine_type::mrg32k3a; + constexpr static auto value = engine_type_internal::mrg32k3a; }; template <> struct engine_map { - constexpr static auto value = engine_type::philox4x32x10; + constexpr static auto value = engine_type_internal::philox4x32x10; }; template <> struct engine_map { - constexpr static auto value = engine_type::mt19937; + constexpr static auto value = engine_type_internal::mt19937; }; -template -constexpr auto engine_v = engine_map::value; +template +constexpr auto engine_v = engine_map::value; template class rng_test : public te::float_algo_fixture> { diff --git a/cpp/oneapi/dal/backend/primitives/sort/sort.hpp b/cpp/oneapi/dal/backend/primitives/sort/sort.hpp index 47f40b92cdf..e5e27c7eddb 100644 --- a/cpp/oneapi/dal/backend/primitives/sort/sort.hpp +++ b/cpp/oneapi/dal/backend/primitives/sort/sort.hpp @@ -161,6 +161,25 @@ class radix_sort { static constexpr inline std::uint32_t 
radix_count_ = sizeof(Integer); }; +template +sycl::event radix_sort_indices_inplace_dpl(sycl::queue& queue, + ndview& val, + ndview& ind, + const event_vector& deps = {}); + +template +sycl::event radix_sort_dpl(sycl::queue& queue, + ndview& val_in, + ndview& val_out, + std::int64_t sorted_elem_count, + const event_vector& deps = {}); + +template +sycl::event radix_sort_dpl(sycl::queue& queue, + ndview& val_in, + ndview& val_out, + const event_vector& deps = {}); + #endif } // namespace oneapi::dal::backend::primitives diff --git a/cpp/oneapi/dal/backend/primitives/sort/sort_dpc.cpp b/cpp/oneapi/dal/backend/primitives/sort/sort_dpc.cpp index a68e4c6a1fb..0e67bdf1c07 100644 --- a/cpp/oneapi/dal/backend/primitives/sort/sort_dpc.cpp +++ b/cpp/oneapi/dal/backend/primitives/sort/sort_dpc.cpp @@ -20,6 +20,14 @@ #include +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-local-typedef" + +#include + +#pragma clang diagnostic pop + namespace oneapi::dal::backend::primitives { namespace de = dal::detail; @@ -524,6 +532,73 @@ sycl::event radix_sort::operator()(ndview& val_in, return sort_event_; } +template +sycl::event radix_sort_indices_inplace_dpl(sycl::queue& queue, + ndview& val_in, + ndview& ind_in, + const event_vector& deps) { + ONEDAL_PROFILER_TASK(sort.radix_sort_indices_inplace, queue); + ONEDAL_ASSERT(val_in.has_mutable_data()); + ONEDAL_ASSERT(ind_in.has_mutable_data()); + ONEDAL_ASSERT(val_in.get_count() == ind_in.get_count()); + + if (val_in.get_count() > de::limits::max()) { + throw domain_error(dal::detail::error_messages::invalid_number_of_elements_to_sort()); + } + + auto event = oneapi::dpl::experimental::kt::gpu::esimd::radix_sort_by_key( + queue, + val_in.get_mutable_data(), + val_in.get_mutable_data() + val_in.get_count(), + ind_in.get_mutable_data(), + // Parameters have been chosen based on the oneDPL example for radix sort by key. + // Reference: https://www.intel.com/content/www/us/en/docs/onedpl/developer-guide/2022-7/radix-sort-by-key.html + // These parameters ensure optimal performance for the given data type and distribution. + dpl::experimental::kt::kernel_param<96, 64>{}); + return event; +} + +template +sycl::event radix_sort_dpl(sycl::queue& queue, + ndview& val_in, + ndview& val_out, + std::int64_t sorted_elem_count, + const event_vector& deps) { + ONEDAL_PROFILER_TASK(sort.radix_sort, queue); + + const auto row_count = val_in.get_dimension(0); + const auto col_count = val_in.get_dimension(1); + sycl::event radix_sort_event; + + for (std::int64_t row = 0; row < row_count; ++row) { + Integer* row_start_in = val_in.get_mutable_data() + row * col_count; + Integer* row_start_out = val_out.get_mutable_data() + row * col_count; + + const auto row_sorted_elem_count = std::min(sorted_elem_count, col_count); + + radix_sort_event = oneapi::dpl::experimental::kt::gpu::esimd::radix_sort( + queue, + row_start_in, + row_start_in + row_sorted_elem_count, + row_start_out, + // Parameters have been chosen based on the oneDPL example for radix sort by key. + // Reference: https://www.intel.com/content/www/us/en/docs/onedpl/developer-guide/2022-7/radix-sort-by-key.html + // These parameters ensure optimal performance for the given data type and distribution. 
+ dpl::experimental::kt::kernel_param<96, 64>{}); + } + + return radix_sort_event; +} + +template +sycl::event radix_sort_dpl(sycl::queue& queue, + ndview& val_in, + ndview& val_out, + const event_vector& deps) { + ONEDAL_PROFILER_TASK(sort.radix_sort, queue); + return radix_sort_dpl(queue, val_in, val_out, val_in.get_dimension(1), deps); +} + template sycl::event radix_sort::operator()(ndview& val_in, ndview& val_out, @@ -546,4 +621,42 @@ INSTANTIATE_SORT(std::int32_t) INSTANTIATE_SORT(std::uint32_t) INSTANTIATE_SORT(std::int64_t) INSTANTIATE_SORT(std::uint64_t) + +#define INSTANTIATE_SORT_INDICES_DPL(Float, Index) \ + template ONEDAL_EXPORT sycl::event radix_sort_indices_inplace_dpl( \ + sycl::queue&, \ + ndview&, \ + ndview&, \ + const event_vector&); + +#define INSTANTIATE_FLOAT(Index) \ + INSTANTIATE_SORT_INDICES_DPL(float, Index) \ + INSTANTIATE_SORT_INDICES_DPL(double, Index) + +INSTANTIATE_FLOAT(std::uint32_t) +INSTANTIATE_FLOAT(std::int32_t) + +#define INSTANTIATE_RADIX_SORT(Index) \ + template ONEDAL_EXPORT sycl::event radix_sort_dpl(sycl::queue&, \ + ndview&, \ + ndview&, \ + const event_vector&); + +INSTANTIATE_RADIX_SORT(std::int32_t) +INSTANTIATE_RADIX_SORT(std::uint32_t) +INSTANTIATE_RADIX_SORT(std::int64_t) +INSTANTIATE_RADIX_SORT(std::uint64_t) + +#define INSTANTIATE_RADIX_SORT_WITH_COUNT(Index) \ + template ONEDAL_EXPORT sycl::event radix_sort_dpl(sycl::queue&, \ + ndview&, \ + ndview&, \ + std::int64_t, \ + const event_vector&); + +INSTANTIATE_RADIX_SORT_WITH_COUNT(std::int32_t) +INSTANTIATE_RADIX_SORT_WITH_COUNT(std::uint32_t) +INSTANTIATE_RADIX_SORT_WITH_COUNT(std::int64_t) +INSTANTIATE_RADIX_SORT_WITH_COUNT(std::uint64_t) + } // namespace oneapi::dal::backend::primitives diff --git a/cpp/oneapi/dal/rng.hpp b/cpp/oneapi/dal/rng.hpp new file mode 100644 index 00000000000..cc2d0b3883a --- /dev/null +++ b/cpp/oneapi/dal/rng.hpp @@ -0,0 +1,40 @@ +/******************************************************************************* +# Copyright contributors to the oneDAL project +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#pragma once + +namespace oneapi::dal { + +namespace v1 { + +/// Enumeration of RNG engine methods +enum class engine_type { + /// Mersenne Twister engine with specific optimizations for parallel environments. 
+    mt2203,
+    /// Multiplicative congruential generator with a modulus of \(2^{59}\)
+    mcg59,
+    /// Counter-based RNG engine optimized for parallel computations
+    philox4x32x10,
+    /// Standard Mersenne Twister engine with a period of \(2^{19937} - 1\)
+    mt19937,
+    /// Combined multiple recursive generator with a period of \(2^{191}\)
+    mrg32k3a
+};
+
+} // namespace v1
+
+using v1::engine_type;
+} // namespace oneapi::dal
diff --git a/dev/bazel/deps/dpl.bzl b/dev/bazel/deps/dpl.bzl
new file mode 100644
index 00000000000..93ee00b4c03
--- /dev/null
+++ b/dev/bazel/deps/dpl.bzl
@@ -0,0 +1,27 @@
+#===============================================================================
+# Copyright contributors to the oneDAL project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#===============================================================================
+
+load("@onedal//dev/bazel:repos.bzl", "repos")
+
+dpl_repo = repos.prebuilt_libs_repo_rule(
+    includes = [
+        "include",
+    ],
+    libs = [
+        "lib",
+    ],
+    build_template = "@onedal//dev/bazel/deps:dpl.tpl.BUILD",
+)
diff --git a/dev/bazel/deps/dpl.tpl.BUILD b/dev/bazel/deps/dpl.tpl.BUILD
new file mode 100644
index 00000000000..b5d588a37fa
--- /dev/null
+++ b/dev/bazel/deps/dpl.tpl.BUILD
@@ -0,0 +1,7 @@
+package(default_visibility = ["//visibility:public"])
+
+cc_library(
+    name = "headers",
+    hdrs = glob(["include/**/**/*"]),
+    includes = [ "include" ],
+)
diff --git a/makefile b/makefile
index baffc8a1cc1..3127607ecf6 100644
--- a/makefile
+++ b/makefile
@@ -318,6 +318,11 @@ ifeq ($(REQPROFILE), yes)
     VTUNESDK.LIBS_A := $(if $(OS_is_lnx), $(VTUNESDK.libia)/libittnotify.a,)
 endif
 
+#=============================== oneDPL folders ======================================
+
+ONEDPLDIR := $(subst \,/,$(DPL_ROOT))
+ONEDPL.include := $(ONEDPLDIR)/include
+
 #===============================================================================
 # Release library names
 #===============================================================================
@@ -451,7 +456,7 @@ CORE.srcdirs := $(CORE.SERV.srcdir) $(CORE.srcdir) \
                 $(CPPDIR.daal)/src/data_management
 
 CORE.incdirs.common := $(RELEASEDIR.include) $(CPPDIR.daal) $(WORKDIR)
-CORE.incdirs.thirdp := $(daaldep.math_backend.incdir) $(VTUNESDK.include) $(TBBDIR.include)
+CORE.incdirs.thirdp := $(daaldep.math_backend.incdir) $(VTUNESDK.include) $(ONEDPL.include) $(TBBDIR.include)
 CORE.incdirs := $(CORE.incdirs.common) $(CORE.incdirs.thirdp)
 $(info CORE.incdirs: $(CORE.incdirs))
 
@@ -569,7 +574,7 @@ PARAMETERS.tmpdir_a.dpc := $(WORKDIR)/parameters_dpc_static
 PARAMETERS.tmpdir_y.dpc := $(WORKDIR)/parameters_dpc_dynamic
 
 ONEAPI.incdirs.common := $(CPPDIR)
-ONEAPI.incdirs.thirdp := $(CORE.incdirs.common) $(daaldep.math_backend_oneapi.incdir) $(VTUNESDK.include) $(TBBDIR.include)
+ONEAPI.incdirs.thirdp := $(CORE.incdirs.common) $(daaldep.math_backend_oneapi.incdir) $(VTUNESDK.include) $(ONEDPL.include) $(TBBDIR.include)
 ONEAPI.incdirs := $(ONEAPI.incdirs.common) $(CORE.incdirs.thirdp) $(ONEAPI.incdirs.thirdp)
 
 ONEAPI.dispatcher_cpu = $(WORKDIR)/oneapi/dal/_dal_cpu_dispatcher_gen.hpp