Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion tests/cpp/helpers.cc
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/**
* Copyright 2016-2025, XGBoost contributors
* Copyright 2016-2026, XGBoost contributors
*/
#include "helpers.h"

Expand All @@ -17,6 +17,7 @@
#include <random> // for mt19937

#include "../../src/collective/communicator-inl.h" // for GetRank
#include "../../src/common/cuda_rt_utils.h" // for SetDevice
#include "../../src/data/adapter.h"
#include "../../src/data/batch_utils.h" // for AutoHostRatio, AutoCachePageBytes
#include "../../src/data/iterative_dmatrix.h"
Expand Down Expand Up @@ -785,4 +786,12 @@ RMMAllocatorPtr SetUpRMMResourceForCppTests(int, char**) { return {nullptr, Dele
#endif // !defined(XGBOOST_USE_RMM) || XGBOOST_USE_RMM != 1

// Device ordinal for distributed GPU tests: when only one GPU is visible all
// workers share ordinal 0; otherwise each worker takes its collective rank.
std::int32_t DistGpuIdx() {
  if (curt::AllVisibleGPUs() == 1) {
    return 0;
  }
  return collective::GetRank();
}

/**
 * @brief Build a Context, selecting CUDA when @p device is a GPU ordinal.
 *
 * Returns a default (CPU) context when @p device equals the CPU ordinal;
 * otherwise binds the calling thread to @p device and returns a CUDA context
 * for it.
 */
[[nodiscard]] Context MakeCUDACtx(std::int32_t device) {
  if (device != DeviceOrd::CPUOrdinal()) {
    // Select the device before constructing the context so subsequent CUDA
    // calls in the test run on the requested ordinal.
    curt::SetDevice(device);
    return Context{}.MakeCUDA(device);
  }
  return Context{};
}
} // namespace xgboost
77 changes: 30 additions & 47 deletions tests/cpp/helpers.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/**
* Copyright 2016-2025, XGBoost contributors
* Copyright 2016-2026, XGBoost contributors
*/
#pragma once

Expand All @@ -19,17 +19,16 @@
#include <string>
#include <vector>


#if defined(__CUDACC__)
#include "../../src/collective/communicator-inl.h" // for GetRank
#include "../../src/common/cuda_rt_utils.h" // for AllVisibleGPUs
#endif // defined(__CUDACC__)
#endif // defined(__CUDACC__)

#include "filesystem.h" // for TemporaryDirectory
#include "xgboost/linalg.h"

#if defined(__CUDACC__)
#define DeclareUnifiedTest(name) GPU ## name
#define DeclareUnifiedTest(name) GPU##name
#else
#define DeclareUnifiedTest(name) name
#endif
Expand All @@ -41,7 +40,7 @@
#endif

#if defined(__CUDACC__)
#define DeclareUnifiedDistributedTest(name) MGPU ## name
#define DeclareUnifiedDistributedTest(name) MGPU##name
#else
#define DeclareUnifiedDistributedTest(name) name
#endif
Expand All @@ -51,7 +50,7 @@ class ObjFunction;
class Metric;
struct LearnerModelParam;
class GradientBooster;
}
} // namespace xgboost

template <typename Float>
Float RelError(Float l, Float r) {
Expand Down Expand Up @@ -82,20 +81,16 @@ void CreateBigTestData(const std::string& filename, size_t n_entries, bool zero_
void CreateTestCSV(std::string const& path, size_t rows, size_t cols);

void CheckObjFunction(std::unique_ptr<xgboost::ObjFunction> const& obj,
std::vector<xgboost::bst_float> preds,
std::vector<xgboost::bst_float> labels,
std::vector<xgboost::bst_float> preds, std::vector<xgboost::bst_float> labels,
std::vector<xgboost::bst_float> weights,
std::vector<xgboost::bst_float> out_grad,
std::vector<xgboost::bst_float> out_hess);

xgboost::Json CheckConfigReloadImpl(xgboost::Configurable* const configurable,
std::string name);
xgboost::Json CheckConfigReloadImpl(xgboost::Configurable* const configurable, std::string name);

template <typename T>
xgboost::Json CheckConfigReload(std::unique_ptr<T> const& configurable,
std::string name = "") {
return CheckConfigReloadImpl(dynamic_cast<xgboost::Configurable*>(configurable.get()),
name);
xgboost::Json CheckConfigReload(std::unique_ptr<T> const& configurable, std::string name = "") {
return CheckConfigReloadImpl(dynamic_cast<xgboost::Configurable*>(configurable.get()), name);
}

void CheckRankingObjFunction(std::unique_ptr<xgboost::ObjFunction> const& obj,
Expand All @@ -107,12 +102,11 @@ void CheckRankingObjFunction(std::unique_ptr<xgboost::ObjFunction> const& obj,
std::vector<xgboost::bst_float> out_hess);

xgboost::bst_float GetMetricEval(
xgboost::Metric * metric,
xgboost::HostDeviceVector<xgboost::bst_float> const& preds,
std::vector<xgboost::bst_float> labels,
std::vector<xgboost::bst_float> weights = std::vector<xgboost::bst_float>(),
std::vector<xgboost::bst_uint> groups = std::vector<xgboost::bst_uint>(),
xgboost::DataSplitMode data_split_Mode = xgboost::DataSplitMode::kRow);
xgboost::Metric* metric, xgboost::HostDeviceVector<xgboost::bst_float> const& preds,
std::vector<xgboost::bst_float> labels,
std::vector<xgboost::bst_float> weights = std::vector<xgboost::bst_float>(),
std::vector<xgboost::bst_uint> groups = std::vector<xgboost::bst_uint>(),
xgboost::DataSplitMode data_split_Mode = xgboost::DataSplitMode::kRow);

double GetMultiMetricEval(xgboost::Metric* metric,
xgboost::HostDeviceVector<xgboost::bst_float> const& preds,
Expand Down Expand Up @@ -179,8 +173,8 @@ class SimpleRealUniformDistribution {
template <size_t Bits, typename GeneratorT>
ResultT GenerateCanonical(GeneratorT* rng) const {
static_assert(std::is_floating_point_v<ResultT>, "Result type must be floating point.");
long double const r = (static_cast<long double>(rng->Max())
- static_cast<long double>(rng->Min())) + 1.0L;
long double const r =
(static_cast<long double>(rng->Max()) - static_cast<long double>(rng->Min())) + 1.0L;
auto const log2r = static_cast<size_t>(std::log(r) / std::log(2.0L));
size_t m = std::max<size_t>(1UL, (Bits + log2r - 1UL) / log2r);
ResultT sum_value = 0, r_k = 1;
Expand All @@ -195,13 +189,11 @@ class SimpleRealUniformDistribution {
}

public:
SimpleRealUniformDistribution(ResultT l, ResultT u) :
lower_{l}, upper_{u} {}
SimpleRealUniformDistribution(ResultT l, ResultT u) : lower_{l}, upper_{u} {}

template <typename GeneratorT>
ResultT operator()(GeneratorT* rng) const {
ResultT tmp = GenerateCanonical<std::numeric_limits<ResultT>::digits,
GeneratorT>(rng);
ResultT tmp = GenerateCanonical<std::numeric_limits<ResultT>::digits, GeneratorT>(rng);
auto ret = (tmp * (upper_ - lower_)) + lower_;
// Correct floating point error.
return std::max(ret, lower_);
Expand All @@ -225,7 +217,7 @@ Json GetArrayInterface(HostDeviceVector<T> const* storage, size_t rows, size_t c
array_interface["shape"][1] = cols;

char t = linalg::detail::ArrayInterfaceHandler::TypeChar<T>();
array_interface["typestr"] = String(std::string{"<"} + t + std::to_string(sizeof(T)));
array_interface["typestr"] = String(std::string{"<"} + t + std::to_string(sizeof(T))); // NOLINT
array_interface["version"] = 3;
return array_interface;
}
Expand Down Expand Up @@ -382,14 +374,9 @@ std::unique_ptr<GradientBooster> CreateTrainedGBM(std::string name, Args kwargs,
Context const* generic_param);

/**
* \brief Make a context that uses CUDA if device >= 0.
* @brief Make a context that uses CUDA if device >= 0.
*/
inline Context MakeCUDACtx(std::int32_t device) {
if (device == DeviceOrd::CPUOrdinal()) {
return Context{};
}
return Context{}.MakeCUDA(device);
}
[[nodiscard]] Context MakeCUDACtx(std::int32_t device);

inline HostDeviceVector<GradientPair> GenerateRandomGradients(const size_t n_rows,
float lower = 0.0f,
Expand All @@ -410,12 +397,13 @@ inline auto GenerateRandomGradients(Context const* ctx, bst_idx_t n_rows, bst_ta
float lower = 0.0f, float upper = 1.0f) {
auto g = GenerateRandomGradients(n_rows * n_targets, lower, upper);
GradientContainer gpair;
gpair.gpair = linalg::Matrix<GradientPair>{{n_rows, static_cast<bst_idx_t>(n_targets)}, ctx->Device()};
gpair.gpair =
linalg::Matrix<GradientPair>{{n_rows, static_cast<bst_idx_t>(n_targets)}, ctx->Device()};
gpair.gpair.Data()->Copy(g);
return gpair;
}

typedef void *DMatrixHandle; // NOLINT(*);
typedef void* DMatrixHandle; // NOLINT(*);

class ArrayIterForTest {
protected:
Expand Down Expand Up @@ -476,19 +464,14 @@ class NumpyArrayIterForTest : public ArrayIterForTest {
~NumpyArrayIterForTest() override = default;
};

void DMatrixToCSR(DMatrix *dmat, std::vector<float> *p_data,
std::vector<size_t> *p_row_ptr,
std::vector<bst_feature_t> *p_cids);
void DMatrixToCSR(DMatrix* dmat, std::vector<float>* p_data, std::vector<size_t>* p_row_ptr,
std::vector<bst_feature_t>* p_cids);

typedef void *DataIterHandle; // NOLINT(*)
typedef void* DataIterHandle; // NOLINT(*)

inline void Reset(DataIterHandle self) {
static_cast<ArrayIterForTest*>(self)->Reset();
}
// C-style callback for the external data-iterator API: rewind the test iterator.
inline void Reset(DataIterHandle self) {
  auto* iter = static_cast<ArrayIterForTest*>(self);
  iter->Reset();
}

inline int Next(DataIterHandle self) {
return static_cast<ArrayIterForTest*>(self)->Next();
}
// C-style callback for the external data-iterator API: advance the test
// iterator, forwarding its end-of-data indicator to the caller.
inline int Next(DataIterHandle self) {
  auto* iter = static_cast<ArrayIterForTest*>(self);
  return iter->Next();
}

/**
* @brief Create an array interface for host vector.
Expand All @@ -501,7 +484,7 @@ char const* Make1dInterfaceTest(T const* vec, std::size_t len) {
}

class RMMAllocator;
using RMMAllocatorPtr = std::unique_ptr<RMMAllocator, void(*)(RMMAllocator*)>;
using RMMAllocatorPtr = std::unique_ptr<RMMAllocator, void (*)(RMMAllocator*)>;
RMMAllocatorPtr SetUpRMMResourceForCppTests(int argc, char** argv);

/*
Expand Down
Loading