Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add cutlass 3xTF32,DMMA based L2/cosine distance kernels for SM 8.0 or higher #939

Merged
merged 28 commits into from
Nov 16, 2022
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
20648c5
cutlass based euclidean expanded, cosine kernels
mdoijade Oct 20, 2022
a9dabc8
add prior ampere pairwisedistmat kernel to prevent redundant kernel c…
mdoijade Oct 21, 2022
1a45bfa
add noexcept to the functor methods
mdoijade Oct 21, 2022
c6f091b
merge branch 22.12 and resolve conflicts
mdoijade Oct 21, 2022
7786fcb
fix comments, remove redundant code and fix formatting issues
mdoijade Oct 27, 2022
181fc40
add cutlass cmake support for raft with custom namespace, fix formati…
mdoijade Oct 28, 2022
3d34545
fix formatting issues
mdoijade Oct 28, 2022
02c23ed
fix the cutlass_include_dir path in cmake
mdoijade Nov 3, 2022
7933436
fix bugs in get_cutlass cmake to use cutlass provided properties corr…
mdoijade Nov 4, 2022
d4bdec5
remove the cutlass namespace setting in test cmakefiles as it is not …
mdoijade Nov 4, 2022
d26bcef
temp remove dist dependency from cutlass to check if it works in ci/cd
mdoijade Nov 4, 2022
4df4185
merge branch-22.12 latest changes
mdoijade Nov 7, 2022
451c3c0
fix get_cutlass.cmake to work with pylibraft by using NvidiaCutlass i…
mdoijade Nov 10, 2022
7b512f9
fix get_cutlass install path, make changes as per review comments
mdoijade Nov 10, 2022
a05e1e2
merge branch-22.12
mdoijade Nov 10, 2022
d32b4c0
fix clang format issues
mdoijade Nov 10, 2022
f7c440a
temp fix to check if python build works
mdoijade Nov 11, 2022
b1a1fd7
add raft-exports instead of raft-distance-exports as other raft compo…
mdoijade Nov 15, 2022
4ef44e7
make cutlass to depend only on raft_distance and add raft_distance de…
mdoijade Nov 16, 2022
186fcc7
fix cmake formatting issues
mdoijade Nov 16, 2022
8aa8909
prevent cutlass based pairwise dist kernels to be disabled on cuda 12…
mdoijade Nov 16, 2022
abfd493
Moving cutlass dependency to distance and nn to keep them separate.
cjnolet Nov 16, 2022
f1b1239
Adding CUTLASS to build docs as dependency
cjnolet Nov 16, 2022
32e6052
Updating to export to both distance and nn
cjnolet Nov 16, 2022
f6de9ee
Adding cutlass as private dependency
cjnolet Nov 16, 2022
9bf0647
Making cutlass INTERFACE in raft::nn and raft::distance
cjnolet Nov 16, 2022
8f0119a
Using proper exports per Robert Maynard's suggestion.
cjnolet Nov 16, 2022
6ad4fd1
Adding cutlass as private dependency of lib targets
cjnolet Nov 16, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
cutlass based euclidean expanded, cosine kernels
  • Loading branch information
mdoijade committed Oct 20, 2022
commit 20648c599ec8a95eca23820ce2a5ac05f7b86c55
57 changes: 29 additions & 28 deletions cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,8 @@ add_library(raft::raft ALIAS raft)

target_include_directories(raft INTERFACE
"$<BUILD_INTERFACE:${RAFT_SOURCE_DIR}/include>"
"$<INSTALL_INTERFACE:include>")
"$<INSTALL_INTERFACE:include>"
"${CUTLASS_DIR}/include")

# Keep RAFT as lightweight as possible.
# Only CUDA libs and rmm should
Expand Down Expand Up @@ -225,21 +226,21 @@ set_target_properties(raft_distance PROPERTIES EXPORT_NAME distance)
if(RAFT_COMPILE_DIST_LIBRARY)
add_library(raft_distance_lib
src/distance/pairwise_distance.cu
src/distance/specializations/detail/canberra.cu
src/distance/specializations/detail/chebyshev.cu
src/distance/specializations/detail/correlation.cu
src/distance/specializations/detail/cosine.cu
src/distance/specializations/detail/hamming_unexpanded.cu
src/distance/specializations/detail/hellinger_expanded.cu
src/distance/specializations/detail/jensen_shannon_float_float_float_int.cu
src/distance/specializations/detail/jensen_shannon_float_float_float_uint32.cu
src/distance/specializations/detail/jensen_shannon_double_double_double_int.cu
src/distance/specializations/detail/kl_divergence_float_float_float_int.cu
src/distance/specializations/detail/kl_divergence_float_float_float_uint32.cu
src/distance/specializations/detail/kl_divergence_double_double_double_int.cu
src/distance/specializations/detail/l1_float_float_float_int.cu
src/distance/specializations/detail/l1_float_float_float_uint32.cu
src/distance/specializations/detail/l1_double_double_double_int.cu
# src/distance/specializations/detail/canberra.cu
# src/distance/specializations/detail/chebyshev.cu
# src/distance/specializations/detail/correlation.cu
# src/distance/specializations/detail/cosine.cu
# src/distance/specializations/detail/hamming_unexpanded.cu
# src/distance/specializations/detail/hellinger_expanded.cu
# src/distance/specializations/detail/jensen_shannon_float_float_float_int.cu
# src/distance/specializations/detail/jensen_shannon_float_float_float_uint32.cu
# src/distance/specializations/detail/jensen_shannon_double_double_double_int.cu
# src/distance/specializations/detail/kl_divergence_float_float_float_int.cu
# src/distance/specializations/detail/kl_divergence_float_float_float_uint32.cu
# src/distance/specializations/detail/kl_divergence_double_double_double_int.cu
# src/distance/specializations/detail/l1_float_float_float_int.cu
# src/distance/specializations/detail/l1_float_float_float_uint32.cu
# src/distance/specializations/detail/l1_double_double_double_int.cu
src/distance/specializations/detail/l2_expanded_float_float_float_int.cu
src/distance/specializations/detail/l2_expanded_float_float_float_uint32.cu
src/distance/specializations/detail/l2_expanded_double_double_double_int.cu
Expand All @@ -249,12 +250,12 @@ if(RAFT_COMPILE_DIST_LIBRARY)
src/distance/specializations/detail/l2_sqrt_unexpanded_float_float_float_int.cu
src/distance/specializations/detail/l2_sqrt_unexpanded_float_float_float_uint32.cu
src/distance/specializations/detail/l2_sqrt_unexpanded_double_double_double_int.cu
src/distance/specializations/detail/l2_unexpanded_double_double_double_int.cu
src/distance/specializations/detail/l2_unexpanded_float_float_float_uint32.cu
src/distance/specializations/detail/l2_unexpanded_float_float_float_int.cu
src/distance/specializations/detail/lp_unexpanded_double_double_double_int.cu
src/distance/specializations/detail/lp_unexpanded_float_float_float_uint32.cu
src/distance/specializations/detail/lp_unexpanded_float_float_float_int.cu
# src/distance/specializations/detail/l2_unexpanded_double_double_double_int.cu
# src/distance/specializations/detail/l2_unexpanded_float_float_float_uint32.cu
# src/distance/specializations/detail/l2_unexpanded_float_float_float_int.cu
# src/distance/specializations/detail/lp_unexpanded_double_double_double_int.cu
# src/distance/specializations/detail/lp_unexpanded_float_float_float_uint32.cu
# src/distance/specializations/detail/lp_unexpanded_float_float_float_int.cu
)
set_target_properties(
raft_distance_lib
Expand Down Expand Up @@ -305,16 +306,16 @@ set_target_properties(raft_nn PROPERTIES EXPORT_NAME nn)

if(RAFT_COMPILE_NN_LIBRARY)
add_library(raft_nn_lib
src/nn/specializations/ball_cover.cu
src/nn/specializations/detail/ball_cover_lowdim_pass_one_2d.cu
src/nn/specializations/detail/ball_cover_lowdim_pass_two_2d.cu
src/nn/specializations/detail/ball_cover_lowdim_pass_one_3d.cu
src/nn/specializations/detail/ball_cover_lowdim_pass_two_3d.cu
# src/nn/specializations/ball_cover.cu
# src/nn/specializations/detail/ball_cover_lowdim_pass_one_2d.cu
# src/nn/specializations/detail/ball_cover_lowdim_pass_two_2d.cu
# src/nn/specializations/detail/ball_cover_lowdim_pass_one_3d.cu
# src/nn/specializations/detail/ball_cover_lowdim_pass_two_3d.cu
src/nn/specializations/fused_l2_knn_long_float_true.cu
src/nn/specializations/fused_l2_knn_long_float_false.cu
src/nn/specializations/fused_l2_knn_int_float_true.cu
src/nn/specializations/fused_l2_knn_int_float_false.cu
src/nn/specializations/knn.cu
# src/nn/specializations/knn.cu
)
set_target_properties(
raft_nn_lib
Expand Down
13 changes: 13 additions & 0 deletions cpp/include/raft/core/cudart_utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -354,6 +354,19 @@ inline int getMultiProcessorCount()
return mpCount;
}

/**
 * @brief helper method to get the compute capability of the current CUDA device
 *
 * Queries the device returned by cudaGetDevice().
 *
 * @return pair of (major, minor) compute-capability version numbers
 */
inline std::pair<int, int> getMajorMinorVersion()
{
  int devId;
  RAFT_CUDA_TRY(cudaGetDevice(&devId));
  int majorVer, minorVer;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&majorVer, cudaDevAttrComputeCapabilityMajor, devId));
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&minorVer, cudaDevAttrComputeCapabilityMinor, devId));

  return std::make_pair(majorVer, minorVer);
}


/** helper method to convert an array on device to a string on host */
template <typename T>
std::string arr2Str(const T* arr, int size, std::string name, cudaStream_t stream, int width = 4)
Expand Down
146 changes: 82 additions & 64 deletions cpp/include/raft/distance/detail/cosine.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,24 @@

#include <raft/distance/detail/pairwise_distance_base.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/distance/detail/pairwise_distance_cutlass_base.cuh>

namespace raft {
namespace distance {
namespace detail {

/**
 * @brief Epilogue functor computing the cosine distance from an accumulated
 *        dot product: 1 - accVal / (aNorm * bNorm).
 *
 * The three-argument overload combines the accumulated dot product with the
 * precomputed row norms of the two inputs; the one-argument overload is an
 * identity pass-through applied to a loaded norm value.
 */
template <typename DataT, typename AccT>
struct CosineOp {
  __device__ __host__ CosineOp() noexcept {}

  /** finalize: turn the accumulated dot product into the cosine distance */
  __device__ __host__ AccT operator()(const DataT& aNorm, const DataT& bNorm, const DataT& accVal) const noexcept
  {
    return static_cast<AccT>(1.0) - static_cast<AccT>(accVal / (aNorm * bNorm));
  }

  /** identity pass-through for a single loaded value */
  __device__ __host__ AccT operator()(DataT aData) const noexcept { return aData; }
};


/**
* @brief the cosine distance matrix calculation implementer
* It computes the following equation:
Expand Down Expand Up @@ -71,61 +84,72 @@ void cosineImpl(const DataT* x,
FinalLambda fin_op,
cudaStream_t stream)
{
typedef typename raft::linalg::Policy4x4<DataT, VecLen>::Policy RowPolicy;
typedef typename raft::linalg::Policy4x4<DataT, VecLen>::ColPolicy ColPolicy;

typedef typename std::conditional<isRowMajor, RowPolicy, ColPolicy>::type KPolicy;

dim3 blk(KPolicy::Nthreads);

// Accumulation operation lambda
auto core_lambda = [] __device__(AccT & acc, DataT & x, DataT & y) { acc += x * y; };

// epilogue operation lambda for final value calculation
auto epilog_lambda = [] __device__(AccT acc[KPolicy::AccRowsPerTh][KPolicy::AccColsPerTh],
DataT * regxn,
DataT * regyn,
IdxT gridStrideX,
IdxT gridStrideY) {
#pragma unroll
for (int i = 0; i < KPolicy::AccRowsPerTh; ++i) {
#pragma unroll
for (int j = 0; j < KPolicy::AccColsPerTh; ++j) {
acc[i][j] = acc[i][j] / (regxn[i] * regyn[j]);
}
}
};
const auto deviceVersion = getMajorMinorVersion();
if (deviceVersion.first >= 8) {
using CosineOp_ = CosineOp<DataT, AccT>;
CosineOp_ cosine_dist_op;

cutlassDistanceKernel<DataT, AccT, OutT, IdxT, VecLen, FinalLambda, CosineOp_, isRowMajor>(
x, y, xn, yn, m, n, k, lda, ldb, ldd, dOutput, fin_op, cosine_dist_op, stream);

constexpr size_t shmemSize =
KPolicy::SmemSize + ((KPolicy::Mblk + KPolicy::Nblk) * sizeof(DataT));
if (isRowMajor) {
auto cosineRowMajor = pairwiseDistanceMatKernel<true,
DataT,
AccT,
OutT,
IdxT,
KPolicy,
decltype(core_lambda),
decltype(epilog_lambda),
FinalLambda,
true>;
dim3 grid = launchConfigGenerator<KPolicy>(m, n, shmemSize, cosineRowMajor);
cosineRowMajor<<<grid, blk, shmemSize, stream>>>(
x, y, xn, yn, m, n, k, lda, ldb, ldd, dOutput, core_lambda, epilog_lambda, fin_op);
} else {
auto cosineColMajor = pairwiseDistanceMatKernel<true,
DataT,
AccT,
OutT,
IdxT,
KPolicy,
decltype(core_lambda),
decltype(epilog_lambda),
FinalLambda,
false>;
dim3 grid = launchConfigGenerator<KPolicy>(m, n, shmemSize, cosineColMajor);
cosineColMajor<<<grid, blk, shmemSize, stream>>>(
x, y, xn, yn, m, n, k, lda, ldb, ldd, dOutput, core_lambda, epilog_lambda, fin_op);

typedef typename raft::linalg::Policy4x4<DataT, VecLen>::Policy RowPolicy;
typedef typename raft::linalg::Policy4x4<DataT, VecLen>::ColPolicy ColPolicy;

typedef typename std::conditional<isRowMajor, RowPolicy, ColPolicy>::type KPolicy;

dim3 blk(KPolicy::Nthreads);

// Accumulation operation lambda
auto core_lambda = [] __device__(AccT & acc, DataT & x, DataT & y) { acc += x * y; };

// epilogue operation lambda for final value calculation
auto epilog_lambda = [] __device__(AccT acc[KPolicy::AccRowsPerTh][KPolicy::AccColsPerTh],
DataT * regxn,
DataT * regyn,
IdxT gridStrideX,
IdxT gridStrideY) {
#pragma unroll
for (int i = 0; i < KPolicy::AccRowsPerTh; ++i) {
#pragma unroll
for (int j = 0; j < KPolicy::AccColsPerTh; ++j) {
acc[i][j] = 1.0 - (acc[i][j] / (regxn[i] * regyn[j]) );
}
}
};

constexpr size_t shmemSize =
KPolicy::SmemSize + ((KPolicy::Mblk + KPolicy::Nblk) * sizeof(DataT));
if (isRowMajor) {
auto cosineRowMajor = pairwiseDistanceMatKernel<true,
DataT,
AccT,
OutT,
IdxT,
KPolicy,
decltype(core_lambda),
decltype(epilog_lambda),
FinalLambda,
true>;
dim3 grid = launchConfigGenerator<KPolicy>(m, n, shmemSize, cosineRowMajor);
cosineRowMajor<<<grid, blk, shmemSize, stream>>>(
x, y, xn, yn, m, n, k, lda, ldb, ldd, dOutput, core_lambda, epilog_lambda, fin_op);
} else {
auto cosineColMajor = pairwiseDistanceMatKernel<true,
DataT,
AccT,
OutT,
IdxT,
KPolicy,
decltype(core_lambda),
decltype(epilog_lambda),
FinalLambda,
false>;
dim3 grid = launchConfigGenerator<KPolicy>(m, n, shmemSize, cosineColMajor);
cosineColMajor<<<grid, blk, shmemSize, stream>>>(
x, y, xn, yn, m, n, k, lda, ldb, ldd, dOutput, core_lambda, epilog_lambda, fin_op);
}
}

RAFT_CUDA_TRY(cudaGetLastError());
Expand Down Expand Up @@ -207,13 +231,7 @@ void cosineAlgo1(Index_ m,
{
auto norm_op = [] __device__(AccType in) { return raft::mySqrt(in); };

// Wrap fin_op to allow computing 1 - pA before calling fin_op
auto wrapped_fin_op = [fin_op] __device__(AccType d_val, Index_ g_d_idx) {
return fin_op(static_cast<AccType>(1.0) - d_val, g_d_idx);
};

typedef std::is_same<OutType, bool> is_bool;
typedef typename std::conditional<is_bool::value, OutType, AccType>::type CosOutType;
typedef typename std::conditional<sizeof(OutType) == 1, OutType, AccType>::type CosOutType;
mdoijade marked this conversation as resolved.
Show resolved Hide resolved
CosOutType* pDcast = reinterpret_cast<CosOutType*>(pD);

ASSERT(
Expand All @@ -234,12 +252,12 @@ void cosineAlgo1(Index_ m,

if (isRowMajor) {
lda = k, ldb = k, ldd = n;
cosine<InType, AccType, CosOutType, Index_, decltype(wrapped_fin_op), true>(
m, n, k, lda, ldb, ldd, pA, pB, col_vec, row_vec, pDcast, wrapped_fin_op, stream);
cosine<InType, AccType, CosOutType, Index_, FinalLambda, true>(
m, n, k, lda, ldb, ldd, pA, pB, col_vec, row_vec, pDcast, fin_op, stream);
} else {
lda = n, ldb = m, ldd = m;
cosine<InType, AccType, CosOutType, Index_, decltype(wrapped_fin_op), false>(
n, m, k, lda, ldb, ldd, pB, pA, row_vec, col_vec, pDcast, wrapped_fin_op, stream);
cosine<InType, AccType, CosOutType, Index_, FinalLambda, false>(
n, m, k, lda, ldb, ldd, pB, pA, row_vec, col_vec, pDcast, fin_op, stream);
}
}

Expand Down
18 changes: 15 additions & 3 deletions cpp/include/raft/distance/detail/distance.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -615,6 +615,16 @@ void distance(const InType* x,
* @note if workspace is passed as nullptr, this will return in
* worksize, the number of bytes of workspace required
*/

/**
 * @brief Default final operation: returns the accumulated distance value
 *        unchanged.
 *
 * A named functor type (rather than a lambda) so it can be spelled out in
 * template arguments of the distance kernels.
 */
template <typename AccType, typename OutType, typename Index>
struct default_fin_op {
  __host__ __device__ default_fin_op() noexcept {}

  /** identity: @p d_val is returned as-is; @p g_d_idx (global output index) is unused */
  __host__ __device__ OutType operator()(AccType d_val, Index g_d_idx) const { return d_val; }
};

template <raft::distance::DistanceType distanceType,
typename InType,
typename AccType,
Expand All @@ -632,9 +642,11 @@ void distance(const InType* x,
bool isRowMajor = true,
InType metric_arg = 2.0f)
{
auto default_fin_op = [] __device__(AccType d_val, Index_ g_d_idx) { return d_val; };
distance<distanceType, InType, AccType, OutType, decltype(default_fin_op), Index_>(
x, y, dist, m, n, k, workspace, worksize, default_fin_op, stream, isRowMajor, metric_arg);
using final_op_type = default_fin_op<AccType, OutType, Index_>;
final_op_type fin_op;

distance<distanceType, InType, AccType, OutType, final_op_type, Index_>(
x, y, dist, m, n, k, workspace, worksize, fin_op, stream, isRowMajor, metric_arg);
RAFT_CUDA_TRY(cudaPeekAtLastError());
}

Expand Down
Loading