CUDA and CUB implementation of multiple k-selection


I'm trying to implement multiple top-k selection in parallel, where each selection selects k elements from a list of n elements, and there are m such tasks to be executed in parallel. I use CUB to do that. I got a strange error and I don't know where I went wrong. I feel that I may have made some obvious mistake in my understanding; can someone help me check?

EDIT:

I made it work by adding two cudaDeviceSynchronize() calls, one before each of the two code sections that contain free() (see the sketch after the code below for where they go). So now my question is: does free() behave differently from cudaFree(), in that an asynchronous call immediately preceding it is not allowed? This is as opposed to another question I asked here: Does cudaFree after asynchronous call work?

#include <cub/cub.cuh>

// Assume dtop has size k x m and dmat has size n x m, where k < n
// Each column of dtop is supposed to obtain the top-k indices of 
// elements from the corresponding column in dmat.
template<typename ValueType, typename IndexType>
void TopKPerColumn_cub_test(DenseMatrix<IndexType, MemDev> dtop,
    DenseMatrix<ValueType, MemDev, Const> dmat);

// Selection functor over column indices: keep index i if its value is at
// least the k-th largest value of the column. (With > instead of >= the k-th
// element itself would be dropped; with ties at the k-th value, more than k
// indices may be selected.)
template<typename ValueType, typename IndexType>
struct SelectTopK {
  const ValueType* vals_;
  ValueType kth_;
  __device__ SelectTopK(const ValueType* vals, const ValueType& kth)
      : vals_(vals), kth_(kth) {}
  __device__ bool operator() (const IndexType& i) const {
    return vals_[i] >= kth_;
  }
};

template<typename ValueType, typename IndexType>
__global__ void k_TopKPerColumn_cub_test(DenseMatrix<IndexType, MemDev> dtop,
    DenseMatrix<ValueType, MemDev, Const> dmat) {
  int n = dmat.num_rows();
  int k = dtop.num_rows();

  // Ping-pong buffers for the radix sort, allocated from the device heap
  // with in-kernel malloc (one pair per block).
  cub::DoubleBuffer<ValueType> keys;
  keys.d_buffers[0] = reinterpret_cast<ValueType*>(
      malloc(sizeof(ValueType) * n));
  keys.d_buffers[1] = reinterpret_cast<ValueType*>(
      malloc(sizeof(ValueType) * n));
  // Copy this block's column into the active buffer.
  memcpy(keys.d_buffers[keys.selector], dmat.get_col(blockIdx.x).data(),
      sizeof(ValueType) * n);

  // The first call only queries the required temp storage size; the second
  // call actually sorts the column in descending order.
  void* temp_storage = 0;
  size_t temp_storage_size = 0;
  cub::DeviceRadixSort::SortKeysDescending(
      temp_storage, temp_storage_size, keys, n);
  temp_storage = malloc(temp_storage_size);
  cub::DeviceRadixSort::SortKeysDescending(
      temp_storage, temp_storage_size, keys, n);
  ValueType kth = keys.Current()[k-1];  // k-th largest value of the column

  free(temp_storage);
  free(keys.d_buffers[0]);
  free(keys.d_buffers[1]);

  temp_storage = 0;
  temp_storage_size = 0;
  int* nb_selected = reinterpret_cast<int*>(malloc(sizeof(int)));
  // Select the indices (0..n-1) whose values reach the k-th largest value,
  // writing them to this block's column of dtop. As above, the first call
  // queries the temp storage size and the second call does the work.
  cub::CountingInputIterator<IndexType> indices(0);
  SelectTopK<ValueType, IndexType> selector(
      dmat.get_col(blockIdx.x).data(), kth);

  cub::DeviceSelect::If(temp_storage, temp_storage_size,
      indices, dtop.get_col(blockIdx.x).data(),
      nb_selected, n, selector);
  temp_storage = malloc(temp_storage_size);
  cub::DeviceSelect::If(temp_storage, temp_storage_size,
      indices, dtop.get_col(blockIdx.x).data(),
      nb_selected, n, selector);

  free(nb_selected);
  free(temp_storage);
}

template<typename ValueType, typename IndexType>
void TopKPerColumn_cub_test(DenseMatrix<IndexType, MemDev> dtop,
    DenseMatrix<ValueType, MemDev, Const> dmat) {
  // One block per column, a single thread per block drives the CUB calls.
  k_TopKPerColumn_cub_test<<<dtop.num_cols(), 1>>>(dtop, dmat);
}
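
For reference, here is roughly where the two cudaDeviceSynchronize() calls mentioned in the edit go (only the relevant fragment of the kernel above, not a complete listing). The device-wide CUB calls run asynchronously with respect to the calling thread (as child grids under dynamic parallelism), so the parent thread has to wait for them before it reads their output or frees the buffers they still use (device-side cudaDeviceSynchronize() was allowed under dynamic parallelism at the time, though it has since been deprecated):

  // ...after the second SortKeysDescending call...
  cudaDeviceSynchronize();  // wait for the sort to finish
  ValueType kth = keys.Current()[k-1];
  free(temp_storage);
  free(keys.d_buffers[0]);
  free(keys.d_buffers[1]);

  // ...after the second DeviceSelect::If call...
  cudaDeviceSynchronize();  // wait for the selection to finish
  free(nb_selected);
  free(temp_storage);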

1 Answer

Answered by shaoyl85 (accepted):

Although I was able to make it work, this implementation performs slower than single-threaded CPU code. I eventually implemented it with heap-sort, keeping the heap in shared memory, and the performance is good.
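
Since the answer only describes the approach, here is a hypothetical sketch of what a per-column top-k with a k-element min-heap in shared memory can look like: one block per column (as in the launch above), (value, index) pairs kept in a shared-memory min-heap, and a final heap-sort pass so the indices come out ordered by value. All names (k_TopKPerColumn_heap, sift_down) and the raw column-major pointer interface are assumptions for illustration, not the answerer's actual code:

#include <cuda_runtime.h>

// Restore the min-heap property (smallest value at the root) below `root`.
template<typename ValueType, typename IndexType>
__device__ void sift_down(ValueType* val, IndexType* idx, int root, int size) {
  int i = root;
  while (true) {
    int l = 2 * i + 1, r = 2 * i + 2, s = i;
    if (l < size && val[l] < val[s]) s = l;
    if (r < size && val[r] < val[s]) s = r;
    if (s == i) return;
    ValueType tv = val[i]; val[i] = val[s]; val[s] = tv;
    IndexType ti = idx[i]; idx[i] = idx[s]; idx[s] = ti;
    i = s;
  }
}

// One block per column; a single thread per block maintains a k-element
// min-heap of (value, index) pairs in shared memory, then heap-sorts it so
// the output indices are ordered from largest to smallest value.
// Assumes 1 <= k <= n.
template<typename ValueType, typename IndexType>
__global__ void k_TopKPerColumn_heap(IndexType* top,        // k x m, column-major
                                     const ValueType* mat,  // n x m, column-major
                                     int n, int k) {
  extern __shared__ unsigned char smem[];
  ValueType* hval = reinterpret_cast<ValueType*>(smem);
  IndexType* hidx = reinterpret_cast<IndexType*>(hval + k);  // assumes IndexType needs no stricter alignment

  const ValueType* col = mat + static_cast<size_t>(blockIdx.x) * n;
  IndexType* out = top + static_cast<size_t>(blockIdx.x) * k;
  if (threadIdx.x != 0) return;  // single-threaded per column in this sketch

  // Seed with the first k elements and build the min-heap.
  for (int i = 0; i < k; ++i) { hval[i] = col[i]; hidx[i] = static_cast<IndexType>(i); }
  for (int i = k / 2 - 1; i >= 0; --i) sift_down(hval, hidx, i, k);

  // Scan the remaining elements; anything larger than the current minimum
  // of the top-k replaces the root.
  for (int j = k; j < n; ++j) {
    if (col[j] <= hval[0]) continue;
    hval[0] = col[j]; hidx[0] = static_cast<IndexType>(j);
    sift_down(hval, hidx, 0, k);
  }

  // Heap-sort phase: repeatedly pop the minimum so the indices come out
  // ordered from largest to smallest value.
  for (int size = k; size > 0; --size) {
    out[size - 1] = hidx[0];
    hval[0] = hval[size - 1]; hidx[0] = hidx[size - 1];
    sift_down(hval, hidx, 0, size - 1);
  }
}

A hypothetical launch for m columns, assuming float values and int indices, would pass the shared-memory size as the third launch parameter: k_TopKPerColumn_heap<float, int><<<m, 1, k * (sizeof(float) + sizeof(int))>>>(d_top, d_mat, n, k); The answer does not say whether the final version kept one thread per column or parallelized the scan within a block, so treat this only as an illustration of the idea.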